diff --git a/Makefile b/Makefile index e0bf6b01b1f7..cf15c5021472 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ all build: # Example: # make build-all build-all: - hack/build-go.sh vendor/k8s.io/kubernetes/cmd/hyperkube vendor/github.com/openshift/oc/cmd/oc cmd/openshift-tests + hack/build-go.sh vendor/k8s.io/kubernetes/cmd/hyperkube cmd/openshift-tests .PHONY: build-all # Build the test binaries. diff --git a/cmd/doc.go b/cmd/doc.go index 152fd7460621..ac904096f105 100644 --- a/cmd/doc.go +++ b/cmd/doc.go @@ -12,6 +12,4 @@ import ( _ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration _ "k8s.io/kubernetes/pkg/kubectl/cmd" _ "k8s.io/kubernetes/pkg/version/prometheus" // for version metric registration - - _ "github.com/openshift/oc/pkg/cli" ) diff --git a/glide.lock b/glide.lock index 3295cc6b6b3b..7463062209e6 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 7622b75aea01706933f2e1470797c5cd1ceb7d1c97869ce895928386abb09a93 -updated: 2019-08-09T14:43:52.240397386-05:00 +hash: 401854ad46e02fa874a17049fec8d155a96641322670e60d2f8634301f27f70c +updated: 2019-08-19T09:45:49.451597341-04:00 imports: - name: bitbucket.org/ww/goautoneg version: 2ae31c8b6b30d2f4c8100c20d527b571e9c433bb @@ -9,14 +9,8 @@ imports: subpackages: - compute/metadata - internal -- name: github.com/alexbrainman/sspi - version: e580b900e9f5657daa5473021296289be6da2661 - subpackages: - - negotiate -- name: github.com/apcera/gssapi - version: 5fb4217df13b8e6878046fe1e5c10e560e1b86dc - name: github.com/apparentlymart/go-cidr - version: 1755c023625ec3a84979b90841a1ab067ed6c071 + version: b1115bf8e14a60131a196f908223e4506b0ddc35 subpackages: - cidr - name: github.com/armon/circbuf @@ -45,21 +39,17 @@ imports: - aws/session - aws/signer/v4 - internal/ini - - internal/s3err - internal/sdkio - internal/sdkrand - internal/sdkuri - internal/shareddefaults - private/protocol - private/protocol/ec2query - - private/protocol/eventstream - - private/protocol/eventstream/eventstreamapi - private/protocol/json/jsonutil - private/protocol/jsonrpc - private/protocol/query - private/protocol/query/queryutil - private/protocol/rest - - private/protocol/restxml - private/protocol/xml/xmlutil - service/autoscaling - service/ec2 @@ -67,9 +57,6 @@ imports: - service/elb - service/elbv2 - service/kms - - service/s3 - - service/s3/s3iface - - service/s3/s3manager - service/sts - name: github.com/Azure/azure-sdk-for-go version: da91af54816b4cf72949c225a2d0980f51fab01b @@ -201,7 +188,7 @@ imports: - errdefs - namespaces - name: github.com/containerd/continuity - version: aaeac12a7ffcd198ae25440a9dff125c2e2703a7 + version: f2a389ac0a02ce21c09edd7344677a601970f41c subpackages: - pathdriver - name: github.com/containernetworking/cni @@ -213,25 +200,8 @@ imports: - pkg/types/020 - pkg/types/current - pkg/version -- name: github.com/containers/image - version: 4bc6d24282b115f8b61a6d08470ed42ac7c91392 - repo: https://github.com/openshift/containers-image.git - subpackages: - - docker/policyconfiguration - - docker/reference - - manifest - - signature - - transports - - types - - version - name: github.com/containers/storage version: 912de200380ac2fd1e90f91b78c1495ed8d5beea - subpackages: - - pkg/fileutils - - pkg/homedir - - pkg/idtools - - pkg/mount - - pkg/system - name: github.com/coreos/bbolt version: 48ea1b39c25fc1bab3506fbc712ecbaa842c4d2d - name: github.com/coreos/etcd @@ -352,19 +322,9 @@ imports: subpackages: - digestset - manifest - - manifest/manifestlist - manifest/schema1 - manifest/schema2 
- - metrics - reference - - registry/api/errcode - - registry/api/v2 - - registry/client - - registry/client/auth - - registry/client/auth/challenge - - registry/client/transport - - registry/storage/cache - - registry/storage/cache/memory - name: github.com/docker/docker version: a9fbbdc8dd8794b20af358382ab780559bca589d subpackages: @@ -409,8 +369,6 @@ imports: - nat - sockets - tlsconfig -- name: github.com/docker/go-metrics - version: b84716841b82eab644a0c64fc8b42d480e49add5 - name: github.com/docker/go-units version: 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 - name: github.com/docker/libnetwork @@ -419,7 +377,7 @@ imports: - ipamutils - ipvs - name: github.com/docker/libtrust - version: aabc10ec26b754e797f9028f4589c5b7bd90dc20 + version: 9cbd2a1374f46905c68a4eb3694a130610adc62a - name: github.com/docker/spdystream version: 449fdfce4d962303d702fec724ef0ad181c92528 subpackages: @@ -447,7 +405,7 @@ imports: - name: github.com/getsentry/raven-go version: c977f96e109525a5d8fa10a19165341f601f38b0 - name: github.com/ghodss/yaml - version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee + version: c7ce16629ff4cd059ed96ed06419dd3856fd3577 - name: github.com/globalsign/mgo version: eeefdecb41b842af6dc652aaea4026e8403e62df subpackages: @@ -552,13 +510,10 @@ imports: - formats/dot/internal/lexer - formats/dot/internal/parser - formats/dot/internal/token - - internal/linear - internal/ordered - internal/set - path - simple - - topo - - traverse - name: github.com/gonum/internal version: e57e4534cf9b3b00ef6c0175f59d8d2d34f60914 subpackages: @@ -574,7 +529,7 @@ imports: subpackages: - mat64 - name: github.com/google/btree - version: 20236160a414454a9c64b6c8829381c6f4bddcaa + version: 7d79101e329e5a3adf994758c578dab82b90c017 - name: github.com/google/cadvisor version: 5fa6b13d2628c5c85c8508082cc1bdfa3373e8a9 repo: https://github.com/openshift/google-cadvisor.git @@ -675,8 +630,6 @@ imports: - openstack/networking/v2/ports - openstack/utils - pagination -- name: github.com/gorilla/mux - version: e67b3c02c7195c052acff13261f0c9fd1ba53011 - name: github.com/gorilla/websocket version: 4201258b820c74ac8e6922fc9e6b52f71fe46f8d - name: github.com/gregjones/httpcache @@ -722,8 +675,6 @@ imports: version: 76bb4ee9f0ab50f77826f2a2ee7fb9d3880d6ec2 - name: github.com/jmespath/go-jmespath version: 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 -- name: github.com/joho/godotenv - version: 6d367c18edf6ca7fd004efd6863e4c5728fa858e - name: github.com/jonboulle/clockwork version: 72f9bd7c4e0c2a40055ab3d0f09654f730cce982 - name: github.com/json-iterator/go @@ -736,24 +687,16 @@ imports: version: 2de2192f9e35ce981c152a873ed943b93b79ced4 - name: github.com/kr/fs version: 2788f0dbd16903de03cb8186e5c7d97b69ad387b -- name: github.com/lestrrat-go/jspointer - version: 82fadba7561c3a8d78133c2b957263c0963bb79d -- name: github.com/lestrrat-go/jsref - version: 1b590508f37d3af76c77c8328e16978d2889b486 - subpackages: - - provider -- name: github.com/lestrrat-go/pdebug - version: 39f9a71bcabe9432cbdfe4d3d33f41988acd2ce6 -- name: github.com/lestrrat-go/structinfo - version: acd51874663bf3297433cb1f1015075c3cbe6130 - name: github.com/lestrrat/go-jspointer - version: 82fadba7561c3a8d78133c2b957263c0963bb79d + version: f4881e611bdbe9fb413a7780721ef8400a1f2341 repo: https://github.com/lestrrat/go-jspointer.git - name: github.com/lestrrat/go-jsref version: 50df7b2d07d799426a9ac43fa24bdb4785f72a54 repo: https://github.com/lestrrat/go-jsref.git + subpackages: + - provider - name: github.com/lestrrat/go-jsschema - version: 
5c81c58ffcc359c4390d440b45f5462edb0107cb + version: a6a42341b50d8d7e2a733db922eefaa756321021 - name: github.com/lestrrat/go-pdebug version: 569c97477ae8837e053e5a50bc739e15172b8ebe repo: https://github.com/lestrrat/go-pdebug.git @@ -783,7 +726,7 @@ imports: - jlexer - jwriter - name: github.com/MakeNowJust/heredoc - version: e9091a26100e9cfb2b6a8f470085bfa541931a91 + version: bb23615498cded5e105af4ce27de75b089cbe851 - name: github.com/marstr/guid version: 8bdf7d1a087ccc975cf37dd6507da50698fd19ca - name: github.com/mattn/go-shellwords @@ -833,7 +776,7 @@ imports: - internal/timeout - internal/wclayer - name: github.com/miekg/dns - version: 5a2b9fab83ff0f8bfc99684bd5f43a37abe560f1 + version: 5d001d020961ae1c184f9f8152fdc73810481677 - name: github.com/mindprince/gonvml version: fee913ce8fb235edf54739d259ca0ecc226c7b8a - name: github.com/mistifyio/go-zfs @@ -842,11 +785,6 @@ imports: version: ad45545899c7b13c020ea92b2072220eefad42b8 - name: github.com/mitchellh/mapstructure version: 53818660ed4955e899c0bcafa97299a388bd7c8e -- name: github.com/moby/buildkit - version: c3a857e3fca0a5cadd44ffd886a977559841aeaa - subpackages: - - frontend/dockerfile/command - - frontend/dockerfile/parser - name: github.com/modern-go/concurrent version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 - name: github.com/modern-go/reflect2 @@ -855,8 +793,6 @@ imports: version: 491d3605edfb866af34a48075bd4355ac1bf46ca - name: github.com/mrunalp/fileutils version: 4ee1cc9a80582a0c75febdd5cfa779ee4361cbca -- name: github.com/mtrmac/gpgme - version: b2432428689ca58c2b8e8dea9449d3295cf96fc9 - name: github.com/munnerz/goautoneg version: a547fc61f48d567d5b4ec6f8aee5573d8efce11d - name: github.com/mxk/go-flowrate @@ -942,7 +878,7 @@ imports: - go-selinux - go-selinux/label - name: github.com/openshift/api - version: 8f800d2391d0b451818fd5bfc348b16d7cc1e588 + version: a94e914914f4228d0bcba6fc8a22614c5f5e2dad subpackages: - annotations - apps @@ -1012,7 +948,7 @@ imports: - pkg/securitycontextconstraints/util - pkg/securitycontextconstraints/util/sort - name: github.com/openshift/client-go - version: e9678e3b850da36470c5554609c6cd110aace47e + version: 5a5508328169b8a6992ea4ef711add89ddce3c6d subpackages: - apps/clientset/versioned - apps/clientset/versioned/scheme @@ -1113,7 +1049,7 @@ imports: - user/informers/externalversions/user/v1 - user/listers/user/v1 - name: github.com/openshift/library-go - version: 211d32684d6cd8b9a50db92e213d4b6ea827bb63 + version: 97bb8b699c927fbdb7a80d48865d89acc3980d94 subpackages: - pkg/apiserver/admission/admissionrestconfig - pkg/apiserver/admission/admissiontimeout @@ -1122,12 +1058,8 @@ imports: - pkg/apps/appsserialization - pkg/apps/appsutil - pkg/authentication/bootstrapauthenticator - - pkg/authorization/authorizationutil - pkg/authorization/scopemetadata - - pkg/build/buildutil - - pkg/build/envresolve - pkg/build/naming - - pkg/certs - pkg/config/client - pkg/config/configdefaults - pkg/config/helpers @@ -1139,199 +1071,18 @@ imports: - pkg/image/internal/digest - pkg/image/internal/reference - pkg/image/reference - - pkg/image/referencemutator - - pkg/image/registryclient - - pkg/image/trigger - - pkg/legacyapi/legacygroupification - - pkg/network/networkapihelpers - pkg/network/networkutils - pkg/oauth/oauthdiscovery - pkg/operator/events - pkg/operator/resource/resourceapply - pkg/operator/resource/resourcemerge - pkg/operator/resource/resourceread - - pkg/operator/resource/retry - pkg/quota/clusterquotamapping - pkg/quota/quotautil - - pkg/security/ldapclient - - pkg/security/ldapquery - 
pkg/security/ldaputil - pkg/security/uid - pkg/serviceability - - pkg/template/generator - - pkg/template/templateprocessing - pkg/template/templateprocessingclient - - pkg/unidling/unidlingclient -- name: github.com/openshift/oc - version: 8fdb79e549651c0f3c91d54349715309b5d149d3 - subpackages: - - pkg/cli - - pkg/cli/admin - - pkg/cli/admin/buildchain - - pkg/cli/admin/cert - - pkg/cli/admin/createbootstrapprojecttemplate - - pkg/cli/admin/createerrortemplate - - pkg/cli/admin/createkubeconfig - - pkg/cli/admin/createlogintemplate - - pkg/cli/admin/createproviderselectiontemplate - - pkg/cli/admin/groups - - pkg/cli/admin/groups/new - - pkg/cli/admin/groups/sync - - pkg/cli/admin/groups/users - - pkg/cli/admin/migrate - - pkg/cli/admin/migrate/etcd - - pkg/cli/admin/migrate/images - - pkg/cli/admin/migrate/legacyhpa - - pkg/cli/admin/migrate/storage - - pkg/cli/admin/migrate/templateinstances - - pkg/cli/admin/mustgather - - pkg/cli/admin/network - - pkg/cli/admin/node - - pkg/cli/admin/policy - - pkg/cli/admin/project - - pkg/cli/admin/prune - - pkg/cli/admin/prune/auth - - pkg/cli/admin/prune/builds - - pkg/cli/admin/prune/deployments - - pkg/cli/admin/prune/imageprune - - pkg/cli/admin/prune/images - - pkg/cli/admin/release - - pkg/cli/admin/top - - pkg/cli/admin/upgrade - - pkg/cli/admin/verifyimagesignature - - pkg/cli/buildlogs - - pkg/cli/cancelbuild - - pkg/cli/create - - pkg/cli/create/route - - pkg/cli/debug - - pkg/cli/deployer - - pkg/cli/deployer/strategy - - pkg/cli/deployer/strategy/recreate - - pkg/cli/deployer/strategy/rolling - - pkg/cli/deployer/strategy/support - - pkg/cli/deployer/strategy/util - - pkg/cli/experimental/dockergc - - pkg/cli/expose - - pkg/cli/extract - - pkg/cli/idle - - pkg/cli/image - - pkg/cli/image/append - - pkg/cli/image/archive - - pkg/cli/image/extract - - pkg/cli/image/info - - pkg/cli/image/manifest - - pkg/cli/image/manifest/dockercredentials - - pkg/cli/image/mirror - - pkg/cli/image/workqueue - - pkg/cli/importimage - - pkg/cli/kubectlwrappers - - pkg/cli/login - - pkg/cli/logout - - pkg/cli/logs - - pkg/cli/newapp - - pkg/cli/newbuild - - pkg/cli/observe - - pkg/cli/options - - pkg/cli/policy - - pkg/cli/policy/cani - - pkg/cli/process - - pkg/cli/project - - pkg/cli/projects - - pkg/cli/recycle - - pkg/cli/registry - - pkg/cli/registry/info - - pkg/cli/registry/login - - pkg/cli/requestproject - - pkg/cli/rollback - - pkg/cli/rollout - - pkg/cli/rsh - - pkg/cli/rsync - - pkg/cli/rsync/fsnotification - - pkg/cli/secrets - - pkg/cli/serviceaccounts - - pkg/cli/set - - pkg/cli/startbuild - - pkg/cli/status - - pkg/cli/tag - - pkg/cli/version - - pkg/cli/whoami - - pkg/helpers/authorization - - pkg/helpers/build - - pkg/helpers/build/client/v1 - - pkg/helpers/bulk - - pkg/helpers/clientcmd - - pkg/helpers/cmd - - pkg/helpers/conditions - - pkg/helpers/describe - - pkg/helpers/dot - - pkg/helpers/env - - pkg/helpers/errors - - pkg/helpers/file - - pkg/helpers/flagtypes - - pkg/helpers/graph/appsgraph - - pkg/helpers/graph/appsgraph/analysis - - pkg/helpers/graph/appsgraph/nodes - - pkg/helpers/graph/buildgraph - - pkg/helpers/graph/buildgraph/analysis - - pkg/helpers/graph/buildgraph/nodes - - pkg/helpers/graph/genericgraph - - pkg/helpers/graph/genericgraph/graphview - - pkg/helpers/graph/imagegraph - - pkg/helpers/graph/imagegraph/nodes - - pkg/helpers/graph/kubegraph - - pkg/helpers/graph/kubegraph/analysis - - pkg/helpers/graph/kubegraph/nodes - - pkg/helpers/graph/routegraph - - pkg/helpers/graph/routegraph/analysis - - 
pkg/helpers/graph/routegraph/nodes - - pkg/helpers/groupsync - - pkg/helpers/groupsync/ad - - pkg/helpers/groupsync/groupdetector - - pkg/helpers/groupsync/interfaces - - pkg/helpers/groupsync/ldap - - pkg/helpers/groupsync/rfc2307 - - pkg/helpers/groupsync/syncerror - - pkg/helpers/image - - pkg/helpers/image/dockerlayer - - pkg/helpers/image/dockerlayer/add - - pkg/helpers/kubeconfig - - pkg/helpers/legacy - - pkg/helpers/newapp - - pkg/helpers/newapp/app - - pkg/helpers/newapp/cmd - - pkg/helpers/newapp/docker - - pkg/helpers/newapp/docker/dockerfile - - pkg/helpers/newapp/dockerfile - - pkg/helpers/newapp/jenkinsfile - - pkg/helpers/newapp/portutils - - pkg/helpers/newapp/source - - pkg/helpers/originkubeconfignames - - pkg/helpers/originpolymorphichelpers - - pkg/helpers/originpolymorphichelpers/deploymentconfigs - - pkg/helpers/parallel - - pkg/helpers/proc - - pkg/helpers/project - - pkg/helpers/quota - - pkg/helpers/route - - pkg/helpers/route/generator - - pkg/helpers/template/templateprocessorclient - - pkg/helpers/term - - pkg/helpers/tokencmd - - pkg/version -- name: github.com/openshift/source-to-image - version: 2a579ecd66dfaf9ee21bbc860fcde8e4d1d12301 - subpackages: - - pkg/api - - pkg/api/constants - - pkg/errors - - pkg/scm/git - - pkg/tar - - pkg/util - - pkg/util/cmd - - pkg/util/cygpath - - pkg/util/fs - - pkg/util/log - - pkg/util/user - name: github.com/pborman/uuid version: ca53cad383cad2479bbba7f7a1a05797ec1386e4 - name: github.com/pelletier/go-toml @@ -1424,7 +1175,9 @@ imports: - name: github.com/spf13/cast version: e31f36ffc91a2ba9ddb72a4b6a607ff9b3d3cb63 - name: github.com/spf13/cobra - version: b80588d523ec50c7fee20218426cf2ff70920f06 + version: c439c4fa093711d42e1b01acb1235b52004753c1 + subpackages: + - doc - name: github.com/spf13/jwalterweatherman version: 33c24e77fb80341fe7130ee7c594256ff08ccc46 - name: github.com/spf13/pflag @@ -1532,7 +1285,6 @@ imports: subpackages: - bcrypt - blowfish - - cast5 - cryptobyte - cryptobyte/asn1 - curve25519 @@ -1542,12 +1294,6 @@ imports: - internal/subtle - nacl/secretbox - ocsp - - openpgp - - openpgp/armor - - openpgp/elgamal - - openpgp/errors - - openpgp/packet - - openpgp/s2k - pkcs12 - pkcs12/internal/rc2 - poly1305 @@ -1557,7 +1303,6 @@ imports: - name: golang.org/x/net version: 65e2d4e15006aab9813ff8769e768bbf4bb667a0 subpackages: - - bpf - context - context/ctxhttp - html @@ -1567,14 +1312,10 @@ imports: - http2 - http2/hpack - idna - - internal/iana - internal/nettest - - internal/socket - internal/socks - internal/sockstest - internal/timeseries - - ipv4 - - ipv6 - proxy - trace - websocket @@ -1697,6 +1438,7 @@ imports: - credentials - encoding - encoding/proto + - grpclb/grpc_lb_v1/messages - grpclog - health - health/grpc_health_v1 @@ -1740,7 +1482,7 @@ imports: - name: gopkg.in/yaml.v2 version: 5420a8b6744d3b0345ab293f6fcba19c978f1183 - name: k8s.io/api - version: 5b6d4ec96213966713b65f5d4aa1ee1b765f2c7c + version: ffaba947a5e96ed8955a635a8bf105089e311cbc repo: https://github.com/openshift/kubernetes-api.git subpackages: - admission/v1beta1 @@ -1782,7 +1524,7 @@ imports: - storage/v1alpha1 - storage/v1beta1 - name: k8s.io/apiextensions-apiserver - version: ef1fb026cb0ebf50e765bba38c951dc095f7fddf + version: e58a314b37491f6f889b761cd779b560f02e062b repo: https://github.com/openshift/kubernetes-apiextensions-apiserver.git subpackages: - pkg/apihelpers @@ -2354,7 +2096,7 @@ imports: subpackages: - config/v1beta1 - name: k8s.io/kubernetes - version: faf0538d61897caa1a1b8aea789d8def9d585e74 + version: 
379111049d2c950d6c3ea70c6d87930ca48124cc repo: https://github.com/openshift/kubernetes.git subpackages: - cmd/cloud-controller-manager/app diff --git a/glide.yaml b/glide.yaml index 2392e8fbc4be..af27394d964a 100644 --- a/glide.yaml +++ b/glide.yaml @@ -94,8 +94,6 @@ import: version: master - package: github.com/openshift/apiserver-library-go version: master -- package: github.com/openshift/oc - version: master # forks third # master diff --git a/hack/build-cross.sh b/hack/build-cross.sh index b63e2a1884b2..7c1a51a9017b 100755 --- a/hack/build-cross.sh +++ b/hack/build-cross.sh @@ -15,8 +15,6 @@ platforms=( image_platforms=( ) test_platforms=( "${host_platform}" ) -targets=( "${OS_CROSS_COMPILE_TARGETS[@]}" ) - # Special case ppc64le if [[ "${host_platform}" == "linux/ppc64le" ]]; then platforms+=( "linux/ppc64le" ) @@ -73,14 +71,8 @@ os::build::build_binaries "${OS_IMAGE_COMPILE_TARGETS_LINUX[@]-}" # Build the primary client/server for all platforms OS_BUILD_PLATFORMS=("${platforms[@]+"${platforms[@]}"}") -os::build::build_binaries "${OS_CROSS_COMPILE_TARGETS[@]}" if [[ "${OS_BUILD_RELEASE_ARCHIVES-}" != "n" ]]; then - # Make the primary client/server release. - OS_BUILD_PLATFORMS=("${platforms[@]+"${platforms[@]}"}") - OS_RELEASE_ARCHIVE="openshift-origin" \ - os::build::place_bins "${OS_CROSS_COMPILE_BINARIES[@]}" - # Make the image binaries release. OS_BUILD_PLATFORMS=("${image_platforms[@]+"${image_platforms[@]}"}") OS_RELEASE_ARCHIVE="openshift-origin-image" \ @@ -89,8 +81,6 @@ if [[ "${OS_BUILD_RELEASE_ARCHIVES-}" != "n" ]]; then os::build::release_sha else # Place binaries only - OS_BUILD_PLATFORMS=("${platforms[@]+"${platforms[@]}"}") - os::build::place_bins "${OS_CROSS_COMPILE_BINARIES[@]}" OS_BUILD_PLATFORMS=("${image_platforms[@]+"${image_platforms[@]}"}") os::build::place_bins "${OS_IMAGE_COMPILE_BINARIES[@]}" fi diff --git a/hack/build-go.sh b/hack/build-go.sh index fc7366c3879c..7f9398b47f52 100755 --- a/hack/build-go.sh +++ b/hack/build-go.sh @@ -15,9 +15,9 @@ platform="$(os::build::host_platform)" build_targets=("$@") if [[ -z "$@" ]]; then if [[ "${platform}" == linux/* ]]; then - build_targets=("${OS_CROSS_COMPILE_TARGETS[@]}" vendor/k8s.io/kubernetes/cmd/hyperkube) + build_targets=(vendor/k8s.io/kubernetes/cmd/hyperkube) else - build_targets=("${OS_CROSS_COMPILE_TARGETS[@]}" vendor/k8s.io/kubernetes/cmd/hyperkube) + build_targets=(vendor/k8s.io/kubernetes/cmd/hyperkube) fi fi diff --git a/hack/import-restrictions.json b/hack/import-restrictions.json index 1a1f16d33816..a1e6df34db6a 100644 --- a/hack/import-restrictions.json +++ b/hack/import-restrictions.json @@ -1,18 +1,4 @@ [ - { - "checkedPackageRoots": [ - "github.com/openshift/origin/pkg", - "github.com/openshift/origin/test" - ], - "ignoredSubTrees": [], - "forbiddenImportPackageRoots": [ - "vendor/github.com/openshift/oc" - ], - "allowedImportPackageRoots": [ - "vendor" - ] - }, - { "checkedPackageRoots": [ "github.com/openshift/origin" @@ -21,5 +7,4 @@ "k8s.io/kubernetes/staging" ] } - ] diff --git a/hack/lib/build/archive.sh b/hack/lib/build/archive.sh index 8d36036539bd..d0e40c57294a 100644 --- a/hack/lib/build/archive.sh +++ b/hack/lib/build/archive.sh @@ -125,12 +125,6 @@ function os::build::archive::detect_local_release_tars() { [[ -z "${WARN-}" ]] && return 2 fi - local client=$(find ${OS_OUTPUT_RELEASEPATH} -maxdepth 1 -type f -name openshift-origin-client-tools-*-${platform}* \( -name *.tar.gz -or -name *.zip \)) - if [[ $(echo "${client}" | wc -l) -ne 1 || -z "${client}" ]]; then - echo "There should be 
exactly one ${platform} client tar in $OS_OUTPUT_RELEASEPATH" - [[ -n "${WARN-}" ]] || return 2 - fi - local image=$(find ${OS_OUTPUT_RELEASEPATH} -maxdepth 1 -type f -name openshift-origin-image*-${platform}* \( -name *.tar.gz -or -name *.zip \)) if [[ $(echo "${image}" | wc -l) -ne 1 || -z "${image}" ]]; then echo "There should be exactly one ${platform} image tar in $OS_OUTPUT_RELEASEPATH" @@ -139,7 +133,6 @@ function os::build::archive::detect_local_release_tars() { export OS_PRIMARY_RELEASE_TAR="${primary}" export OS_IMAGE_RELEASE_TAR="${image}" - export OS_CLIENT_RELEASE_TAR="${client}" export OS_RELEASE_COMMIT="$(cat ${OS_OUTPUT_RELEASEPATH}/.commit)" } -readonly -f os::build::archive::detect_local_release_tars \ No newline at end of file +readonly -f os::build::archive::detect_local_release_tars diff --git a/hack/lib/build/binaries.sh b/hack/lib/build/binaries.sh index 9ccf34db21f1..e2ad15a06a1c 100644 --- a/hack/lib/build/binaries.sh +++ b/hack/lib/build/binaries.sh @@ -321,14 +321,6 @@ function os::build::place_bins() { if [[ $platform == "windows/amd64" ]]; then suffix=".exe" fi - if [[ "${OS_RELEASE_WITHOUT_LINKS-}" == "" ]]; then - for linkname in "${OC_BINARY_COPY[@]}"; do - local src="${OS_OUTPUT_BINPATH}/${platform}/oc${suffix}" - if [[ -f "${src}" ]]; then - ln -f "$src" "${OS_OUTPUT_BINPATH}/${platform}/${linkname}${suffix}" - fi - done - fi # If no release archive was requested, we're done. if [[ "${OS_RELEASE_ARCHIVE-}" == "" ]]; then @@ -341,37 +333,19 @@ function os::build::place_bins() { cp "${OS_OUTPUT_BINPATH}/${platform}/${binary}" "${release_binpath}/" done - # Create binary copies where specified. - for linkname in "${OC_BINARY_COPY[@]}"; do - local src="${release_binpath}/oc${suffix}" - if [[ -f "${src}" ]]; then - cp -f "${release_binpath}/oc${suffix}" "${release_binpath}/${linkname}${suffix}" - fi - done - # Create the release archive. 
platform="$( os::build::host_platform_friendly "${platform}" )" if [[ ${OS_RELEASE_ARCHIVE} == "openshift-origin" ]]; then for file in "${OS_BINARY_RELEASE_CLIENT_EXTRA[@]}"; do cp "${file}" "${release_binpath}/" done - if [[ $platform == "windows" ]]; then - OS_RELEASE_ARCHIVE="openshift-origin-client-tools" os::build::archive::zip "${OS_BINARY_RELEASE_CLIENT_WINDOWS[@]}" - elif [[ $platform == "mac" ]]; then - OS_RELEASE_ARCHIVE="openshift-origin-client-tools" os::build::archive::zip "${OS_BINARY_RELEASE_CLIENT_MAC[@]}" - elif [[ $platform == "linux-32bit" ]]; then - OS_RELEASE_ARCHIVE="openshift-origin-client-tools" os::build::archive::tar "${OS_BINARY_RELEASE_CLIENT_LINUX[@]}" - elif [[ $platform == "linux-64bit" ]]; then - OS_RELEASE_ARCHIVE="openshift-origin-client-tools" os::build::archive::tar "${OS_BINARY_RELEASE_CLIENT_LINUX[@]}" + if [[ $platform == "linux-64bit" ]]; then OS_RELEASE_ARCHIVE="openshift-origin-server" os::build::archive::tar "${OS_BINARY_RELEASE_SERVER_LINUX[@]}" elif [[ $platform == "linux-powerpc64" ]]; then - OS_RELEASE_ARCHIVE="openshift-origin-client-tools" os::build::archive::tar "${OS_BINARY_RELEASE_CLIENT_LINUX[@]}" OS_RELEASE_ARCHIVE="openshift-origin-server" os::build::archive::tar "${OS_BINARY_RELEASE_SERVER_LINUX[@]}" elif [[ $platform == "linux-arm64" ]]; then - OS_RELEASE_ARCHIVE="openshift-origin-client-tools" os::build::archive::tar "${OS_BINARY_RELEASE_CLIENT_LINUX[@]}" OS_RELEASE_ARCHIVE="openshift-origin-server" os::build::archive::tar "${OS_BINARY_RELEASE_SERVER_LINUX[@]}" elif [[ $platform == "linux-s390" ]]; then - OS_RELEASE_ARCHIVE="openshift-origin-client-tools" os::build::archive::tar "${OS_BINARY_RELEASE_CLIENT_LINUX[@]}" OS_RELEASE_ARCHIVE="openshift-origin-server" os::build::archive::tar "${OS_BINARY_RELEASE_SERVER_LINUX[@]}" else echo "++ ERROR: No release type defined for $platform" @@ -402,25 +376,6 @@ readonly -f os::build::release_sha # binary in _output/local/bin/${platform} function os::build::make_openshift_binary_symlinks() { platform=$(os::build::host_platform) - if [[ -f "${OS_OUTPUT_BINPATH}/${platform}/openshift" ]]; then - if (( ${#OPENSHIFT_BINARY_SYMLINKS[@]} )); then - for linkname in "${OPENSHIFT_BINARY_SYMLINKS[@]##*/}"; do - ln -sf openshift "${OS_OUTPUT_BINPATH}/${platform}/${linkname}" - done - fi - fi - if [[ -f "${OS_OUTPUT_BINPATH}/${platform}/oc" ]]; then - if (( ${#OC_BINARY_SYMLINKS[@]} )); then - for linkname in "${OC_BINARY_SYMLINKS[@]##*/}"; do - ln -sf oc "${OS_OUTPUT_BINPATH}/${platform}/${linkname}" - done - fi - if (( ${#OC_BINARY_COPY[@]} )); then - for linkname in "${OC_BINARY_COPY[@]##*/}"; do - ln -sf oc "${OS_OUTPUT_BINPATH}/${platform}/${linkname}" - done - fi - fi } readonly -f os::build::make_openshift_binary_symlinks diff --git a/hack/lib/constants.sh b/hack/lib/constants.sh index 3b838d84fdfa..f69320332f15 100755 --- a/hack/lib/constants.sh +++ b/hack/lib/constants.sh @@ -35,40 +35,10 @@ readonly OS_SCRATCH_IMAGE_COMPILE_TARGETS_LINUX=( ) readonly OS_IMAGE_COMPILE_BINARIES=("${OS_SCRATCH_IMAGE_COMPILE_TARGETS_LINUX[@]##*/}" "${OS_IMAGE_COMPILE_TARGETS_LINUX[@]##*/}") -readonly OS_CROSS_COMPILE_TARGETS=( - vendor/github.com/openshift/oc/cmd/oc -) -readonly OS_CROSS_COMPILE_BINARIES=("${OS_CROSS_COMPILE_TARGETS[@]##*/}") - readonly OS_GOVET_BLACKLIST=( ) #If you update this list, be sure to get the images/origin/Dockerfile -readonly OPENSHIFT_BINARY_SYMLINKS=( -) -readonly OC_BINARY_SYMLINKS=( -) -readonly OC_BINARY_COPY=( - kubectl -) -readonly OS_BINARY_RELEASE_CLIENT_WINDOWS=( - oc.exe - 
kubectl.exe - README.md - ./LICENSE -) -readonly OS_BINARY_RELEASE_CLIENT_MAC=( - oc - kubectl - README.md - ./LICENSE -) -readonly OS_BINARY_RELEASE_CLIENT_LINUX=( - ./oc - ./kubectl - ./README.md - ./LICENSE -) readonly OS_BINARY_RELEASE_SERVER_LINUX=( './*' ) @@ -103,9 +73,6 @@ function os::build::ldflags() { "-w" ) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/oc/clusterup.defaultImageStreams" "${OS_BUILD_LDFLAGS_DEFAULT_IMAGE_STREAMS}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/cmd/util/variable.DefaultImagePrefix" "${OS_BUILD_LDFLAGS_IMAGE_PREFIX}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.majorFromGit" "${OS_GIT_MAJOR}")) ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.minorFromGit" "${OS_GIT_MINOR}")) ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.versionFromGit" "${OS_GIT_VERSION}")) @@ -113,27 +80,6 @@ function os::build::ldflags() { ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.gitTreeState" "${OS_GIT_TREE_STATE}")) ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.buildDate" "${buildDate}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/oc/pkg/version.majorFromGit" "${OS_GIT_MAJOR}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/oc/pkg/version.minorFromGit" "${OS_GIT_MINOR}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/oc/pkg/version.versionFromGit" "${OS_GIT_VERSION}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/oc/pkg/version.commitFromGit" "${OS_GIT_COMMIT}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/oc/pkg/version.gitTreeState" "${OS_GIT_TREE_STATE}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/oc/pkg/version.buildDate" "${buildDate}")) - - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/openshift-apiserver/pkg/version.majorFromGit" "${OS_GIT_MAJOR}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/openshift-apiserver/pkg/version.minorFromGit" "${OS_GIT_MINOR}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/openshift-apiserver/pkg/version.versionFromGit" "${OS_GIT_VERSION}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/openshift-apiserver/pkg/version.commitFromGit" "${OS_GIT_COMMIT}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/openshift-apiserver/pkg/version.gitTreeState" "${OS_GIT_TREE_STATE}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/openshift-apiserver/pkg/version.buildDate" "${buildDate}")) - - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/openshift-controller-manager/pkg/version.majorFromGit" "${OS_GIT_MAJOR}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/openshift-controller-manager/pkg/version.minorFromGit" "${OS_GIT_MINOR}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/openshift-controller-manager/pkg/version.versionFromGit" "${OS_GIT_VERSION}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/openshift-controller-manager/pkg/version.commitFromGit" "${OS_GIT_COMMIT}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/openshift-controller-manager/pkg/version.gitTreeState" "${OS_GIT_TREE_STATE}")) - 
ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/github.com/openshift/openshift-controller-manager/pkg/version.buildDate" "${buildDate}")) - ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/kubernetes/pkg/version.gitMajor" "${KUBE_GIT_MAJOR}")) ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/kubernetes/pkg/version.gitMinor" "${KUBE_GIT_MINOR}")) ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/kubernetes/pkg/version.gitCommit" "${OS_GIT_COMMIT}")) @@ -337,14 +283,6 @@ function os::build::check_binaries() { fi fi - # enforce that certain binaries don't accidentally grow too large - # IMPORTANT: contact Clayton or another master team member before altering this code - if [[ -f "${OS_OUTPUT_BINPATH}/${platform}/oc" ]]; then - size=$($duexe --apparent-size -m "${OS_OUTPUT_BINPATH}/${platform}/oc" | cut -f 1) - if [[ "${size}" -gt "118" ]]; then - os::log::fatal "oc binary has grown substantially to ${size}. You must have approval before bumping this limit." - fi - fi if [[ -f "${OS_OUTPUT_BINPATH}/${platform}/pod" ]]; then size=$($duexe --apparent-size -m "${OS_OUTPUT_BINPATH}/${platform}/pod" | cut -f 1) if [[ "${size}" -gt "2" ]]; then diff --git a/origin.spec b/origin.spec index 2e28cbcfd264..09f6ac437b83 100644 --- a/origin.spec +++ b/origin.spec @@ -77,7 +77,6 @@ BuildRequires: bsdtar BuildRequires: golang >= %{golang_version} BuildRequires: krb5-devel BuildRequires: rsync -Requires: %{name}-clients = %{version}-%{release} # # The following Bundled Provides entries are populated automatically by the @@ -110,26 +109,6 @@ Provides: atomic-openshift-node %description hyperkube %{summary} -%package clients -Summary: %{product_name} Client binaries for Linux -Provides: atomic-openshift-clients -Obsoletes: atomic-openshift-clients -Requires: bash-completion - -%description clients -%{summary} - -%if 0%{?make_redistributable} -%package clients-redistributable -Summary: %{product_name} Client binaries for Linux, Mac OSX, and Windows -Provides: atomic-openshift-clients-redistributable -Obsoletes: atomic-openshift-clients-redistributable -BuildRequires: goversioninfo - -%description clients-redistributable -%{summary} -%endif - %prep %if 0%{do_prep} %setup -q @@ -168,62 +147,15 @@ PLATFORM="$(go env GOHOSTOS)/$(go env GOHOSTARCH)" install -d %{buildroot}%{_bindir} # Install linux components -for bin in oc hyperkube +for bin in hyperkube do echo "+++ INSTALLING ${bin}" install -p -m 755 _output/local/bin/${PLATFORM}/${bin} %{buildroot}%{_bindir}/${bin} done -%if 0%{?make_redistributable} -# Install client executable for windows and mac -install -d %{buildroot}%{_datadir}/%{name}/{linux,macosx,windows} -install -p -m 755 _output/local/bin/linux/amd64/oc %{buildroot}%{_datadir}/%{name}/linux/oc -install -p -m 755 _output/local/bin/linux/amd64/kubectl %{buildroot}%{_datadir}/%{name}/linux/kubectl -install -p -m 755 _output/local/bin/darwin/amd64/oc %{buildroot}/%{_datadir}/%{name}/macosx/oc -install -p -m 755 _output/local/bin/darwin/amd64/kubectl %{buildroot}/%{_datadir}/%{name}/macosx/kubectl -install -p -m 755 _output/local/bin/windows/amd64/oc.exe %{buildroot}/%{_datadir}/%{name}/windows/oc.exe -install -p -m 755 _output/local/bin/windows/amd64/kubectl.exe %{buildroot}/%{_datadir}/%{name}/windows/kubectl.exe -%endif - -for cmd in \ - kubectl -do - ln -s oc %{buildroot}%{_bindir}/$cmd -done - -# Install bash completions -install -d -m 755 %{buildroot}%{_sysconfdir}/bash_completion.d/ -for bin in oc kubectl -do - echo "+++ INSTALLING BASH COMPLETIONS FOR 
${bin} " - %{buildroot}%{_bindir}/${bin} completion bash > %{buildroot}%{_sysconfdir}/bash_completion.d/${bin} - chmod 644 %{buildroot}%{_sysconfdir}/bash_completion.d/${bin} -done - %files hyperkube %license LICENSE %{_bindir}/hyperkube %defattr(-,root,root,0700) -%files clients -%license LICENSE -%{_bindir}/oc -%{_bindir}/kubectl -%{_sysconfdir}/bash_completion.d/oc -%{_sysconfdir}/bash_completion.d/kubectl - -%if 0%{?make_redistributable} -%files clients-redistributable -%license LICENSE -%dir %{_datadir}/%{name}/linux/ -%dir %{_datadir}/%{name}/macosx/ -%dir %{_datadir}/%{name}/windows/ -%{_datadir}/%{name}/linux/oc -%{_datadir}/%{name}/linux/kubectl -%{_datadir}/%{name}/macosx/oc -%{_datadir}/%{name}/macosx/kubectl -%{_datadir}/%{name}/windows/oc.exe -%{_datadir}/%{name}/windows/kubectl.exe -%endif - %changelog diff --git a/vendor/github.com/MakeNowJust/heredoc/heredoc.go b/vendor/github.com/MakeNowJust/heredoc/heredoc.go index 63ce72adc413..fea12e622f12 100644 --- a/vendor/github.com/MakeNowJust/heredoc/heredoc.go +++ b/vendor/github.com/MakeNowJust/heredoc/heredoc.go @@ -33,7 +33,7 @@ const maxInt = int(^uint(0) >> 1) // Doc returns un-indented string as here-document. func Doc(raw string) string { skipFirstLine := false - if len(raw) > 0 && raw[0] == '\n' { + if raw[0] == '\n' { raw = raw[1:] } else { skipFirstLine = true diff --git a/vendor/github.com/MakeNowJust/heredoc/heredoc_test.go b/vendor/github.com/MakeNowJust/heredoc/heredoc_test.go index 290805b1447d..3b8d5eee9149 100644 --- a/vendor/github.com/MakeNowJust/heredoc/heredoc_test.go +++ b/vendor/github.com/MakeNowJust/heredoc/heredoc_test.go @@ -13,7 +13,6 @@ type testCase struct { } var tests = []testCase{ - {"", ""}, {` Foo Bar diff --git a/vendor/github.com/alexbrainman/sspi/LICENSE b/vendor/github.com/alexbrainman/sspi/LICENSE deleted file mode 100644 index 74487567632c..000000000000 --- a/vendor/github.com/alexbrainman/sspi/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
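The heredoc change above is behavioral, not just a version pin: with the len(raw) > 0 guard gone, Doc indexes raw[0] unconditionally, which is why the {"", ""} test case is dropped in the same hunk. A minimal illustrative sketch of the caller-side implication (the empty-string guard here is the caller's responsibility and is not part of this patch):

    package main

    import (
        "fmt"

        "github.com/MakeNowJust/heredoc"
    )

    func main() {
        // Doc strips the leading newline and the common indentation.
        fmt.Print(heredoc.Doc(`
            Usage:
              openshift-tests run <suite>
        `))

        // After the revert, Doc(raw) reads raw[0] without a length check,
        // so an empty string panics with an index-out-of-range error.
        // Callers that may receive empty input must guard themselves:
        raw := ""
        if raw != "" {
            fmt.Print(heredoc.Doc(raw))
        }
    }
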
diff --git a/vendor/github.com/alexbrainman/sspi/README.md b/vendor/github.com/alexbrainman/sspi/README.md deleted file mode 100644 index 848b6b586bbd..000000000000 --- a/vendor/github.com/alexbrainman/sspi/README.md +++ /dev/null @@ -1 +0,0 @@ -This repository holds Go packages for accessing Security Support Provider Interface on Windows. diff --git a/vendor/github.com/alexbrainman/sspi/buffer.go b/vendor/github.com/alexbrainman/sspi/buffer.go deleted file mode 100644 index f4b0ef326da0..000000000000 --- a/vendor/github.com/alexbrainman/sspi/buffer.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package sspi - -import ( - "io" - "unsafe" -) - -func (b *SecBuffer) Set(buftype uint32, data []byte) { - b.BufferType = buftype - if len(data) > 0 { - b.Buffer = &data[0] - b.BufferSize = uint32(len(data)) - } else { - b.Buffer = nil - b.BufferSize = 0 - } -} - -func (b *SecBuffer) Free() error { - if b.Buffer == nil { - return nil - } - return FreeContextBuffer((*byte)(unsafe.Pointer(b.Buffer))) -} - -func (b *SecBuffer) Bytes() []byte { - if b.Buffer == nil || b.BufferSize <= 0 { - return nil - } - return (*[2 << 20]byte)(unsafe.Pointer(b.Buffer))[:b.BufferSize] -} - -func (b *SecBuffer) WriteAll(w io.Writer) (int, error) { - if b.BufferSize == 0 || b.Buffer == nil { - return 0, nil - } - data := b.Bytes() - total := 0 - for { - n, err := w.Write(data) - total += n - if err != nil { - return total, err - } - if n >= len(data) { - break - } - data = data[n:] - } - return total, nil -} diff --git a/vendor/github.com/alexbrainman/sspi/flags_test.go b/vendor/github.com/alexbrainman/sspi/flags_test.go deleted file mode 100644 index 5d27c53d2915..000000000000 --- a/vendor/github.com/alexbrainman/sspi/flags_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package sspi - -import ( - "strconv" - "testing" -) - -func Test_verifySelectiveFlags(t *testing.T) { - type args struct { - flags uint32 - establishedFlags uint32 - } - tests := []struct { - name string - args args - wantValid bool - wantMissing uint32 - wantExtra uint32 - }{ - { - name: "all zeros", - args: args{ - flags: binary("00000"), - establishedFlags: binary("00000"), - }, - wantValid: true, - wantMissing: binary("00000"), - wantExtra: binary("00000"), - }, - { - name: "all ones", - args: args{ - flags: binary("11111"), - establishedFlags: binary("11111"), - }, - wantValid: true, - wantMissing: binary("00000"), - wantExtra: binary("00000"), - }, - { - name: "missing one bit", - args: args{ - flags: binary("11111"), - establishedFlags: binary("11011"), - }, - wantValid: false, - wantMissing: binary("00100"), - wantExtra: binary("00000"), - }, - { - name: "missing two bits", - args: args{ - flags: binary("11111"), - establishedFlags: binary("01011"), - }, - wantValid: false, - wantMissing: binary("10100"), - wantExtra: binary("00000"), - }, - { - name: "missing all bits", - args: args{ - flags: binary("11101"), - establishedFlags: binary("00000"), - }, - wantValid: false, - wantMissing: binary("11101"), - wantExtra: binary("00000"), - }, - { - name: "one extra bit", - args: args{ - flags: binary("00111"), - establishedFlags: binary("01111"), - }, - wantValid: true, - wantMissing: binary("00000"), - wantExtra: binary("01000"), - }, - { - name: "two extra bits", - args: args{ - flags: binary("01000"), - establishedFlags: binary("11001"), - }, - wantValid: true, - wantMissing: 
binary("00000"), - wantExtra: binary("10001"), - }, - { - name: "all extra bits", - args: args{ - flags: binary("00000"), - establishedFlags: binary("11111"), - }, - wantValid: true, - wantMissing: binary("00000"), - wantExtra: binary("11111"), - }, - { - name: "missing and extra bits", - args: args{ - flags: binary("00101"), - establishedFlags: binary("11001"), - }, - wantValid: false, - wantMissing: binary("00100"), - wantExtra: binary("11000"), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotValid, gotMissing, gotExtra := verifySelectiveFlags(tt.args.flags, tt.args.establishedFlags) - if gotValid != tt.wantValid { - t.Errorf("verifySelectiveFlags() gotValid = %v, want %v", gotValid, tt.wantValid) - } - if gotMissing != tt.wantMissing { - t.Errorf("verifySelectiveFlags() gotMissing = %v, want %v", gotMissing, tt.wantMissing) - } - if gotExtra != tt.wantExtra { - t.Errorf("verifySelectiveFlags() gotExtra = %v, want %v", gotExtra, tt.wantExtra) - } - }) - } -} - -func binary(b string) uint32 { - n, err := strconv.ParseUint(b, 2, 32) - if err != nil { - panic(err) // programmer error due to invalid test data - } - return uint32(n) -} diff --git a/vendor/github.com/alexbrainman/sspi/mksyscall.go b/vendor/github.com/alexbrainman/sspi/mksyscall.go deleted file mode 100644 index 19e119598422..000000000000 --- a/vendor/github.com/alexbrainman/sspi/mksyscall.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sspi - -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -systemdll=false -output=zsyscall_windows.go syscall.go diff --git a/vendor/github.com/alexbrainman/sspi/negotiate/http_test.go b/vendor/github.com/alexbrainman/sspi/negotiate/http_test.go deleted file mode 100644 index bfd81875744c..000000000000 --- a/vendor/github.com/alexbrainman/sspi/negotiate/http_test.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build windows - -package negotiate_test - -import ( - "encoding/base64" - "flag" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "net/url" - "strings" - "testing" - "time" - - "github.com/alexbrainman/sspi/negotiate" -) - -var ( - testURL = flag.String("url", "", "server URL for TestNegotiateHTTPClient") -) - -// TODO: perhaps add Transport that is similar to http.Transport -// TODO: perhaps implement separate NTLMTransport and KerberosTransport (not sure about this idea) -// TODO: KerberosTransport is (I beleive) sinlge leg protocol, so it can be implemented easily (unlike NTLM) -// TODO: perhaps implement both server and client Transport - -type httpClient struct { - client *http.Client - transport *http.Transport - url string -} - -func newHTTPClient(url string) *httpClient { - transport := &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - } - return &httpClient{ - client: &http.Client{Transport: transport}, - transport: transport, - url: url, - } -} - -func (c *httpClient) CloseIdleConnections() { - c.transport.CloseIdleConnections() -} - -func (c *httpClient) get(req *http.Request) (*http.Response, string, error) { - res, err := c.client.Do(req) - if err != nil { - return nil, "", err - } - defer res.Body.Close() - - body, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, "", err - } - return res, string(body), nil -} - -func (c *httpClient) canDoNegotiate() error { - req, err := http.NewRequest("GET", c.url, nil) - if err != nil { - return err - } - res, _, err := c.get(req) - if err != nil { - return err - } - if res.StatusCode != http.StatusUnauthorized { - return fmt.Errorf("Unauthorized expected, but got %v", res.StatusCode) - } - authHeaders, found := res.Header["Www-Authenticate"] - if !found { - return fmt.Errorf("Www-Authenticate not found") - } - for _, h := range authHeaders { - if h == "Negotiate" { - return nil - } - } - return fmt.Errorf("Www-Authenticate header does not contain Negotiate, but has %v", authHeaders) -} - -func findAuthHeader(res *http.Response) ([]byte, error) { - authHeaders, found := res.Header["Www-Authenticate"] - if !found { - return nil, fmt.Errorf("Www-Authenticate not found") - } - if len(authHeaders) != 1 { - return nil, fmt.Errorf("Only one Www-Authenticate header expected, but %d found: %v", len(authHeaders), authHeaders) - } - if len(authHeaders[0]) < 10 { - return nil, fmt.Errorf("Www-Authenticate header is to short: %q", authHeaders[0]) - } - if !strings.HasPrefix(authHeaders[0], "Negotiate ") { - return nil, fmt.Errorf("Www-Authenticate header is suppose to starts with \"Negotiate \", but is %q", authHeaders[0]) - } - token, err := base64.StdEncoding.DecodeString(authHeaders[0][10:]) - if err != nil { - return nil, err - } - return token, nil -} - -func (c *httpClient) startAuthorization(inputToken []byte) ([]byte, error) { - req, err := http.NewRequest("GET", c.url, nil) - if err != nil { - return nil, err - } - req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(inputToken)) - res, _, err := c.get(req) - if err != nil { - return nil, err - } - if res.StatusCode != http.StatusUnauthorized { - return nil, fmt.Errorf("Unauthorized expected, but got %v", res.StatusCode) - } - outputToken, err := findAuthHeader(res) - if err != nil { - return nil, err - } - return outputToken, nil -} - -func (c *httpClient) completeAuthorization(inputToken []byte) (*http.Response, string, error) { - req, err := http.NewRequest("GET", 
c.url, nil) - if err != nil { - return nil, "", err - } - req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(inputToken)) - res, body, err := c.get(req) - if err != nil { - return nil, "", err - } - if res.StatusCode != http.StatusOK { - return nil, "", fmt.Errorf("OK expected, but got %v", res.StatusCode) - } - return res, body, nil -} - -func TestNTLMHTTPClient(t *testing.T) { - // TODO: combine client and server tests so we don't need external server - if len(*testURL) == 0 { - t.Skip("Skipping due to empty \"url\" parameter") - } - - cred, err := negotiate.AcquireCurrentUserCredentials() - if err != nil { - t.Fatal(err) - } - defer cred.Release() - - secctx, clientToken1, err := negotiate.NewClientContext(cred, "") - if err != nil { - t.Fatal(err) - } - defer secctx.Release() - - client := newHTTPClient(*testURL) - defer client.CloseIdleConnections() - - err = client.canDoNegotiate() - if err != nil { - t.Fatal(err) - } - serverToken1, err := client.startAuthorization(clientToken1) - if err != nil { - t.Fatal(err) - } - authCompleted, clientToken2, err := secctx.Update(serverToken1) - if err != nil { - t.Fatal(err) - } - if len(clientToken2) == 0 { - t.Fatal("secctx.Update returns empty token for the peer, but our authentication is not done yet") - } - res, _, err := client.completeAuthorization(clientToken2) - if err != nil { - t.Fatal(err) - } - if authCompleted { - return - } - serverToken2, err := findAuthHeader(res) - if err != nil { - t.Fatal(err) - } - authCompleted, lastToken, err := secctx.Update(serverToken2) - if err != nil { - t.Fatal(err) - } - if !authCompleted { - t.Fatal("client authentication should be completed now") - } - if len(lastToken) > 0 { - t.Fatalf("last token supposed to be empty, but %v returned", lastToken) - } -} - -func TestKerberosHTTPClient(t *testing.T) { - // TODO: combine client and server tests so we don't need external server - if len(*testURL) == 0 { - t.Skip("Skipping due to empty \"url\" parameter") - } - - u, err := url.Parse(*testURL) - if err != nil { - t.Fatal(err) - } - targetName := "http/" + strings.ToUpper(u.Host) - - cred, err := negotiate.AcquireCurrentUserCredentials() - if err != nil { - t.Fatal(err) - } - defer cred.Release() - - secctx, token, err := negotiate.NewClientContext(cred, targetName) - if err != nil { - t.Fatal(err) - } - defer secctx.Release() - - client := newHTTPClient(*testURL) - defer client.CloseIdleConnections() - - err = client.canDoNegotiate() - if err != nil { - t.Fatal(err) - } - res, _, err := client.completeAuthorization(token) - if err != nil { - t.Fatal(err) - } - serverToken, err := findAuthHeader(res) - if err != nil { - t.Fatal(err) - } - authCompleted, lastToken, err := secctx.Update(serverToken) - if err != nil { - t.Fatal(err) - } - if !authCompleted { - t.Fatal("client authentication should be completed now") - } - if len(lastToken) > 0 { - t.Fatalf("last token supposed to be empty, but %v returned", lastToken) - } -} - -// TODO: See http://www.innovation.ch/personal/ronald/ntlm.html#connections about needed to keep connection alive during authentication. 
- -func TestNegotiateHTTPServer(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // TODO: implement Negotiate authentication here - w.Write([]byte("hello")) - })) - defer ts.Close() - - res, err := http.Get(ts.URL) - if err != nil { - t.Fatal(err) - } - got, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if string(got) != "hello" { - t.Errorf("got %q, want hello", string(got)) - } -} diff --git a/vendor/github.com/alexbrainman/sspi/negotiate/negotiate.go b/vendor/github.com/alexbrainman/sspi/negotiate/negotiate.go deleted file mode 100644 index e04126087eee..000000000000 --- a/vendor/github.com/alexbrainman/sspi/negotiate/negotiate.go +++ /dev/null @@ -1,462 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -// Package negotiate provides access to the Microsoft Negotiate SSP Package. -// -package negotiate - -import ( - "errors" - "syscall" - "time" - "unsafe" - - "github.com/alexbrainman/sspi" -) - -// TODO: maybe (if possible) move all winapi related out of sspi and into sspi/internal/winapi - -// PackageInfo contains Negotiate SSP package description. -var PackageInfo *sspi.PackageInfo - -func init() { - var err error - PackageInfo, err = sspi.QueryPackageInfo(sspi.NEGOSSP_NAME) - if err != nil { - panic("failed to fetch Negotiate package info: " + err.Error()) - } -} - -func acquireCredentials(principalName string, creduse uint32, ai *sspi.SEC_WINNT_AUTH_IDENTITY) (*sspi.Credentials, error) { - c, err := sspi.AcquireCredentials(principalName, sspi.NEGOSSP_NAME, creduse, (*byte)(unsafe.Pointer(ai))) - if err != nil { - return nil, err - } - return c, nil -} - -// AcquireCurrentUserCredentials acquires credentials of currently -// logged on user. These will be used by the client to authenticate -// itself to the server. It will also be used by the server -// to impersonate the user. -func AcquireCurrentUserCredentials() (*sspi.Credentials, error) { - return acquireCredentials("", sspi.SECPKG_CRED_OUTBOUND, nil) -} - -// TODO: see if I can share this common ntlm and negotiate code - -// AcquireUserCredentials acquires credentials of user described by -// domain, username and password. These will be used by the client to -// authenticate itself to the server. It will also be used by the -// server to impersonate the user. -func AcquireUserCredentials(domain, username, password string) (*sspi.Credentials, error) { - if len(username) == 0 { - return nil, errors.New("username parameter cannot be empty") - } - d, err := syscall.UTF16FromString(domain) - if err != nil { - return nil, err - } - u, err := syscall.UTF16FromString(username) - if err != nil { - return nil, err - } - p, err := syscall.UTF16FromString(password) - if err != nil { - return nil, err - } - ai := sspi.SEC_WINNT_AUTH_IDENTITY{ - User: &u[0], - UserLength: uint32(len(u) - 1), // do not count terminating 0 - Domain: &d[0], - DomainLength: uint32(len(d) - 1), // do not count terminating 0 - Password: &p[0], - PasswordLength: uint32(len(p) - 1), // do not count terminating 0 - Flags: sspi.SEC_WINNT_AUTH_IDENTITY_UNICODE, - } - return acquireCredentials("", sspi.SECPKG_CRED_OUTBOUND, &ai) -} - -// AcquireServerCredentials acquires server credentials that will -// be used to authenticate clients. 
-// The principalName parameter is passed to the underlying call to -// the winapi AcquireCredentialsHandle function (and specifies the -// name of the principal whose credentials the underlying handle -// will reference). -// As a special case, using an empty string for the principal name -// will require the credential of the user under whose security context -// the current process is running. -func AcquireServerCredentials(principalName string) (*sspi.Credentials, error) { - return acquireCredentials(principalName, sspi.SECPKG_CRED_INBOUND, nil) -} - -func updateContext(c *sspi.Context, dst, src []byte, targetName *uint16) (authCompleted bool, n int, err error) { - var inBuf, outBuf [1]sspi.SecBuffer - inBuf[0].Set(sspi.SECBUFFER_TOKEN, src) - inBufs := &sspi.SecBufferDesc{ - Version: sspi.SECBUFFER_VERSION, - BuffersCount: 1, - Buffers: &inBuf[0], - } - outBuf[0].Set(sspi.SECBUFFER_TOKEN, dst) - outBufs := &sspi.SecBufferDesc{ - Version: sspi.SECBUFFER_VERSION, - BuffersCount: 1, - Buffers: &outBuf[0], - } - ret := c.Update(targetName, outBufs, inBufs) - switch ret { - case sspi.SEC_E_OK: - // session established -> return success - return true, int(outBuf[0].BufferSize), nil - case sspi.SEC_I_COMPLETE_NEEDED, sspi.SEC_I_COMPLETE_AND_CONTINUE: - ret = sspi.CompleteAuthToken(c.Handle, outBufs) - if ret != sspi.SEC_E_OK { - return false, 0, ret - } - case sspi.SEC_I_CONTINUE_NEEDED: - default: - return false, 0, ret - } - return false, int(outBuf[0].BufferSize), nil -} - -func makeSignature(c *sspi.Context, msg []byte, qop, seqno uint32) ([]byte, error) { - _, maxSignature, _, _, err := c.Sizes() - if err != nil { - return nil, err - } - - if maxSignature == 0 { - return nil, errors.New("integrity services are not requested or unavailable") - } - - var b [2]sspi.SecBuffer - b[0].Set(sspi.SECBUFFER_DATA, msg) - b[1].Set(sspi.SECBUFFER_TOKEN, make([]byte, maxSignature)) - - ret := sspi.MakeSignature(c.Handle, qop, sspi.NewSecBufferDesc(b[:]), seqno) - if ret != sspi.SEC_E_OK { - return nil, ret - } - - return b[1].Bytes(), nil -} - -func encryptMessage(c *sspi.Context, msg []byte, qop, seqno uint32) ([]byte, error) { - _ /*maxToken*/, maxSignature, cBlockSize, cSecurityTrailer, err := c.Sizes() - if err != nil { - return nil, err - } - - if maxSignature == 0 { - return nil, errors.New("integrity services are not requested or unavailable") - } - - var b [3]sspi.SecBuffer - b[0].Set(sspi.SECBUFFER_TOKEN, make([]byte, cSecurityTrailer)) - b[1].Set(sspi.SECBUFFER_DATA, msg) - b[2].Set(sspi.SECBUFFER_PADDING, make([]byte, cBlockSize)) - - ret := sspi.EncryptMessage(c.Handle, qop, sspi.NewSecBufferDesc(b[:]), seqno) - if ret != sspi.SEC_E_OK { - return nil, ret - } - - r0, r1, r2 := b[0].Bytes(), b[1].Bytes(), b[2].Bytes() - res := make([]byte, 0, len(r0)+len(r1)+len(r2)) - res = append(res, r0...) - res = append(res, r1...) - res = append(res, r2...) 
- - return res, nil -} - -func decryptMessage(c *sspi.Context, msg []byte, seqno uint32) (uint32, []byte, error) { - var b [2]sspi.SecBuffer - b[0].Set(sspi.SECBUFFER_STREAM, msg) - b[1].Set(sspi.SECBUFFER_DATA, []byte{}) - - var qop uint32 - ret := sspi.DecryptMessage(c.Handle, sspi.NewSecBufferDesc(b[:]), seqno, &qop) - if ret != sspi.SEC_E_OK { - return qop, nil, ret - } - - return qop, b[1].Bytes(), nil -} - -func verifySignature(c *sspi.Context, msg, token []byte, seqno uint32) (uint32, error) { - var b [2]sspi.SecBuffer - b[0].Set(sspi.SECBUFFER_DATA, msg) - b[1].Set(sspi.SECBUFFER_TOKEN, token) - - var qop uint32 - - ret := sspi.VerifySignature(c.Handle, sspi.NewSecBufferDesc(b[:]), seqno, &qop) - if ret != sspi.SEC_E_OK { - return 0, ret - } - - return qop, nil -} - -// ClientContext is used by the client to manage all steps of Negotiate negotiation. -type ClientContext struct { - sctxt *sspi.Context - targetName *uint16 -} - -// NewClientContext creates a new client context. It uses client -// credentials cred generated by AcquireCurrentUserCredentials or -// AcquireUserCredentials and SPN to start a client Negotiate -// negotiation sequence. targetName is the service principal name -// (SPN) or the security context of the destination server. -// NewClientContext returns a new token to be sent to the server. -func NewClientContext(cred *sspi.Credentials, targetName string) (cc *ClientContext, outputToken []byte, err error) { - return NewClientContextWithFlags(cred, targetName, sspi.ISC_REQ_CONNECTION) -} - -// NewClientContextWithFlags creates a new client context. It uses client -// credentials cred generated by AcquireCurrentUserCredentials or -// AcquireUserCredentials and SPN to start a client Negotiate -// negotiation sequence. targetName is the service principal name -// (SPN) or the security context of the destination server. -// The flags parameter is used to indicate requests for the context -// (for example sspi.ISC_REQ_CONFIDENTIALITY|sspi.ISC_REQ_REPLAY_DETECT) -// NewClientContextWithFlags returns a new token to be sent to the server. -func NewClientContextWithFlags(cred *sspi.Credentials, targetName string, flags uint32) (cc *ClientContext, outputToken []byte, err error) { - var tname *uint16 - if len(targetName) > 0 { - p, err2 := syscall.UTF16FromString(targetName) - if err2 != nil { - return nil, nil, err2 - } - if len(p) > 0 { - tname = &p[0] - } - } - otoken := make([]byte, PackageInfo.MaxToken) - c := sspi.NewClientContext(cred, flags) - - authCompleted, n, err2 := updateContext(c, otoken, nil, tname) - if err2 != nil { - return nil, nil, err2 - } - if authCompleted { - c.Release() - return nil, nil, errors.New("negotiate authentication should not be completed yet") - } - if n == 0 { - c.Release() - return nil, nil, errors.New("negotiate token should not be empty") - } - otoken = otoken[:n] - return &ClientContext{sctxt: c, targetName: tname}, otoken, nil -} - -// Release free up resources associated with client context c. -func (c *ClientContext) Release() error { - if c == nil { - return nil - } - return c.sctxt.Release() -} - -// Expiry returns c expiry time. -func (c *ClientContext) Expiry() time.Time { - return c.sctxt.Expiry() -} - -// Update advances client part of Negotiate negotiation c. It uses -// token received from the server and returns true if client part -// of authentication is complete. It also returns new token to be -// sent to the server. 
-func (c *ClientContext) Update(token []byte) (authCompleted bool, outputToken []byte, err error) { - otoken := make([]byte, PackageInfo.MaxToken) - authDone, n, err2 := updateContext(c.sctxt, otoken, token, c.targetName) - if err2 != nil { - return false, nil, err2 - } - if n == 0 && !authDone { - return false, nil, errors.New("negotiate token should not be empty") - } - otoken = otoken[:n] - return authDone, otoken, nil -} - -// Sizes queries the client context for the sizes used in per-message -// functions. It returns the maximum token size used in authentication -// exchanges, the maximum signature size, the preferred integral size of -// messages, the size of any security trailer, and any error. -func (c *ClientContext) Sizes() (uint32, uint32, uint32, uint32, error) { - return c.sctxt.Sizes() -} - -// MakeSignature uses the established client context to create a signature -// for the given message using the provided quality of protection flags and -// sequence number. It returns the signature token in addition to any error. -func (c *ClientContext) MakeSignature(msg []byte, qop, seqno uint32) ([]byte, error) { - return makeSignature(c.sctxt, msg, qop, seqno) -} - -// VerifySignature uses the established client context and signature token -// to check that the provided message hasn't been tampered or received out -// of sequence. It returns any quality of protection flags and any error -// that occurred. -func (c *ClientContext) VerifySignature(msg, token []byte, seqno uint32) (uint32, error) { - return verifySignature(c.sctxt, msg, token, seqno) -} - -// EncryptMessage uses the established client context to encrypt a message -// using the provided quality of protection flags and sequence number. -// It returns the signature token in addition to any error. -// IMPORTANT: the input msg parameter is updated in place by the low-level windows api -// so must be copied if the initial content should not be modified. -func (c *ClientContext) EncryptMessage(msg []byte, qop, seqno uint32) ([]byte, error) { - return encryptMessage(c.sctxt, msg, qop, seqno) -} - -// DecryptMessage uses the established client context to decrypt a message -// using the provided sequence number. -// It returns the quality of protection flag and the decrypted message in addition to any error. -func (c *ClientContext) DecryptMessage(msg []byte, seqno uint32) (uint32, []byte, error) { - return decryptMessage(c.sctxt, msg, seqno) -} - -// VerifyFlags determines if all flags used to construct the client context -// were honored (see NewClientContextWithFlags). It should be called after c.Update. -func (c *ClientContext) VerifyFlags() error { - return c.sctxt.VerifyFlags() -} - -// VerifySelectiveFlags determines if the given flags were honored (see NewClientContextWithFlags). -// It should be called after c.Update. -func (c *ClientContext) VerifySelectiveFlags(flags uint32) error { - return c.sctxt.VerifySelectiveFlags(flags) -} - -// ServerContext is used by the server to manage all steps of Negotiate -// negotiation. Once authentication is completed the context can be -// used to impersonate client. -type ServerContext struct { - sctxt *sspi.Context -} - -// NewServerContext creates new server context. It uses server -// credentials created by AcquireServerCredentials and token from -// the client to start server Negotiate negotiation sequence. -// It also returns new token to be sent to the client. 
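The client and server halves compose into a token-pumping loop; the deleted negotiate_test.go further down uses exactly this shape. A sketch, assuming the contexts were created with the constructors above and NewServerContext below (serverDone and toClientToken are NewServerContext's return values):

```go
package example

import (
	"errors"

	"github.com/alexbrainman/sspi/negotiate"
)

// runHandshake pumps tokens between the two Update methods until both
// sides complete. serverDone and toClientToken are the values returned
// by negotiate.NewServerContext for the client's first token.
func runHandshake(client *negotiate.ClientContext, server *negotiate.ServerContext,
	serverDone bool, toClientToken []byte) error {
	var clientDone bool
	for len(toClientToken) > 0 {
		var toServerToken []byte
		var err error
		clientDone, toServerToken, err = client.Update(toClientToken)
		if err != nil {
			return err
		}
		if len(toServerToken) == 0 {
			break
		}
		serverDone, toClientToken, err = server.Update(toServerToken)
		if err != nil {
			return err
		}
	}
	if !clientDone || !serverDone {
		return errors.New("negotiate handshake did not complete on both sides")
	}
	return nil
}
```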
-func NewServerContext(cred *sspi.Credentials, token []byte) (sc *ServerContext, authDone bool, outputToken []byte, err error) { - otoken := make([]byte, PackageInfo.MaxToken) - c := sspi.NewServerContext(cred, sspi.ASC_REQ_CONNECTION) - authDone, n, err2 := updateContext(c, otoken, token, nil) - if err2 != nil { - return nil, false, nil, err2 - } - otoken = otoken[:n] - return &ServerContext{sctxt: c}, authDone, otoken, nil -} - -// Release free up resources associated with server context c. -func (c *ServerContext) Release() error { - if c == nil { - return nil - } - return c.sctxt.Release() -} - -// Expiry returns c expiry time. -func (c *ServerContext) Expiry() time.Time { - return c.sctxt.Expiry() -} - -// Update advances server part of Negotiate negotiation c. It uses -// token received from the client and returns true if server part -// of authentication is complete. It also returns new token to be -// sent to the client. -func (c *ServerContext) Update(token []byte) (authCompleted bool, outputToken []byte, err error) { - otoken := make([]byte, PackageInfo.MaxToken) - authDone, n, err2 := updateContext(c.sctxt, otoken, token, nil) - if err2 != nil { - return false, nil, err2 - } - if n == 0 && !authDone { - return false, nil, errors.New("negotiate token should not be empty") - } - otoken = otoken[:n] - return authDone, otoken, nil -} - -const _SECPKG_ATTR_NATIVE_NAMES = 13 - -type _SecPkgContext_NativeNames struct { - ClientName *uint16 - ServerName *uint16 -} - -// GetUsername returns the username corresponding to the authenticated client -func (c *ServerContext) GetUsername() (string, error) { - var ns _SecPkgContext_NativeNames - ret := sspi.QueryContextAttributes(c.sctxt.Handle, _SECPKG_ATTR_NATIVE_NAMES, (*byte)(unsafe.Pointer(&ns))) - if ret != sspi.SEC_E_OK { - return "", ret - } - sspi.FreeContextBuffer((*byte)(unsafe.Pointer(ns.ServerName))) - defer sspi.FreeContextBuffer((*byte)(unsafe.Pointer(ns.ClientName))) - return syscall.UTF16ToString((*[2 << 20]uint16)(unsafe.Pointer(ns.ClientName))[:]), nil -} - -// ImpersonateUser changes current OS thread user. New user is -// the user as specified by client credentials. -func (c *ServerContext) ImpersonateUser() error { - return c.sctxt.ImpersonateUser() -} - -// RevertToSelf stops impersonation. It changes current OS thread -// user to what it was before ImpersonateUser was executed. -func (c *ServerContext) RevertToSelf() error { - return c.sctxt.RevertToSelf() -} - -// Sizes queries the server context for the sizes used in per-message -// functions. It returns the maximum token size used in authentication -// exchanges, the maximum signature size, the preferred integral size of -// messages, the size of any security trailer, and any error. -func (c *ServerContext) Sizes() (uint32, uint32, uint32, uint32, error) { - return c.sctxt.Sizes() -} - -// MakeSignature uses the established server context to create a signature -// for the given message using the provided quality of protection flags and -// sequence number. It returns the signature token in addition to any error. -func (c *ServerContext) MakeSignature(msg []byte, qop, seqno uint32) ([]byte, error) { - return makeSignature(c.sctxt, msg, qop, seqno) -} - -// VerifySignature uses the established server context and signature token -// to check that the provided message hasn't been tampered or received out -// of sequence. It returns any quality of protection flags and any error -// that occurred. 
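Once the server side reports completion, GetUsername and the impersonation pair are typically used together. Impersonation binds to the current OS thread, hence the LockOSThread in this sketch, which mirrors the deleted tests:

```go
package example

import (
	"log"
	"os/user"
	"runtime"

	"github.com/alexbrainman/sspi/negotiate"
)

// serveAsClient runs work under the authenticated client's identity.
// Impersonation applies to the current OS thread, so pin it first.
func serveAsClient(server *negotiate.ServerContext) error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	name, err := server.GetUsername()
	if err != nil {
		return err
	}
	log.Printf("authenticated client: %s", name)

	if err := server.ImpersonateUser(); err != nil {
		return err
	}
	defer server.RevertToSelf()

	// Anything done here happens as the client, not the server account.
	u, err := user.Current()
	if err != nil {
		return err
	}
	log.Printf("running as: %s", u.Username)
	return nil
}
```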
-func (c *ServerContext) VerifySignature(msg, token []byte, seqno uint32) (uint32, error) { - return verifySignature(c.sctxt, msg, token, seqno) -} - -// EncryptMessage uses the established server context to encrypt a message -// using the provided quality of protection flags and sequence number. -// It returns the signature token in addition to any error. -// IMPORTANT: the input msg parameter is updated in place by the low-level windows api -// so must be copied if the initial content should not be modified. -func (c *ServerContext) EncryptMessage(msg []byte, qop, seqno uint32) ([]byte, error) { - return encryptMessage(c.sctxt, msg, qop, seqno) -} - -// DecryptMessage uses the established server context to decrypt a message -// using the provided sequence number. -// It returns the quality of protection flag and the decrypted message in addition to any error. -func (c *ServerContext) DecryptMessage(msg []byte, seqno uint32) (uint32, []byte, error) { - return decryptMessage(c.sctxt, msg, seqno) -} diff --git a/vendor/github.com/alexbrainman/sspi/negotiate/negotiate_test.go b/vendor/github.com/alexbrainman/sspi/negotiate/negotiate_test.go deleted file mode 100644 index 3c3c5ebcda8d..000000000000 --- a/vendor/github.com/alexbrainman/sspi/negotiate/negotiate_test.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package negotiate_test - -import ( - "bytes" - "crypto/rand" - "flag" - "os" - "os/user" - "runtime" - "strings" - "syscall" - "testing" - "time" - - "github.com/alexbrainman/sspi" - "github.com/alexbrainman/sspi/negotiate" -) - -var ( - testDomain = flag.String("domain", "", "domain parameter for TestAcquireUserCredentials") - testUsername = flag.String("username", "", "username parameter for TestAcquireUserCredentials") - testPassword = flag.String("password", "", "password parameter for TestAcquireUserCredentials") -) - -func TestPackageInfo(t *testing.T) { - if negotiate.PackageInfo.Name != "Negotiate" { - t.Fatalf(`invalid Negotiate package name of %q, "Negotiate" is expected.`, negotiate.PackageInfo.Name) - } -} - -func testContextExpiry(t *testing.T, name string, c interface { - Expiry() time.Time -}) { - validFor := c.Expiry().Sub(time.Now()) - if validFor < time.Hour { - t.Errorf("%v expires in %v, more than 1 hour expected", name, validFor) - } - if validFor > 10*24*time.Hour { - t.Errorf("%v expires in %v, less than 10 days expected", name, validFor) - } -} - -func testNegotiate(t *testing.T, clientCred *sspi.Credentials, SPN string) { - if len(SPN) == 0 { - t.Log("testing with blank SPN") - } else { - t.Logf("testing with SPN=%s", SPN) - } - - serverCred, err := negotiate.AcquireServerCredentials("") - if err != nil { - t.Fatal(err) - } - defer serverCred.Release() - - client, toServerToken, err := negotiate.NewClientContext(clientCred, SPN) - if err != nil { - t.Fatal(err) - } - defer client.Release() - - if len(toServerToken) == 0 { - t.Fatal("token for server cannot be empty") - } - t.Logf("sent %d bytes to server", len(toServerToken)) - - testContextExpiry(t, "client security context", client) - - server, serverDone, toClientToken, err := negotiate.NewServerContext(serverCred, toServerToken) - if err != nil { - t.Fatal(err) - } - defer server.Release() - - testContextExpiry(t, "server security context", server) - - var clientDone bool - for { - if len(toClientToken) == 0 { - break - } - t.Logf("sent 
%d bytes to client", len(toClientToken)) - clientDone, toServerToken, err = client.Update(toClientToken) - if err != nil { - t.Fatal(err) - } - if len(toServerToken) == 0 { - break - } - t.Logf("sent %d bytes to server", len(toServerToken)) - serverDone, toClientToken, err = server.Update(toServerToken) - if err != nil { - t.Fatal(err) - } - } - if !clientDone { - t.Fatal("client authentication should be completed now") - } - if !serverDone { - t.Fatal("server authentication should be completed now") - } - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - err = server.ImpersonateUser() - if err != nil { - t.Fatal(err) - } - defer server.RevertToSelf() - - _, err = user.Current() - if err != nil { - t.Fatal(err) - } -} - -func TestNegotiate(t *testing.T) { - cred, err := negotiate.AcquireCurrentUserCredentials() - if err != nil { - t.Fatal(err) - } - defer cred.Release() - - testNegotiate(t, cred, "") - - hostname, err := os.Hostname() - if err != nil { - t.Fatal(err) - } - testNegotiate(t, cred, "HOST/"+strings.ToUpper(hostname)) - - testNegotiate(t, cred, "HOST/127.0.0.1") -} - -func TestNegotiateFailure(t *testing.T) { - clientCred, err := negotiate.AcquireCurrentUserCredentials() - if err != nil { - t.Fatal(err) - } - defer clientCred.Release() - - serverCred, err := negotiate.AcquireServerCredentials("") - if err != nil { - t.Fatal(err) - } - defer serverCred.Release() - - client, toServerToken, err := negotiate.NewClientContext(clientCred, "HOST/UNKNOWN_HOST_NAME") - if err != nil { - t.Fatal(err) - } - defer client.Release() - - if len(toServerToken) == 0 { - t.Fatal("token for server cannot be empty") - } - t.Logf("sent %d bytes to server", len(toServerToken)) - - server, serverDone, toClientToken, err := negotiate.NewServerContext(serverCred, toServerToken) - if err != nil { - t.Fatal(err) - } - defer server.Release() - - for { - var clientDone bool - if len(toClientToken) == 0 { - t.Fatal("token for client cannot be empty") - } - t.Logf("sent %d bytes to client", len(toClientToken)) - clientDone, toServerToken, err = client.Update(toClientToken) - if err != nil { - t.Fatal(err) - } - t.Logf("clientDone=%v serverDone=%v", clientDone, serverDone) - if clientDone { - // t.Fatal("client authentication cannot be completed") - } - if len(toServerToken) == 0 { - t.Fatal("token for server cannot be empty") - } - t.Logf("sent %d bytes to server", len(toServerToken)) - serverDone, toClientToken, err = server.Update(toServerToken) - if err != nil { - if err == sspi.SEC_E_LOGON_DENIED { - return - } - t.Fatalf("unexpected failure 0x%x: %v", uintptr(err.(syscall.Errno)), err) - } - t.Logf("clientDone=%v serverDone=%v", clientDone, serverDone) - if serverDone { - t.Fatal("server authentication cannot be completed") - } - } -} - -func TestAcquireUserCredentials(t *testing.T) { - if len(*testDomain) == 0 { - t.Skip("Skipping due to empty \"domain\" parameter") - } - if len(*testUsername) == 0 { - t.Skip("Skipping due to empty \"username\" parameter") - } - if len(*testPassword) == 0 { - t.Skip("Skipping due to empty \"password\" parameter") - } - cred, err := negotiate.AcquireUserCredentials(*testDomain, *testUsername, *testPassword) - if err != nil { - t.Fatal(err) - } - defer cred.Release() - - testNegotiate(t, cred, "") -} - -func TestSignatureEncryption(t *testing.T) { - clientCred, err := negotiate.AcquireCurrentUserCredentials() - if err != nil { - t.Fatal(err) - } - defer clientCred.Release() - - serverCred, err := negotiate.AcquireServerCredentials("") - if err != nil { - 
t.Fatal(err) - } - defer serverCred.Release() - - client, toServerToken, err := negotiate.NewClientContext(clientCred, "") - if err != nil { - t.Fatal(err) - } - defer client.Release() - - if len(toServerToken) == 0 { - t.Fatal("token for server cannot be empty") - } - - server, serverDone, toClientToken, err := negotiate.NewServerContext(serverCred, toServerToken) - if err != nil { - t.Fatal(err) - } - defer server.Release() - - var clientDone bool - for { - if len(toClientToken) == 0 { - break - } - clientDone, toServerToken, err = client.Update(toClientToken) - if err != nil { - t.Fatal(err) - } - if len(toServerToken) == 0 { - break - } - serverDone, toClientToken, err = server.Update(toServerToken) - if err != nil { - t.Fatal(err) - } - } - if !clientDone { - t.Fatal("client authentication should be completed now") - } - if !serverDone { - t.Fatal("server authentication should be completed now") - } - - clientMsg := make([]byte, 10) - _, err = rand.Read(clientMsg) - if err != nil { - t.Fatal(err) - } - t.Logf("clientMsg=%v", clientMsg) - - clientSig, err := client.MakeSignature(clientMsg, 0, 0) - if err != nil { - t.Fatal(err) - } - t.Logf("clientSig=%v", clientSig) - - _, err = server.VerifySignature(clientMsg, clientSig, 0) - if err != nil { - t.Fatal(err) - } - t.Logf("server verified client signature") - - var clientQop uint32 - clientCrypt, err := client.EncryptMessage(copyArray(clientMsg), clientQop, 0) - if err != nil { - t.Fatal(err) - } - t.Logf("clientMsg=%v,clientCrypt=%v", clientMsg, clientCrypt) - - qop, m2, err := server.DecryptMessage(clientCrypt, 0) - if err != nil { - t.Fatal(err) - } - if qop != clientQop { - t.Fatalf("Wrong value %d for qop", qop) - } - if !bytes.Equal(clientMsg, m2) { - t.Fatalf("Wrong value %v for message decrypted by server (expected %v)", m2, clientMsg) - } - t.Logf("server decrypted client message") - - serverMsg := make([]byte, 10) - _, err = rand.Read(serverMsg) - if err != nil { - t.Fatal(err) - } - t.Logf("serverMsg=%v", serverMsg) - - serverSig, err := server.MakeSignature(serverMsg, 0, 0) - if err != nil { - t.Fatal(err) - } - t.Logf("serverSig=%v", serverSig) - - _, err = client.VerifySignature(serverMsg, serverSig, 0) - if err != nil { - t.Fatal(err) - } - t.Logf("client verified server signature") -} - -func TestFlagVerification(t *testing.T) { - clientCred, err := negotiate.AcquireCurrentUserCredentials() - if err != nil { - t.Fatal(err) - } - defer clientCred.Release() - - serverCred, err := negotiate.AcquireServerCredentials("") - if err != nil { - t.Fatal(err) - } - defer serverCred.Release() - - const desiredFlags = sspi.ISC_REQ_CONFIDENTIALITY | - sspi.ISC_REQ_INTEGRITY | - sspi.ISC_REQ_MUTUAL_AUTH | - sspi.ISC_REQ_REPLAY_DETECT | - sspi.ISC_REQ_SEQUENCE_DETECT - - client, toServerToken, err := negotiate.NewClientContextWithFlags(clientCred, "", desiredFlags) - if err != nil { - t.Fatal(err) - } - defer client.Release() - - if len(toServerToken) == 0 { - t.Fatal("token for server cannot be empty") - } - - server, serverDone, toClientToken, err := negotiate.NewServerContext(serverCred, toServerToken) - if err != nil { - t.Fatal(err) - } - defer server.Release() - - if len(toClientToken) == 0 { - t.Fatal("token for client cannot be empty") - } - - errMsg := "sspi: invalid flags check: desired=100000000 requested=10000000000011110 missing=100000000 extra=10000000000011110" - - var clientDone bool - for { - if len(toClientToken) == 0 { - break - } - clientDone, toServerToken, err = client.Update(toClientToken) - if err != nil { - 
t.Fatal(err) - } - - // verify all flags - if err := client.VerifyFlags(); err != nil { - t.Fatal(err) - } - // verify a subset of flags - if err := client.VerifySelectiveFlags(sspi.ISC_REQ_MUTUAL_AUTH); err != nil { - t.Fatal(err) - } - // try to verify a flag that was not initially requested - if err := client.VerifySelectiveFlags(sspi.ISC_REQ_ALLOCATE_MEMORY); err == nil || err.Error() != errMsg { - t.Fatalf("wrong error found: %v", err) - } - - if len(toServerToken) == 0 { - break - } - serverDone, toClientToken, err = server.Update(toServerToken) - if err != nil { - t.Fatal(err) - } - } - if !clientDone { - t.Fatal("client authentication should be completed now") - } - if !serverDone { - t.Fatal("server authentication should be completed now") - } -} - -func copyArray(a []byte) []byte { - b := make([]byte, len(a)) - copy(b, a) - return b -} diff --git a/vendor/github.com/alexbrainman/sspi/ntlm/http_test.go b/vendor/github.com/alexbrainman/sspi/ntlm/http_test.go deleted file mode 100644 index 72d47a5327fb..000000000000 --- a/vendor/github.com/alexbrainman/sspi/ntlm/http_test.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package ntlm_test - -import ( - "encoding/base64" - "flag" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/alexbrainman/sspi/ntlm" -) - -var ( - testURL = flag.String("url", "", "server URL for TestNTLMHTTPClient") -) - -func newRequest() (*http.Request, error) { - req, err := http.NewRequest("GET", *testURL, nil) - if err != nil { - return nil, err - } - return req, nil -} - -func get(req *http.Request) (*http.Response, string, error) { - res, err := http.DefaultClient.Do(req) - if err != nil { - return nil, "", err - } - defer res.Body.Close() - - body, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, "", err - } - return res, string(body), nil -} - -func canDoNTLM() error { - req, err := newRequest() - if err != nil { - return err - } - res, _, err := get(req) - if err != nil { - return err - } - if res.StatusCode != http.StatusUnauthorized { - return fmt.Errorf("Unauthorized expected, but got %v", res.StatusCode) - } - authHeaders, found := res.Header["Www-Authenticate"] - if !found { - return fmt.Errorf("Www-Authenticate not found") - } - for _, h := range authHeaders { - if h == "NTLM" { - return nil - } - } - return fmt.Errorf("Www-Authenticate header does not contain NTLM, but has %v", authHeaders) -} - -func doNTLMNegotiate(negotiate []byte) ([]byte, error) { - req, err := newRequest() - if err != nil { - return nil, err - } - req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(negotiate)) - res, _, err := get(req) - if err != nil { - return nil, err - } - if res.StatusCode != http.StatusUnauthorized { - return nil, fmt.Errorf("Unauthorized expected, but got %v", res.StatusCode) - } - authHeaders, found := res.Header["Www-Authenticate"] - if !found { - return nil, fmt.Errorf("Www-Authenticate not found") - } - if len(authHeaders) != 1 { - return nil, fmt.Errorf("Only one Www-Authenticate header expected, but %d found: %v", len(authHeaders), authHeaders) - } - if len(authHeaders[0]) < 6 { - return nil, fmt.Errorf("Www-Authenticate header is too short: %q", authHeaders[0]) - } - if !strings.HasPrefix(authHeaders[0], "NTLM ") { - return nil, fmt.Errorf("Www-Authenticate header is supposed to start with \"NTLM 
\", but is %q", authHeaders[0]) - } - authenticate, err := base64.StdEncoding.DecodeString(authHeaders[0][5:]) - if err != nil { - return nil, err - } - return authenticate, nil -} - -func doNTLMAuthenticate(authenticate []byte) (string, error) { - req, err := newRequest() - if err != nil { - return "", err - } - req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(authenticate)) - res, body, err := get(req) - if err != nil { - return "", err - } - if res.StatusCode != http.StatusOK { - return "", fmt.Errorf("OK expected, but got %v", res.StatusCode) - } - return body, nil -} - -func TestNTLMHTTPClient(t *testing.T) { - // TODO: combine client and server tests so we don't need external server - if len(*testURL) == 0 { - t.Skip("Skipping due to empty \"url\" parameter") - } - - cred, err := ntlm.AcquireCurrentUserCredentials() - if err != nil { - t.Fatal(err) - } - defer cred.Release() - - secctx, negotiate, err := ntlm.NewClientContext(cred) - if err != nil { - t.Fatal(err) - } - defer secctx.Release() - - err = canDoNTLM() - if err != nil { - t.Fatal(err) - } - challenge, err := doNTLMNegotiate(negotiate) - if err != nil { - t.Fatal(err) - } - authenticate, err := secctx.Update(challenge) - if err != nil { - t.Fatal(err) - } - _, err = doNTLMAuthenticate(authenticate) - if err != nil { - t.Fatal(err) - } -} - -// TODO: See http://www.innovation.ch/personal/ronald/ntlm.html#connections about needed to keep connection alive during authentication. - -func TestNTLMHTTPServer(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // TODO: implement NTLM authentication here - w.Write([]byte("hello")) - })) - defer ts.Close() - - res, err := http.Get(ts.URL) - if err != nil { - t.Fatal(err) - } - got, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if string(got) != "hello" { - t.Errorf("got %q, want hello", string(got)) - } -} diff --git a/vendor/github.com/alexbrainman/sspi/ntlm/ntlm.go b/vendor/github.com/alexbrainman/sspi/ntlm/ntlm.go deleted file mode 100644 index c0cf7e920474..000000000000 --- a/vendor/github.com/alexbrainman/sspi/ntlm/ntlm.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -// Package ntlm provides access to the Microsoft NTLM SSP Package. -// -package ntlm - -import ( - "errors" - "syscall" - "time" - "unsafe" - - "github.com/alexbrainman/sspi" -) - -// PackageInfo contains NTLM SSP package description. -var PackageInfo *sspi.PackageInfo - -func init() { - var err error - PackageInfo, err = sspi.QueryPackageInfo(sspi.NTLMSP_NAME) - if err != nil { - panic("failed to fetch NTLM package info: " + err.Error()) - } -} - -func acquireCredentials(creduse uint32, ai *sspi.SEC_WINNT_AUTH_IDENTITY) (*sspi.Credentials, error) { - c, err := sspi.AcquireCredentials("", sspi.NTLMSP_NAME, creduse, (*byte)(unsafe.Pointer(ai))) - if err != nil { - return nil, err - } - return c, nil -} - -// AcquireCurrentUserCredentials acquires credentials of currently -// logged on user. These will be used by the client to authenticate -// itself to the server. It will also be used by the server -// to impersonate the user. 
-func AcquireCurrentUserCredentials() (*sspi.Credentials, error) { - return acquireCredentials(sspi.SECPKG_CRED_OUTBOUND, nil) -} - -// AcquireUserCredentials acquires credentials of user described by -// domain, username and password. These will be used by the client to -// authenticate itself to the server. It will also be used by the -// server to impersonate the user. -func AcquireUserCredentials(domain, username, password string) (*sspi.Credentials, error) { - if len(username) == 0 { - return nil, errors.New("username parameter cannot be empty") - } - d, err := syscall.UTF16FromString(domain) - if err != nil { - return nil, err - } - u, err := syscall.UTF16FromString(username) - if err != nil { - return nil, err - } - p, err := syscall.UTF16FromString(password) - if err != nil { - return nil, err - } - ai := sspi.SEC_WINNT_AUTH_IDENTITY{ - User: &u[0], - UserLength: uint32(len(u) - 1), // do not count terminating 0 - Domain: &d[0], - DomainLength: uint32(len(d) - 1), // do not count terminating 0 - Password: &p[0], - PasswordLength: uint32(len(p) - 1), // do not count terminating 0 - Flags: sspi.SEC_WINNT_AUTH_IDENTITY_UNICODE, - } - return acquireCredentials(sspi.SECPKG_CRED_OUTBOUND, &ai) -} - -// AcquireServerCredentials acquires server credentials that will -// be used to authenticate client. -func AcquireServerCredentials() (*sspi.Credentials, error) { - return acquireCredentials(sspi.SECPKG_CRED_INBOUND, nil) -} - -func updateContext(c *sspi.Context, dst, src []byte) (authCompleted bool, n int, err error) { - var inBuf, outBuf [1]sspi.SecBuffer - inBuf[0].Set(sspi.SECBUFFER_TOKEN, src) - inBufs := &sspi.SecBufferDesc{ - Version: sspi.SECBUFFER_VERSION, - BuffersCount: 1, - Buffers: &inBuf[0], - } - outBuf[0].Set(sspi.SECBUFFER_TOKEN, dst) - outBufs := &sspi.SecBufferDesc{ - Version: sspi.SECBUFFER_VERSION, - BuffersCount: 1, - Buffers: &outBuf[0], - } - ret := c.Update(nil, outBufs, inBufs) - switch ret { - case sspi.SEC_E_OK: - // session established -> return success - return true, int(outBuf[0].BufferSize), nil - case sspi.SEC_I_COMPLETE_NEEDED, sspi.SEC_I_COMPLETE_AND_CONTINUE: - ret = sspi.CompleteAuthToken(c.Handle, outBufs) - if ret != sspi.SEC_E_OK { - return false, 0, ret - } - case sspi.SEC_I_CONTINUE_NEEDED: - default: - return false, 0, ret - } - return false, int(outBuf[0].BufferSize), nil -} - -// ClientContext is used by the client to manage all steps of NTLM negotiation. -type ClientContext struct { - sctxt *sspi.Context -} - -// NewClientContext creates new client context. It uses client -// credentials cred generated by AcquireCurrentUserCredentials or -// AcquireUserCredentials and, if successful, outputs negotiate -// message. Negotiate message needs to be sent to the server to -// start NTLM negotiation sequence. -func NewClientContext(cred *sspi.Credentials) (*ClientContext, []byte, error) { - negotiate := make([]byte, PackageInfo.MaxToken) - c := sspi.NewClientContext(cred, sspi.ISC_REQ_CONNECTION) - authCompleted, n, err := updateContext(c, negotiate, nil) - if err != nil { - return nil, nil, err - } - if authCompleted { - c.Release() - return nil, nil, errors.New("ntlm authentication should not be completed yet") - } - if n == 0 { - c.Release() - return nil, nil, errors.New("ntlm token should not be empty") - } - negotiate = negotiate[:n] - return &ClientContext{sctxt: c}, negotiate, nil -} - -// Release free up resources associated with client context c. 
-func (c *ClientContext) Release() error { - if c == nil { - return nil - } - return c.sctxt.Release() -} - -// Expiry returns c expiry time. -func (c *ClientContext) Expiry() time.Time { - return c.sctxt.Expiry() -} - -// Update completes client part of NTLM negotiation c. It uses -// challenge message received from the server, and generates -// authenticate message to be returned to the server. -func (c *ClientContext) Update(challenge []byte) ([]byte, error) { - authenticate := make([]byte, PackageInfo.MaxToken) - authCompleted, n, err := updateContext(c.sctxt, authenticate, challenge) - if err != nil { - return nil, err - } - if !authCompleted { - return nil, errors.New("ntlm authentication should be completed now") - } - if n == 0 { - return nil, errors.New("ntlm token should not be empty") - } - authenticate = authenticate[:n] - return authenticate, nil -} - -// Sizes queries the client context for the sizes used in per-message -// functions. It returns the maximum token size used in authentication -// exchanges, the maximum signature size, the preferred integral size of -// messages, the size of any security trailer, and any error. -func (c *ClientContext) Sizes() (uint32, uint32, uint32, uint32, error) { - return c.sctxt.Sizes() -} - -// ServerContext is used by the server to manage all steps of NTLM -// negotiation. Once authentication is completed the context can be -// used to impersonate client. -type ServerContext struct { - sctxt *sspi.Context -} - -// NewServerContext creates new server context. It uses server -// credentials created by AcquireServerCredentials and client -// negotiate message and, if successful, outputs challenge message. -// Challenge message needs to be sent to the client to continue -// NTLM negotiation sequence. -func NewServerContext(cred *sspi.Credentials, negotiate []byte) (*ServerContext, []byte, error) { - challenge := make([]byte, PackageInfo.MaxToken) - c := sspi.NewServerContext(cred, sspi.ASC_REQ_CONNECTION) - authCompleted, n, err := updateContext(c, challenge, negotiate) - if err != nil { - return nil, nil, err - } - if authCompleted { - c.Release() - return nil, nil, errors.New("ntlm authentication should not be completed yet") - } - if n == 0 { - c.Release() - return nil, nil, errors.New("ntlm token should not be empty") - } - challenge = challenge[:n] - return &ServerContext{sctxt: c}, challenge, nil -} - -// Release free up resources associated with server context c. -func (c *ServerContext) Release() error { - if c == nil { - return nil - } - return c.sctxt.Release() -} - -// Expiry returns c expiry time. -func (c *ServerContext) Expiry() time.Time { - return c.sctxt.Expiry() -} - -// Update completes server part of NTLM negotiation c. It uses -// authenticate message received from the client. -func (c *ServerContext) Update(authenticate []byte) error { - authCompleted, n, err := updateContext(c.sctxt, nil, authenticate) - if err != nil { - return err - } - if !authCompleted { - return errors.New("ntlm authentication should be completed now") - } - if n != 0 { - return errors.New("ntlm token should be empty now") - } - return nil -} - -// ImpersonateUser changes current OS thread user. New user is -// the user as specified by client credentials. -func (c *ServerContext) ImpersonateUser() error { - return c.sctxt.ImpersonateUser() -} - -// RevertToSelf stops impersonation. It changes current OS thread -// user to what it was before ImpersonateUser was executed. 
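The full three-message NTLM exchange, run in process using only the API above, follows the same sequence the deleted ntlm_test.go below drives:

```go
package example

import "github.com/alexbrainman/sspi/ntlm"

// ntlmExchange runs negotiate -> challenge -> authenticate in process.
func ntlmExchange() error {
	clientCred, err := ntlm.AcquireCurrentUserCredentials()
	if err != nil {
		return err
	}
	defer clientCred.Release()

	serverCred, err := ntlm.AcquireServerCredentials()
	if err != nil {
		return err
	}
	defer serverCred.Release()

	client, negotiateMsg, err := ntlm.NewClientContext(clientCred)
	if err != nil {
		return err
	}
	defer client.Release()

	server, challenge, err := ntlm.NewServerContext(serverCred, negotiateMsg)
	if err != nil {
		return err
	}
	defer server.Release()

	authenticate, err := client.Update(challenge)
	if err != nil {
		return err
	}
	return server.Update(authenticate) // nil error means the client is authenticated
}
```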
-func (c *ServerContext) RevertToSelf() error { - return c.sctxt.RevertToSelf() -} - -// Sizes queries the server context for the sizes used in per-message -// functions. It returns the maximum token size used in authentication -// exchanges, the maximum signature size, the preferred integral size of -// messages, the size of any security trailer, and any error. -func (c *ServerContext) Sizes() (uint32, uint32, uint32, uint32, error) { - return c.sctxt.Sizes() -} diff --git a/vendor/github.com/alexbrainman/sspi/ntlm/ntlm_test.go b/vendor/github.com/alexbrainman/sspi/ntlm/ntlm_test.go deleted file mode 100644 index 730d150957b1..000000000000 --- a/vendor/github.com/alexbrainman/sspi/ntlm/ntlm_test.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package ntlm_test - -import ( - "flag" - "os/user" - "runtime" - "testing" - "time" - - "github.com/alexbrainman/sspi" - "github.com/alexbrainman/sspi/ntlm" -) - -var ( - testDomain = flag.String("domain", "", "domain parameter for TestAcquireUserCredentials") - testUsername = flag.String("username", "", "username parameter for TestAcquireUserCredentials") - testPassword = flag.String("password", "", "password parameter for TestAcquireUserCredentials") -) - -func TestPackageInfo(t *testing.T) { - if ntlm.PackageInfo.Name != "NTLM" { - t.Fatalf(`invalid NTLM package name of %q, "NTLM" is expected.`, ntlm.PackageInfo.Name) - } -} - -func testContextExpiry(t *testing.T, name string, c interface { - Expiry() time.Time -}) { - validFor := c.Expiry().Sub(time.Now()) - if validFor < time.Hour { - t.Errorf("%v expires in %v, more than 1 hour expected", name, validFor) - } - if validFor > 10*24*time.Hour { - t.Errorf("%v expires in %v, less than 10 days expected", name, validFor) - } -} - -func testNTLM(t *testing.T, clientCred *sspi.Credentials) { - serverCred, err := ntlm.AcquireServerCredentials() - if err != nil { - t.Fatal(err) - } - defer serverCred.Release() - - client, token1, err := ntlm.NewClientContext(clientCred) - if err != nil { - t.Fatal(err) - } - defer client.Release() - - testContextExpiry(t, "client security context", client) - - server, token2, err := ntlm.NewServerContext(serverCred, token1) - if err != nil { - t.Fatal(err) - } - defer server.Release() - - testContextExpiry(t, "server security context", server) - - token3, err := client.Update(token2) - if err != nil { - t.Fatal(err) - } - - err = server.Update(token3) - if err != nil { - t.Fatal(err) - } - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - err = server.ImpersonateUser() - if err != nil { - t.Fatal(err) - } - defer server.RevertToSelf() - - _, err = user.Current() - if err != nil { - t.Fatal(err) - } -} - -func TestNTLM(t *testing.T) { - cred, err := ntlm.AcquireCurrentUserCredentials() - if err != nil { - t.Fatal(err) - } - defer cred.Release() - - testNTLM(t, cred) -} - -func TestAcquireUserCredentials(t *testing.T) { - if len(*testDomain) == 0 { - t.Skip("Skipping due to empty \"domain\" parameter") - } - if len(*testUsername) == 0 { - t.Skip("Skipping due to empty \"username\" parameter") - } - if len(*testPassword) == 0 { - t.Skip("Skipping due to empty \"password\" parameter") - } - cred, err := ntlm.AcquireUserCredentials(*testDomain, *testUsername, *testPassword) - if err != nil { - t.Fatal(err) - } - defer cred.Release() - - testNTLM(t, cred) -} diff --git 
a/vendor/github.com/alexbrainman/sspi/schannel/attribute.go b/vendor/github.com/alexbrainman/sspi/schannel/attribute.go deleted file mode 100644 index 79d8ed2b8384..000000000000 --- a/vendor/github.com/alexbrainman/sspi/schannel/attribute.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package schannel - -import ( - "syscall" - "unsafe" - - "github.com/alexbrainman/sspi" -) - -// TODO: maybe move all these into a separate package or something - -func (c *Client) streamSizes() (*_SecPkgContext_StreamSizes, error) { - // TODO: do not retrieve _SecPkgContext_StreamSizes every time (cache the data and invalidate it whenever it can change: handshake, redo, ...) - // TODO: maybe return (header, trailer, maxmsg int, err error) instead - // TODO: maybe this needs to be exported - var ss _SecPkgContext_StreamSizes - ret := sspi.QueryContextAttributes(c.ctx.Handle, _SECPKG_ATTR_STREAM_SIZES, (*byte)(unsafe.Pointer(&ss))) - if ret != sspi.SEC_E_OK { - return nil, ret - } - return &ss, nil -} - -func (c *Client) ProtocolInfo() (name string, major, minor uint32, err error) { - var pi _SecPkgContext_ProtoInfo - ret := sspi.QueryContextAttributes(c.ctx.Handle, _SECPKG_ATTR_PROTO_INFO, (*byte)(unsafe.Pointer(&pi))) - if ret != sspi.SEC_E_OK { - return "", 0, 0, ret - } - defer sspi.FreeContextBuffer((*byte)(unsafe.Pointer(pi.ProtocolName))) - s := syscall.UTF16ToString((*[2 << 20]uint16)(unsafe.Pointer(pi.ProtocolName))[:]) - return s, pi.MajorVersion, pi.MinorVersion, nil -} - -func (c *Client) UserName() (string, error) { - var ns _SecPkgContext_Names - ret := sspi.QueryContextAttributes(c.ctx.Handle, _SECPKG_ATTR_NAMES, (*byte)(unsafe.Pointer(&ns))) - if ret != sspi.SEC_E_OK { - return "", ret - } - defer sspi.FreeContextBuffer((*byte)(unsafe.Pointer(ns.UserName))) - s := syscall.UTF16ToString((*[2 << 20]uint16)(unsafe.Pointer(ns.UserName))[:]) - return s, nil -} - -func (c *Client) AuthorityName() (string, error) { - var a _SecPkgContext_Authority - ret := sspi.QueryContextAttributes(c.ctx.Handle, _SECPKG_ATTR_AUTHORITY, (*byte)(unsafe.Pointer(&a))) - if ret != sspi.SEC_E_OK { - return "", ret - } - defer sspi.FreeContextBuffer((*byte)(unsafe.Pointer(a.AuthorityName))) - s := syscall.UTF16ToString((*[2 << 20]uint16)(unsafe.Pointer(a.AuthorityName))[:]) - return s, nil -} - -func (c *Client) KeyInfo() (sessionKeySize uint32, sigAlg uint32, sigAlgName string, encAlg uint32, encAlgName string, err error) { - var ki _SecPkgContext_KeyInfo - ret := sspi.QueryContextAttributes(c.ctx.Handle, _SECPKG_ATTR_KEY_INFO, (*byte)(unsafe.Pointer(&ki))) - if ret != sspi.SEC_E_OK { - return 0, 0, "", 0, "", ret - } - defer sspi.FreeContextBuffer((*byte)(unsafe.Pointer(ki.SignatureAlgorithmName))) - defer sspi.FreeContextBuffer((*byte)(unsafe.Pointer(ki.EncryptAlgorithmName))) - saname := syscall.UTF16ToString((*[2 << 20]uint16)(unsafe.Pointer(ki.SignatureAlgorithmName))[:]) - eaname := syscall.UTF16ToString((*[2 << 20]uint16)(unsafe.Pointer(ki.EncryptAlgorithmName))[:]) - return ki.KeySize, ki.SignatureAlgorithm, saname, ki.EncryptAlgorithm, eaname, nil -} - -// Sizes queries the context for the sizes used in per-message functions. -// It returns the maximum token size used in authentication exchanges, the -// maximum signature size, the preferred integral size of messages, the -// size of any security trailer, and any error. 
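After a successful Handshake, these attribute helpers expose the negotiated parameters; a sketch of reading them (client is an established *schannel.Client):

```go
package example

import (
	"log"

	"github.com/alexbrainman/sspi/schannel"
)

// logConnectionInfo prints what was negotiated on an established client.
func logConnectionInfo(client *schannel.Client) error {
	proto, major, minor, err := client.ProtocolInfo()
	if err != nil {
		return err
	}
	log.Printf("protocol: %s %d.%d", proto, major, minor)

	keySize, sigAlg, sigAlgName, encAlg, encAlgName, err := client.KeyInfo()
	if err != nil {
		return err
	}
	log.Printf("session key size=%d signature=%s(%d) encryption=%s(%d)",
		keySize, sigAlgName, sigAlg, encAlgName, encAlg)
	return nil
}
```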
-func (c *Client) Sizes() (uint32, uint32, uint32, uint32, error) { - return c.ctx.Sizes() -} diff --git a/vendor/github.com/alexbrainman/sspi/schannel/buffer.go b/vendor/github.com/alexbrainman/sspi/schannel/buffer.go deleted file mode 100644 index 7c0aa4c6ec6d..000000000000 --- a/vendor/github.com/alexbrainman/sspi/schannel/buffer.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package schannel - -import ( - "io" - - "github.com/alexbrainman/sspi" -) - -type inputBuffer struct { - data []byte - reader io.Reader -} - -func newInputBuffer(initialsize int, reader io.Reader) *inputBuffer { - return &inputBuffer{ - data: make([]byte, 0, initialsize), - reader: reader, - } -} - -// copy copies data d into buffer ib. copy grows destination if needed. -func (ib *inputBuffer) copy(d []byte) int { - // TODO: check all call sites, maybe this can be made more efficient - return copy(ib.data, d) -} - -func (ib *inputBuffer) reset() { - ib.data = ib.data[:0] -} - -func (ib *inputBuffer) grow() { - b := make([]byte, len(ib.data), cap(ib.data)*2) - copy(b, ib.data) - ib.data = b -} - -func (ib *inputBuffer) readMore() error { - if len(ib.data) == cap(ib.data) { - ib.grow() - } - n0 := len(ib.data) - ib.data = ib.data[:cap(ib.data)] - n, err := ib.reader.Read(ib.data[n0:]) - if err != nil { - return err - } - ib.data = ib.data[:n0+n] - return nil -} - -func (ib *inputBuffer) bytes() []byte { - return ib.data -} - -func sendOutBuffer(w io.Writer, b *sspi.SecBuffer) error { - _, err := b.WriteAll(w) - // TODO: see if I can preallocate buffers instead - b.Free() - b.Set(sspi.SECBUFFER_TOKEN, nil) - return err -} - -// indexOfSecBuffer searches buffers bs for buffer type buftype. -// It returns -1 if not found. -func indexOfSecBuffer(bs []sspi.SecBuffer, buftype uint32) int { - for i := range bs { - if bs[i].BufferType == buftype { - return i - } - } - return -1 -} diff --git a/vendor/github.com/alexbrainman/sspi/schannel/client.go b/vendor/github.com/alexbrainman/sspi/schannel/client.go deleted file mode 100644 index 99f3016d4537..000000000000 --- a/vendor/github.com/alexbrainman/sspi/schannel/client.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build windows - -package schannel - -import ( - "errors" - "io" - "syscall" - "unsafe" - - "github.com/alexbrainman/sspi" -) - -// TODO: add documentation - -// TODO: maybe come up with a better name - -type Client struct { - ctx *sspi.Context - conn io.ReadWriter - inbuf *inputBuffer -} - -func NewClientContext(cred *sspi.Credentials, conn io.ReadWriter) *Client { - return &Client{ - ctx: sspi.NewClientContext(cred, sspi.ISC_REQ_STREAM|sspi.ISC_REQ_ALLOCATE_MEMORY|sspi.ISC_REQ_EXTENDED_ERROR|sspi.ISC_REQ_MANUAL_CRED_VALIDATION), - conn: conn, - // TODO: decide how large this buffer needs to be (it cannot be too small otherwise messages won't fit) - inbuf: newInputBuffer(1000, conn), - } -} - -func (c *Client) Handshake(serverName string) error { - name, err := syscall.UTF16PtrFromString(serverName) - if err != nil { - return err - } - inBuf := []sspi.SecBuffer{ - {BufferType: sspi.SECBUFFER_TOKEN}, - {BufferType: sspi.SECBUFFER_EMPTY}, - } - // TODO: InitializeSecurityContext doco says that inBufs should be nil on the first call - inBufs := sspi.NewSecBufferDesc(inBuf[:]) - outBuf := []sspi.SecBuffer{ - {BufferType: sspi.SECBUFFER_TOKEN}, - } - outBufs := sspi.NewSecBufferDesc(outBuf) - - for { - ret := c.ctx.Update(name, outBufs, inBufs) - - // send data to peer - err := sendOutBuffer(c.conn, &outBuf[0]) - if err != nil { - return err - } - - // update input buffer - fetchMore := true - switch ret { - case sspi.SEC_E_OK, sspi.SEC_I_CONTINUE_NEEDED: - if inBuf[1].BufferType == sspi.SECBUFFER_EXTRA { - c.inbuf.copy(inBuf[1].Bytes()) - fetchMore = false - } else { - c.inbuf.reset() - } - } - - // decide what to do next - switch ret { - case sspi.SEC_E_OK: - // negotiation is completed - return nil - case sspi.SEC_I_CONTINUE_NEEDED, sspi.SEC_E_INCOMPLETE_MESSAGE: - // continue on - default: - return ret - } - - // fetch more input data if needed - if fetchMore { - err := c.inbuf.readMore() - if err != nil { - return err - } - } - inBuf[0].Set(sspi.SECBUFFER_TOKEN, c.inbuf.bytes()) - inBuf[1].Set(sspi.SECBUFFER_EMPTY, nil) - } -} - -// TODO: protect Handshake, Read, Write and Shutdown with locks -// TODO: call Handshake at the start of Read and Write unless handshake is already complete - -func (c *Client) writeBlock(data []byte) (int, error) { - ss, err := c.streamSizes() - if err != nil { - return 0, err - } - // TODO: maybe make this buffer (and header and trailer buffers) part of Context struct - var b [4]sspi.SecBuffer - b[0].Set(sspi.SECBUFFER_STREAM_HEADER, make([]byte, ss.Header)) - b[1].Set(sspi.SECBUFFER_DATA, data) - b[2].Set(sspi.SECBUFFER_STREAM_TRAILER, make([]byte, ss.Trailer)) - b[3].Set(sspi.SECBUFFER_EMPTY, nil) - ret := sspi.EncryptMessage(c.ctx.Handle, 0, sspi.NewSecBufferDesc(b[:]), 0) - switch ret { - case sspi.SEC_E_OK: - case sspi.SEC_E_CONTEXT_EXPIRED: - // TODO: handle this - panic("writeBlock: SEC_E_CONTEXT_EXPIRED") - default: - return 0, ret - } - n1, err := b[0].WriteAll(c.conn) - if err != nil { - return n1, err - } - n2, err := b[1].WriteAll(c.conn) - if err != nil { - return n1 + n2, err - } - n3, err := b[2].WriteAll(c.conn) - return n1 + n2 + n3, err -} - -func (c *Client) Write(b []byte) (int, error) { - ss, err := c.streamSizes() - if err != nil { - return 0, err - } - // TODO: handle redoing context here - total := 0 - for len(b) > 0 { - // TODO: maybe use ss.BlockSize to decide on optimum block size - b2 := b - if len(b) > int(ss.MaximumMessage) { - b2 = b2[:ss.MaximumMessage] - } - n, err := c.writeBlock(b2) - total += n - if err != nil { - return 
total, err - } - b = b[len(b2):] - } - return total, nil -} - -func (c *Client) Read(data []byte) (int, error) { - if len(c.inbuf.bytes()) == 0 { - err := c.inbuf.readMore() - if err != nil { - return 0, err - } - } - var b [4]sspi.SecBuffer - desc := sspi.NewSecBufferDesc(b[:]) -loop: - for { - b[0].Set(sspi.SECBUFFER_DATA, c.inbuf.bytes()) - b[1].Set(sspi.SECBUFFER_EMPTY, nil) - b[2].Set(sspi.SECBUFFER_EMPTY, nil) - b[3].Set(sspi.SECBUFFER_EMPTY, nil) - ret := sspi.DecryptMessage(c.ctx.Handle, desc, 0, nil) - switch ret { - case sspi.SEC_E_OK: - break loop - case sspi.SEC_E_INCOMPLETE_MESSAGE: - // TODO: it seems b[0].BufferSize or b[1].BufferSize contains "how many more bytes needed for full message" - maybe use it somehow - // read more and try again - err := c.inbuf.readMore() - if err != nil { - return 0, err - } - default: - // TODO: handle other ret values - return 0, errors.New("not implemented") - } - } - i := indexOfSecBuffer(b[:], sspi.SECBUFFER_DATA) - if i == -1 { - return 0, errors.New("DecryptMessage did not return SECBUFFER_DATA") - } - n := copy(data, b[i].Bytes()) - i = indexOfSecBuffer(b[:], sspi.SECBUFFER_EXTRA) - if i == -1 { - c.inbuf.reset() - } else { - c.inbuf.copy(b[i].Bytes()) - } - return n, nil -} - -func (c *Client) applyShutdownControlToken() error { - data := uint32(_SCHANNEL_SHUTDOWN) - b := sspi.SecBuffer{ - BufferType: sspi.SECBUFFER_TOKEN, - Buffer: (*byte)(unsafe.Pointer(&data)), - BufferSize: uint32(unsafe.Sizeof(data)), - } - desc := sspi.SecBufferDesc{ - Version: sspi.SECBUFFER_VERSION, - BuffersCount: 1, - Buffers: &b, - } - ret := sspi.ApplyControlToken(c.ctx.Handle, &desc) - if ret != sspi.SEC_E_OK { - return ret - } - return nil -} - -func (c *Client) Shutdown() error { - err := c.applyShutdownControlToken() - if err != nil { - return err - } - inBuf := []sspi.SecBuffer{ - {BufferType: sspi.SECBUFFER_TOKEN}, - {BufferType: sspi.SECBUFFER_EMPTY}, - } - inBufs := sspi.NewSecBufferDesc(inBuf[:]) - outBuf := []sspi.SecBuffer{ - {BufferType: sspi.SECBUFFER_TOKEN}, - } - outBufs := sspi.NewSecBufferDesc(outBuf) - for { - // TODO: I am not sure if I can pass nil as targname - ret := c.ctx.Update(nil, outBufs, inBufs) - - // send data to peer - err := sendOutBuffer(c.conn, &outBuf[0]) - if err != nil { - return err - } - - // update input buffer - fetchMore := true - switch ret { - case sspi.SEC_E_OK, sspi.SEC_I_CONTINUE_NEEDED: - if inBuf[1].BufferType == sspi.SECBUFFER_EXTRA { - c.inbuf.copy(inBuf[1].Bytes()) - fetchMore = false - } else { - c.inbuf.reset() - } - } - - // decide what to do next - switch ret { - case sspi.SEC_E_OK, sspi.SEC_E_CONTEXT_EXPIRED: - // shutdown is completed - return nil - case sspi.SEC_I_CONTINUE_NEEDED, sspi.SEC_E_INCOMPLETE_MESSAGE: - // continue on - default: - return ret - } - - // fetch more input data if needed - if fetchMore { - err := c.inbuf.readMore() - if err != nil { - return err - } - } - inBuf[0].Set(sspi.SECBUFFER_TOKEN, c.inbuf.bytes()) - inBuf[1].Set(sspi.SECBUFFER_EMPTY, nil) - } -} diff --git a/vendor/github.com/alexbrainman/sspi/schannel/creds.go b/vendor/github.com/alexbrainman/sspi/schannel/creds.go deleted file mode 100644 index 6046e4276c3a..000000000000 --- a/vendor/github.com/alexbrainman/sspi/schannel/creds.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -// Package schannel provides access to the Secure Channel SSP Package. 
-// -package schannel - -import ( - "unsafe" - - "github.com/alexbrainman/sspi" -) - -// TODO: add documentation - -// PackageInfo contains Secure Channel SSP package description. -var PackageInfo *sspi.PackageInfo - -func init() { - var err error - PackageInfo, err = sspi.QueryPackageInfo(sspi.UNISP_NAME) - if err != nil { - panic("failed to fetch Schannel package info: " + err.Error()) - } -} - -func acquireCredentials(creduse uint32) (*sspi.Credentials, error) { - sc := &__SCHANNEL_CRED{ - Version: __SCHANNEL_CRED_VERSION, - // TODO: allow for Creds / CredCount - // TODO: allow for RootStore - // TODO: allow for EnabledProtocols - // TODO: allow for MinimumCipherStrength / MaximumCipherStrength - } - c, err := sspi.AcquireCredentials("", sspi.UNISP_NAME, creduse, (*byte)(unsafe.Pointer(sc))) - if err != nil { - return nil, err - } - return c, nil -} - -func AcquireClientCredentials() (*sspi.Credentials, error) { - return acquireCredentials(sspi.SECPKG_CRED_OUTBOUND) -} diff --git a/vendor/github.com/alexbrainman/sspi/schannel/schannel_test.go b/vendor/github.com/alexbrainman/sspi/schannel/schannel_test.go deleted file mode 100644 index f8a718d11f72..000000000000 --- a/vendor/github.com/alexbrainman/sspi/schannel/schannel_test.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package schannel_test - -import ( - "fmt" - "io/ioutil" - "net" - "testing" - - "github.com/alexbrainman/sspi/schannel" -) - -func TestPackageInfo(t *testing.T) { - want := "Microsoft Unified Security Protocol Provider" - if schannel.PackageInfo.Name != want { - t.Fatalf(`invalid Schannel package name of %q, %q is expected.`, schannel.PackageInfo.Name, want) - } -} - -func TestSchannel(t *testing.T) { - cred, err := schannel.AcquireClientCredentials() - if err != nil { - t.Fatal(err) - } - defer cred.Release() - - conn, err := net.Dial("tcp", "microsoft.com:https") - if err != nil { - t.Fatal(err) - } - defer conn.Close() - - client := schannel.NewClientContext(cred, conn) - err = client.Handshake("microsoft.com") - if err != nil { - t.Fatal(err) - } - protoName, major, minor, err := client.ProtocolInfo() - if err != nil { - t.Fatal(err) - } - t.Logf("protocol info: %s %d.%d", protoName, major, minor) - userName, err := client.UserName() - if err != nil { - t.Fatal(err) - } - t.Logf("user name: %q", userName) - authorityName, err := client.AuthorityName() - if err != nil { - t.Fatal(err) - } - t.Logf("authority name: %q", authorityName) - sessionKeySize, sigAlg, sigAlgName, encAlg, encAlgName, err := client.KeyInfo() - if err != nil { - t.Fatal(err) - } - t.Logf("key info: session_key_size=%d signature_alg=%q(%d) encryption_alg=%q(%d)", sessionKeySize, sigAlgName, sigAlg, encAlgName, encAlg) - // TODO: add some code to verify if negotiated connection is suitable (cipher and so on) - _, err = fmt.Fprintf(client, "GET / HTTP/1.1\r\nHost: foo\r\nConnection: close\r\n\r\n") - if err != nil { - t.Fatal(err) - } - data, err := ioutil.ReadAll(client) - if err != nil { - t.Fatal(err) - } - t.Logf("web page: %q", data) - err = client.Shutdown() - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/alexbrainman/sspi/schannel/syscall.go b/vendor/github.com/alexbrainman/sspi/schannel/syscall.go deleted file mode 100644 index d449ecf96ba1..000000000000 --- a/vendor/github.com/alexbrainman/sspi/schannel/syscall.go +++ /dev/null @@ -1,109 +0,0 @@ 
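The deleted schannel_test.go above doubles as end-to-end usage documentation; condensed into a helper (the host is a placeholder, and the HTTP framing is kept deliberately crude, as in the test):

```go
package example

import (
	"fmt"
	"io/ioutil"
	"net"

	"github.com/alexbrainman/sspi/schannel"
)

// tlsGet fetches "/" from host over an schannel-managed TLS connection.
func tlsGet(host string) ([]byte, error) {
	cred, err := schannel.AcquireClientCredentials()
	if err != nil {
		return nil, err
	}
	defer cred.Release()

	conn, err := net.Dial("tcp", host+":443")
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	client := schannel.NewClientContext(cred, conn)
	if err := client.Handshake(host); err != nil {
		return nil, err
	}

	// client satisfies io.ReadWriter once the handshake is done.
	_, err = fmt.Fprintf(client, "GET / HTTP/1.1\r\nHost: %s\r\nConnection: close\r\n\r\n", host)
	if err != nil {
		return nil, err
	}
	body, err := ioutil.ReadAll(client)
	if err != nil {
		return nil, err
	}
	return body, client.Shutdown()
}
```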
-// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package schannel - -import ( - "syscall" -) - -// TODO: maybe put all these into a separate package, like sspi/schannel/winapi or similar - -const ( - __SCHANNEL_CRED_VERSION = 4 - - _SP_PROT_PCT1_SERVER = 0x00000001 - _SP_PROT_PCT1_CLIENT = 0x00000002 - _SP_PROT_PCT1 = _SP_PROT_PCT1_SERVER | _SP_PROT_PCT1_CLIENT - _SP_PROT_SSL2_SERVER = 0x00000004 - _SP_PROT_SSL2_CLIENT = 0x00000008 - _SP_PROT_SSL2 = _SP_PROT_SSL2_SERVER | _SP_PROT_SSL2_CLIENT - _SP_PROT_SSL3_SERVER = 0x00000010 - _SP_PROT_SSL3_CLIENT = 0x00000020 - _SP_PROT_SSL3 = _SP_PROT_SSL3_SERVER | _SP_PROT_SSL3_CLIENT - _SP_PROT_TLS1_SERVER = 0x00000040 - _SP_PROT_TLS1_CLIENT = 0x00000080 - _SP_PROT_TLS1 = _SP_PROT_TLS1_SERVER | _SP_PROT_TLS1_CLIENT - _SP_PROT_SSL3TLS1_CLIENTS = _SP_PROT_TLS1_CLIENT | _SP_PROT_SSL3_CLIENT - _SP_PROT_SSL3TLS1_SERVERS = _SP_PROT_TLS1_SERVER | _SP_PROT_SSL3_SERVER - _SP_PROT_SSL3TLS1 = _SP_PROT_SSL3 | _SP_PROT_TLS1 -) - -type __SCHANNEL_CRED struct { - Version uint32 - CredCount uint32 - Creds *syscall.CertContext - RootStore syscall.Handle // TODO: make sure this field is syscall.Handle - cMappers uint32 - aphMappers uintptr - SupportedAlgCount uint32 - SupportedAlgs *uint32 - EnabledProtocols uint32 - MinimumCipherStrength uint32 - MaximumCipherStrength uint32 - SessionLifespan uint32 - Flags uint32 - CredFormat uint32 -} - -const ( - _SECPKG_ATTR_SIZES = 0 - _SECPKG_ATTR_NAMES = 1 - _SECPKG_ATTR_LIFESPAN = 2 - _SECPKG_ATTR_DCE_INFO = 3 - _SECPKG_ATTR_STREAM_SIZES = 4 - _SECPKG_ATTR_KEY_INFO = 5 - _SECPKG_ATTR_AUTHORITY = 6 - _SECPKG_ATTR_PROTO_INFO = 7 - _SECPKG_ATTR_PASSWORD_EXPIRY = 8 - _SECPKG_ATTR_SESSION_KEY = 9 - _SECPKG_ATTR_PACKAGE_INFO = 10 - _SECPKG_ATTR_USER_FLAGS = 11 - _SECPKG_ATTR_NEGOTIATION_INFO = 12 - _SECPKG_ATTR_NATIVE_NAMES = 13 - _SECPKG_ATTR_FLAGS = 14 - - _SCHANNEL_RENEGOTIATE = 0 - _SCHANNEL_SHUTDOWN = 1 - _SCHANNEL_ALERT = 2 -) - -type _SecPkgContext_StreamSizes struct { - Header uint32 - Trailer uint32 - MaximumMessage uint32 - Buffers uint32 - BlockSize uint32 -} - -type _SecPkgContext_ProtoInfo struct { - ProtocolName *uint16 - MajorVersion uint32 - MinorVersion uint32 -} - -type _SecPkgContext_Names struct { - UserName *uint16 -} - -type _SecPkgContext_Authority struct { - AuthorityName *uint16 -} - -type _SecPkgContext_KeyInfo struct { - SignatureAlgorithmName *uint16 - EncryptAlgorithmName *uint16 - KeySize uint32 - SignatureAlgorithm uint32 - EncryptAlgorithm uint32 -} - -// TODO: SecPkgContext_ConnectionInfo - -// TODO: SECPKG_ATTR_REMOTE_CERT_CONTEXT -// TODO: SECPKG_ATTR_LOCAL_CERT_CONTEXT - -// TODO: SecPkgContext_IssuerListInfoEx diff --git a/vendor/github.com/alexbrainman/sspi/sspi.go b/vendor/github.com/alexbrainman/sspi/sspi.go deleted file mode 100644 index 04f20b75aeea..000000000000 --- a/vendor/github.com/alexbrainman/sspi/sspi.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build windows - -package sspi - -import ( - "fmt" - "syscall" - "time" - "unsafe" -) - -// TODO: add documentation - -type PackageInfo struct { - Capabilities uint32 - Version uint16 - RPCID uint16 - MaxToken uint32 - Name string - Comment string -} - -func QueryPackageInfo(pkgname string) (*PackageInfo, error) { - name, err := syscall.UTF16PtrFromString(pkgname) - if err != nil { - return nil, err - } - var pi *SecPkgInfo - ret := QuerySecurityPackageInfo(name, &pi) - if ret != SEC_E_OK { - return nil, ret - } - defer FreeContextBuffer((*byte)(unsafe.Pointer(pi))) - - return &PackageInfo{ - Capabilities: pi.Capabilities, - Version: pi.Version, - RPCID: pi.RPCID, - MaxToken: pi.MaxToken, - Name: syscall.UTF16ToString((*[2 << 12]uint16)(unsafe.Pointer(pi.Name))[:]), - Comment: syscall.UTF16ToString((*[2 << 12]uint16)(unsafe.Pointer(pi.Comment))[:]), - }, nil -} - -type Credentials struct { - Handle CredHandle - expiry syscall.Filetime -} - -// AcquireCredentials calls the windows AcquireCredentialsHandle function and -// returns Credentials containing a security handle that can be used for -// InitializeSecurityContext or AcceptSecurityContext operations. -// As a special case, passing an empty string as the principal parameter will -// pass a null string to the underlying function. -func AcquireCredentials(principal string, pkgname string, creduse uint32, authdata *byte) (*Credentials, error) { - var principalName *uint16 - if principal != "" { - var err error - principalName, err = syscall.UTF16PtrFromString(principal) - if err != nil { - return nil, err - } - } - name, err := syscall.UTF16PtrFromString(pkgname) - if err != nil { - return nil, err - } - var c Credentials - ret := AcquireCredentialsHandle(principalName, name, creduse, nil, authdata, 0, 0, &c.Handle, &c.expiry) - if ret != SEC_E_OK { - return nil, ret - } - return &c, nil -} - -func (c *Credentials) Release() error { - if c == nil { - return nil - } - ret := FreeCredentialsHandle(&c.Handle) - if ret != SEC_E_OK { - return ret - } - return nil -} - -func (c *Credentials) Expiry() time.Time { - return time.Unix(0, c.expiry.Nanoseconds()) -} - -// TODO: add functions to display and manage RequestedFlags and EstablishedFlags fields. -// TODO: maybe get rid of RequestedFlags and EstablishedFlags fields, and replace them with input parameter for New...Context and return value of Update (instead of current bool parameter). 
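QueryPackageInfo is what lets the higher-level wrappers size their token buffers; the negotiate and ntlm packages above follow this pattern via PackageInfo.MaxToken. A sketch of the same idea at the sspi level:

```go
package example

import "github.com/alexbrainman/sspi"

// tokenBuffer sizes a token buffer from the package metadata, the same
// pattern the negotiate and ntlm wrappers use via PackageInfo.MaxToken.
func tokenBuffer(pkgname string) ([]byte, error) {
	pi, err := sspi.QueryPackageInfo(pkgname) // e.g. sspi.NTLMSP_NAME
	if err != nil {
		return nil, err
	}
	return make([]byte, pi.MaxToken), nil
}
```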
- -type updateFunc func(c *Context, targname *uint16, h, newh *CtxtHandle, out, in *SecBufferDesc) syscall.Errno - -type Context struct { - Cred *Credentials - Handle *CtxtHandle - handle CtxtHandle - updFn updateFunc - expiry syscall.Filetime - RequestedFlags uint32 - EstablishedFlags uint32 -} - -func NewClientContext(cred *Credentials, flags uint32) *Context { - return &Context{ - Cred: cred, - updFn: initialize, - RequestedFlags: flags, - } -} - -func NewServerContext(cred *Credentials, flags uint32) *Context { - return &Context{ - Cred: cred, - updFn: accept, - RequestedFlags: flags, - } -} - -func initialize(c *Context, targname *uint16, h, newh *CtxtHandle, out, in *SecBufferDesc) syscall.Errno { - return InitializeSecurityContext(&c.Cred.Handle, h, targname, c.RequestedFlags, - 0, SECURITY_NATIVE_DREP, in, 0, newh, out, &c.EstablishedFlags, &c.expiry) -} - -func accept(c *Context, targname *uint16, h, newh *CtxtHandle, out, in *SecBufferDesc) syscall.Errno { - return AcceptSecurityContext(&c.Cred.Handle, h, in, c.RequestedFlags, - SECURITY_NATIVE_DREP, newh, out, &c.EstablishedFlags, &c.expiry) -} - -func (c *Context) Update(targname *uint16, out, in *SecBufferDesc) syscall.Errno { - h := c.Handle - if c.Handle == nil { - c.Handle = &c.handle - } - return c.updFn(c, targname, h, c.Handle, out, in) -} - -func (c *Context) Release() error { - if c == nil { - return nil - } - ret := DeleteSecurityContext(c.Handle) - if ret != SEC_E_OK { - return ret - } - return nil -} - -func (c *Context) Expiry() time.Time { - return time.Unix(0, c.expiry.Nanoseconds()) -} - -// TODO: add comment to function doco that this "impersonation" is applied to current OS thread. -func (c *Context) ImpersonateUser() error { - ret := ImpersonateSecurityContext(c.Handle) - if ret != SEC_E_OK { - return ret - } - return nil -} - -func (c *Context) RevertToSelf() error { - ret := RevertSecurityContext(c.Handle) - if ret != SEC_E_OK { - return ret - } - return nil -} - -// Sizes queries the context for the sizes used in per-message functions. -// It returns the maximum token size used in authentication exchanges, the -// maximum signature size, the preferred integral size of messages, the -// size of any security trailer, and any error. -func (c *Context) Sizes() (uint32, uint32, uint32, uint32, error) { - var s _SecPkgContext_Sizes - ret := QueryContextAttributes(c.Handle, _SECPKG_ATTR_SIZES, (*byte)(unsafe.Pointer(&s))) - if ret != SEC_E_OK { - return 0, 0, 0, 0, ret - } - return s.MaxToken, s.MaxSignature, s.BlockSize, s.SecurityTrailer, nil -} - -// VerifyFlags determines if all flags used to construct the context -// were honored (see NewClientContext). It should be called after c.Update. -func (c *Context) VerifyFlags() error { - return c.VerifySelectiveFlags(c.RequestedFlags) -} - -// VerifySelectiveFlags determines if the given flags were honored (see NewClientContext). -// It should be called after c.Update. 
-func (c *Context) VerifySelectiveFlags(flags uint32) error { - if valid, missing, extra := verifySelectiveFlags(flags, c.RequestedFlags); !valid { - return fmt.Errorf("sspi: invalid flags check: desired=%b requested=%b missing=%b extra=%b", flags, c.RequestedFlags, missing, extra) - } - if valid, missing, extra := verifySelectiveFlags(flags, c.EstablishedFlags); !valid { - return fmt.Errorf("sspi: invalid flags: desired=%b established=%b missing=%b extra=%b", flags, c.EstablishedFlags, missing, extra) - } - return nil -} - -// verifySelectiveFlags determines if all bits requested in flags are set in establishedFlags. -// missing represents the bits set in flags that are not set in establishedFlags. -// extra represents the bits set in establishedFlags that are not set in flags. -// valid is true and missing is zero when establishedFlags has all of the requested flags. -func verifySelectiveFlags(flags, establishedFlags uint32) (valid bool, missing, extra uint32) { - missing = flags&establishedFlags ^ flags - extra = flags | establishedFlags ^ flags - valid = missing == 0 - return valid, missing, extra -} - -// NewSecBufferDesc returns an initialized SecBufferDesc describing the -// provided SecBuffer. -func NewSecBufferDesc(b []SecBuffer) *SecBufferDesc { - return &SecBufferDesc{ - Version: SECBUFFER_VERSION, - BuffersCount: uint32(len(b)), - Buffers: &b[0], - } -} diff --git a/vendor/github.com/alexbrainman/sspi/sspi_test.go b/vendor/github.com/alexbrainman/sspi/sspi_test.go deleted file mode 100644 index 5edf5617e5fd..000000000000 --- a/vendor/github.com/alexbrainman/sspi/sspi_test.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package sspi_test - -import ( - "testing" - - "github.com/alexbrainman/sspi" -) - -func TestQueryPackageInfo(t *testing.T) { - pkgnames := []string{ - sspi.NTLMSP_NAME, - sspi.MICROSOFT_KERBEROS_NAME, - sspi.NEGOSSP_NAME, - sspi.UNISP_NAME, - } - for _, name := range pkgnames { - pi, err := sspi.QueryPackageInfo(name) - if err != nil { - t.Error(err) - continue - } - if pi.Name != name { - t.Errorf("unexpected package name %q returned for %q package: package info is %#v", pi.Name, name, pi) - continue - } - } -} diff --git a/vendor/github.com/alexbrainman/sspi/syscall.go b/vendor/github.com/alexbrainman/sspi/syscall.go deleted file mode 100644 index 04660df2aea4..000000000000 --- a/vendor/github.com/alexbrainman/sspi/syscall.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
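The bit arithmetic in verifySelectiveFlags above is terse; copying the two expressions into a standalone program makes the missing/extra split concrete. Note that `valid` depends only on `missing`; `extra` is reported purely for diagnostics.

```go
package main

import "fmt"

// Copied from the deleted sspi.go: missing holds requested bits that were
// not established; extra holds established bits that were not requested.
func verifySelectiveFlags(flags, establishedFlags uint32) (valid bool, missing, extra uint32) {
	missing = flags&establishedFlags ^ flags
	extra = flags | establishedFlags ^ flags
	valid = missing == 0
	return valid, missing, extra
}

func main() {
	// Request DELEGATE|MUTUAL_AUTH (0x3); suppose only MUTUAL_AUTH|REPLAY_DETECT
	// (0x6) were established.
	valid, missing, extra := verifySelectiveFlags(0x3, 0x6)
	fmt.Printf("valid=%v missing=%#x extra=%#x\n", valid, missing, extra)
	// Output: valid=false missing=0x1 (DELEGATE) extra=0x4 (REPLAY_DETECT)
}
```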
- -// +build windows - -package sspi - -import ( - "syscall" -) - -const ( - SEC_E_OK = syscall.Errno(0) - - SEC_I_COMPLETE_AND_CONTINUE = syscall.Errno(590612) - SEC_I_COMPLETE_NEEDED = syscall.Errno(590611) - SEC_I_CONTINUE_NEEDED = syscall.Errno(590610) - - SEC_E_LOGON_DENIED = syscall.Errno(0x8009030c) - SEC_E_CONTEXT_EXPIRED = syscall.Errno(0x80090317) // not sure if the value is valid - SEC_E_INCOMPLETE_MESSAGE = syscall.Errno(0x80090318) - - NTLMSP_NAME = "NTLM" - MICROSOFT_KERBEROS_NAME = "Kerberos" - NEGOSSP_NAME = "Negotiate" - UNISP_NAME = "Microsoft Unified Security Protocol Provider" - - _SECPKG_ATTR_SIZES = 0 - _SECPKG_ATTR_NAMES = 1 - _SECPKG_ATTR_LIFESPAN = 2 - _SECPKG_ATTR_DCE_INFO = 3 - _SECPKG_ATTR_STREAM_SIZES = 4 - _SECPKG_ATTR_KEY_INFO = 5 - _SECPKG_ATTR_AUTHORITY = 6 - _SECPKG_ATTR_PROTO_INFO = 7 - _SECPKG_ATTR_PASSWORD_EXPIRY = 8 - _SECPKG_ATTR_SESSION_KEY = 9 - _SECPKG_ATTR_PACKAGE_INFO = 10 - _SECPKG_ATTR_USER_FLAGS = 11 - _SECPKG_ATTR_NEGOTIATION_INFO = 12 - _SECPKG_ATTR_NATIVE_NAMES = 13 - _SECPKG_ATTR_FLAGS = 14 -) - -type SecPkgInfo struct { - Capabilities uint32 - Version uint16 - RPCID uint16 - MaxToken uint32 - Name *uint16 - Comment *uint16 -} - -type _SecPkgContext_Sizes struct { - MaxToken uint32 - MaxSignature uint32 - BlockSize uint32 - SecurityTrailer uint32 -} - -//sys QuerySecurityPackageInfo(pkgname *uint16, pkginfo **SecPkgInfo) (ret syscall.Errno) = secur32.QuerySecurityPackageInfoW -//sys FreeContextBuffer(buf *byte) (ret syscall.Errno) = secur32.FreeContextBuffer - -const ( - SECPKG_CRED_INBOUND = 1 - SECPKG_CRED_OUTBOUND = 2 - SECPKG_CRED_BOTH = (SECPKG_CRED_OUTBOUND | SECPKG_CRED_INBOUND) - - SEC_WINNT_AUTH_IDENTITY_UNICODE = 0x2 -) - -type SEC_WINNT_AUTH_IDENTITY struct { - User *uint16 - UserLength uint32 - Domain *uint16 - DomainLength uint32 - Password *uint16 - PasswordLength uint32 - Flags uint32 -} - -type LUID struct { - LowPart uint32 - HighPart int32 -} - -type CredHandle struct { - Lower uintptr - Upper uintptr -} - -//sys AcquireCredentialsHandle(principal *uint16, pkgname *uint16, creduse uint32, logonid *LUID, authdata *byte, getkeyfn uintptr, getkeyarg uintptr, handle *CredHandle, expiry *syscall.Filetime) (ret syscall.Errno) = secur32.AcquireCredentialsHandleW -//sys FreeCredentialsHandle(handle *CredHandle) (ret syscall.Errno) = secur32.FreeCredentialsHandle - -const ( - SECURITY_NATIVE_DREP = 16 - - SECBUFFER_DATA = 1 - SECBUFFER_TOKEN = 2 - SECBUFFER_PKG_PARAMS = 3 - SECBUFFER_MISSING = 4 - SECBUFFER_EXTRA = 5 - SECBUFFER_STREAM_TRAILER = 6 - SECBUFFER_STREAM_HEADER = 7 - SECBUFFER_PADDING = 9 - SECBUFFER_STREAM = 10 - SECBUFFER_READONLY = 0x80000000 - SECBUFFER_ATTRMASK = 0xf0000000 - SECBUFFER_VERSION = 0 - SECBUFFER_EMPTY = 0 - - ISC_REQ_DELEGATE = 1 - ISC_REQ_MUTUAL_AUTH = 2 - ISC_REQ_REPLAY_DETECT = 4 - ISC_REQ_SEQUENCE_DETECT = 8 - ISC_REQ_CONFIDENTIALITY = 16 - ISC_REQ_USE_SESSION_KEY = 32 - ISC_REQ_PROMPT_FOR_CREDS = 64 - ISC_REQ_USE_SUPPLIED_CREDS = 128 - ISC_REQ_ALLOCATE_MEMORY = 256 - ISC_REQ_USE_DCE_STYLE = 512 - ISC_REQ_DATAGRAM = 1024 - ISC_REQ_CONNECTION = 2048 - ISC_REQ_EXTENDED_ERROR = 16384 - ISC_REQ_STREAM = 32768 - ISC_REQ_INTEGRITY = 65536 - ISC_REQ_MANUAL_CRED_VALIDATION = 524288 - ISC_REQ_HTTP = 268435456 - - ASC_REQ_DELEGATE = 1 - ASC_REQ_MUTUAL_AUTH = 2 - ASC_REQ_REPLAY_DETECT = 4 - ASC_REQ_SEQUENCE_DETECT = 8 - ASC_REQ_CONFIDENTIALITY = 16 - ASC_REQ_USE_SESSION_KEY = 32 - ASC_REQ_ALLOCATE_MEMORY = 256 - ASC_REQ_USE_DCE_STYLE = 512 - ASC_REQ_DATAGRAM = 1024 - ASC_REQ_CONNECTION = 2048 - 
ASC_REQ_EXTENDED_ERROR = 32768 - ASC_REQ_STREAM = 65536 - ASC_REQ_INTEGRITY = 131072 -) - -type CtxtHandle struct { - Lower uintptr - Upper uintptr -} - -type SecBuffer struct { - BufferSize uint32 - BufferType uint32 - Buffer *byte -} - -type SecBufferDesc struct { - Version uint32 - BuffersCount uint32 - Buffers *SecBuffer -} - -//sys InitializeSecurityContext(credential *CredHandle, context *CtxtHandle, targname *uint16, contextreq uint32, reserved1 uint32, targdatarep uint32, input *SecBufferDesc, reserved2 uint32, newcontext *CtxtHandle, output *SecBufferDesc, contextattr *uint32, expiry *syscall.Filetime) (ret syscall.Errno) = secur32.InitializeSecurityContextW -//sys AcceptSecurityContext(credential *CredHandle, context *CtxtHandle, input *SecBufferDesc, contextreq uint32, targdatarep uint32, newcontext *CtxtHandle, output *SecBufferDesc, contextattr *uint32, expiry *syscall.Filetime) (ret syscall.Errno) = secur32.AcceptSecurityContext -//sys CompleteAuthToken(context *CtxtHandle, token *SecBufferDesc) (ret syscall.Errno) = secur32.CompleteAuthToken -//sys DeleteSecurityContext(context *CtxtHandle) (ret syscall.Errno) = secur32.DeleteSecurityContext -//sys ImpersonateSecurityContext(context *CtxtHandle) (ret syscall.Errno) = secur32.ImpersonateSecurityContext -//sys RevertSecurityContext(context *CtxtHandle) (ret syscall.Errno) = secur32.RevertSecurityContext -//sys QueryContextAttributes(context *CtxtHandle, attribute uint32, buf *byte) (ret syscall.Errno) = secur32.QueryContextAttributesW -//sys EncryptMessage(context *CtxtHandle, qop uint32, message *SecBufferDesc, messageseqno uint32) (ret syscall.Errno) = secur32.EncryptMessage -//sys DecryptMessage(context *CtxtHandle, message *SecBufferDesc, messageseqno uint32, qop *uint32) (ret syscall.Errno) = secur32.DecryptMessage -//sys ApplyControlToken(context *CtxtHandle, input *SecBufferDesc) (ret syscall.Errno) = secur32.ApplyControlToken -//sys MakeSignature(context *CtxtHandle, qop uint32, message *SecBufferDesc, messageseqno uint32) (ret syscall.Errno) = secur32.MakeSignature -//sys VerifySignature(context *CtxtHandle, message *SecBufferDesc, messageseqno uint32, qop *uint32) (ret syscall.Errno) = secur32.VerifySignature diff --git a/vendor/github.com/alexbrainman/sspi/zsyscall_windows.go b/vendor/github.com/alexbrainman/sspi/zsyscall_windows.go deleted file mode 100644 index 55e820997016..000000000000 --- a/vendor/github.com/alexbrainman/sspi/zsyscall_windows.go +++ /dev/null @@ -1,152 +0,0 @@ -// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT - -package sspi - -import ( - "syscall" - "unsafe" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modsecur32 = syscall.NewLazyDLL("secur32.dll") - - procQuerySecurityPackageInfoW = modsecur32.NewProc("QuerySecurityPackageInfoW") - procFreeContextBuffer = modsecur32.NewProc("FreeContextBuffer") - procAcquireCredentialsHandleW = modsecur32.NewProc("AcquireCredentialsHandleW") - procFreeCredentialsHandle = modsecur32.NewProc("FreeCredentialsHandle") - procInitializeSecurityContextW = modsecur32.NewProc("InitializeSecurityContextW") - procAcceptSecurityContext = modsecur32.NewProc("AcceptSecurityContext") - procCompleteAuthToken = modsecur32.NewProc("CompleteAuthToken") - procDeleteSecurityContext = modsecur32.NewProc("DeleteSecurityContext") - procImpersonateSecurityContext = modsecur32.NewProc("ImpersonateSecurityContext") - procRevertSecurityContext = modsecur32.NewProc("RevertSecurityContext") - procQueryContextAttributesW = modsecur32.NewProc("QueryContextAttributesW") - procEncryptMessage = modsecur32.NewProc("EncryptMessage") - procDecryptMessage = modsecur32.NewProc("DecryptMessage") - procApplyControlToken = modsecur32.NewProc("ApplyControlToken") - procMakeSignature = modsecur32.NewProc("MakeSignature") - procVerifySignature = modsecur32.NewProc("VerifySignature") -) - -func QuerySecurityPackageInfo(pkgname *uint16, pkginfo **SecPkgInfo) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall(procQuerySecurityPackageInfoW.Addr(), 2, uintptr(unsafe.Pointer(pkgname)), uintptr(unsafe.Pointer(pkginfo)), 0) - ret = syscall.Errno(r0) - return -} - -func FreeContextBuffer(buf *byte) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall(procFreeContextBuffer.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) - ret = syscall.Errno(r0) - return -} - -func AcquireCredentialsHandle(principal *uint16, pkgname *uint16, creduse uint32, logonid *LUID, authdata *byte, getkeyfn uintptr, getkeyarg uintptr, handle *CredHandle, expiry *syscall.Filetime) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall9(procAcquireCredentialsHandleW.Addr(), 9, uintptr(unsafe.Pointer(principal)), uintptr(unsafe.Pointer(pkgname)), uintptr(creduse), uintptr(unsafe.Pointer(logonid)), uintptr(unsafe.Pointer(authdata)), uintptr(getkeyfn), uintptr(getkeyarg), uintptr(unsafe.Pointer(handle)), uintptr(unsafe.Pointer(expiry))) - ret = syscall.Errno(r0) - return -} - -func FreeCredentialsHandle(handle *CredHandle) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall(procFreeCredentialsHandle.Addr(), 1, uintptr(unsafe.Pointer(handle)), 0, 0) - ret = syscall.Errno(r0) - return -} - -func InitializeSecurityContext(credential *CredHandle, context *CtxtHandle, targname *uint16, contextreq uint32, reserved1 uint32, targdatarep uint32, input *SecBufferDesc, reserved2 uint32, newcontext *CtxtHandle, output *SecBufferDesc, contextattr *uint32, expiry *syscall.Filetime) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall12(procInitializeSecurityContextW.Addr(), 12, uintptr(unsafe.Pointer(credential)), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(targname)), uintptr(contextreq), uintptr(reserved1), uintptr(targdatarep), uintptr(unsafe.Pointer(input)), uintptr(reserved2), uintptr(unsafe.Pointer(newcontext)), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(contextattr)), uintptr(unsafe.Pointer(expiry))) - ret = syscall.Errno(r0) - return -} - -func AcceptSecurityContext(credential *CredHandle, context *CtxtHandle, input *SecBufferDesc, contextreq uint32, targdatarep uint32, newcontext *CtxtHandle, output *SecBufferDesc, contextattr *uint32, expiry *syscall.Filetime) (ret syscall.Errno) { - r0, _, _ := 
syscall.Syscall9(procAcceptSecurityContext.Addr(), 9, uintptr(unsafe.Pointer(credential)), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(input)), uintptr(contextreq), uintptr(targdatarep), uintptr(unsafe.Pointer(newcontext)), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(contextattr)), uintptr(unsafe.Pointer(expiry))) - ret = syscall.Errno(r0) - return -} - -func CompleteAuthToken(context *CtxtHandle, token *SecBufferDesc) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall(procCompleteAuthToken.Addr(), 2, uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(token)), 0) - ret = syscall.Errno(r0) - return -} - -func DeleteSecurityContext(context *CtxtHandle) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall(procDeleteSecurityContext.Addr(), 1, uintptr(unsafe.Pointer(context)), 0, 0) - ret = syscall.Errno(r0) - return -} - -func ImpersonateSecurityContext(context *CtxtHandle) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall(procImpersonateSecurityContext.Addr(), 1, uintptr(unsafe.Pointer(context)), 0, 0) - ret = syscall.Errno(r0) - return -} - -func RevertSecurityContext(context *CtxtHandle) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall(procRevertSecurityContext.Addr(), 1, uintptr(unsafe.Pointer(context)), 0, 0) - ret = syscall.Errno(r0) - return -} - -func QueryContextAttributes(context *CtxtHandle, attribute uint32, buf *byte) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall(procQueryContextAttributesW.Addr(), 3, uintptr(unsafe.Pointer(context)), uintptr(attribute), uintptr(unsafe.Pointer(buf))) - ret = syscall.Errno(r0) - return -} - -func EncryptMessage(context *CtxtHandle, qop uint32, message *SecBufferDesc, messageseqno uint32) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall6(procEncryptMessage.Addr(), 4, uintptr(unsafe.Pointer(context)), uintptr(qop), uintptr(unsafe.Pointer(message)), uintptr(messageseqno), 0, 0) - ret = syscall.Errno(r0) - return -} - -func DecryptMessage(context *CtxtHandle, message *SecBufferDesc, messageseqno uint32, qop *uint32) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall6(procDecryptMessage.Addr(), 4, uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(message)), uintptr(messageseqno), uintptr(unsafe.Pointer(qop)), 0, 0) - ret = syscall.Errno(r0) - return -} - -func ApplyControlToken(context *CtxtHandle, input *SecBufferDesc) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall(procApplyControlToken.Addr(), 2, uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(input)), 0) - ret = syscall.Errno(r0) - return -} - -func MakeSignature(context *CtxtHandle, qop uint32, message *SecBufferDesc, messageseqno uint32) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall6(procMakeSignature.Addr(), 4, uintptr(unsafe.Pointer(context)), uintptr(qop), uintptr(unsafe.Pointer(message)), uintptr(messageseqno), 0, 0) - ret = syscall.Errno(r0) - return -} - -func VerifySignature(context *CtxtHandle, message *SecBufferDesc, messageseqno uint32, qop *uint32) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall6(procVerifySignature.Addr(), 4, uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(message)), uintptr(messageseqno), uintptr(unsafe.Pointer(qop)), 0, 0) - ret = syscall.Errno(r0) - return -} diff --git a/vendor/github.com/apcera/gssapi/.gitignore b/vendor/github.com/apcera/gssapi/.gitignore deleted file mode 100644 index 1377554ebea6..000000000000 --- a/vendor/github.com/apcera/gssapi/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.swp diff --git a/vendor/github.com/apcera/gssapi/.travis.yml 
b/vendor/github.com/apcera/gssapi/.travis.yml deleted file mode 100644 index 18af0f274ad6..000000000000 --- a/vendor/github.com/apcera/gssapi/.travis.yml +++ /dev/null @@ -1,33 +0,0 @@ -language: go - -go: - - 1.5.4 - - 1.6.3 - - 1.7.1 - - tip - -sudo: required - -services: - - docker - -env: - global: - secure: o2LFpvYCeeVurf/rttGoELGWBqWhUUtk/CKZC8pCY8JzMZHC35xVJwaBAWZarJCnno75ZGp0VzTLKkV6vA+Rr0uM8Q3SCScDc+/LvC4k5DveL+9oce2PxpFpiGGWvX/Xjk0zFjvOoGWitvU6ozQhUhejMSzq4ZdZqzTPDLlOK18bRN9y9eOnRFXxLkgLgRKoEEQActvbLeH6LK4edleZOyADoYS/6VlqFspLH2qEeyO3fhB5o/EFTfcQ/3L0v+ifP/bjfBQFVGTUfqx9rM+jbPvzcumR2e3Gw2Hub+xaO67//jJ9UGLpD4javJ1Ff6Tg5YYjl/OYuaM0iqqGNI1M/QOuhpxt7gvd7uCmV6C2JNxVH23jBJu6wiKjP30J8J0AuuTj4xT9JrpSmkyvjHmnm2NX55ZW1pv4OQZD/n4WM3m53ADfcKQByCB1GQlArHj11SD1NrvFqED4id0puCOBa8EUUw0R7++i6a75L/B/03/WTmSk8SKECQhObM6O4juHy8V58r7W3pzZ6K+Arjj6uSA6nT7vLx1dD8xdllbn3obwPaWPuwMHPXtWl/OpQHUhpvlAsxDeJFY1Y16Mxu6Ht6qeqP7Guz3aAGFFVyymnU1UeVHm4ABC7yQnn0nMMsl5bq7KiOvRD5m01AEZLd846MwYP+c0yte/0+Cre4x8OVQ= - -before_install: - - sudo apt-get update -q - - sudo apt-get install -y gcc libkrb5-dev - - go get github.com/mattn/goveralls - -install: true - -script: - - go vet ./... - - go fmt ./... - - go test -v -covermode=count -coverprofile=coverage.out - - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci - - go test -i -race ./... - - go test -v -race ./... - - cd test/ - - ./run-heimdal.sh diff --git a/vendor/github.com/apcera/gssapi/CONTRIBUTING.md b/vendor/github.com/apcera/gssapi/CONTRIBUTING.md deleted file mode 100644 index fc0602f691bd..000000000000 --- a/vendor/github.com/apcera/gssapi/CONTRIBUTING.md +++ /dev/null @@ -1,21 +0,0 @@ -# How to Contribute - -gssapi is [Apache 2.0 licensed](LICENSE), and we're happy to accept pull -requests. Planned/desired work lives in the [TODO file](TODO.md). If you want to -start working on something, grab an item from the [TODO](TODO.md), create a new -[Github Issue](https://github.com/apcera/gssapi/issues) and assign it to -yourself, and begin working on your code. - -# Suggested Workflow - -- Fork our repo -- Read the [README](README.md) on what we use gssapi for -- Read the [TODO](TODO.md) for planned/desired work -- File issues, submit pull requests, and let us know what you think - -# Style - -We defer to the [style guide](https://github.com/golang/go/wiki/CodeReviewComments) -from the Golang team wherever possible. - -Thanks for considering working on GSSAPI! We hope to see patches from you soon. diff --git a/vendor/github.com/apcera/gssapi/LICENSE b/vendor/github.com/apcera/gssapi/LICENSE deleted file mode 100644 index 8f71f43fee3f..000000000000 --- a/vendor/github.com/apcera/gssapi/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- diff --git a/vendor/github.com/apcera/gssapi/README.md b/vendor/github.com/apcera/gssapi/README.md deleted file mode 100644 index cc6aa214161d..000000000000 --- a/vendor/github.com/apcera/gssapi/README.md +++ /dev/null @@ -1,83 +0,0 @@ -# gssapi - -[![License][License-Image]][License-Url] [![ReportCard][ReportCard-Image]][ReportCard-Url] [![Build][Build-Status-Image]][Build-Status-Url] [![Coverage][Coverage-Image]][Coverage-Url] [![GoDoc][GoDoc-Image]][GoDoc-URL] - -The gssapi package is a Golang wrapper around [RFC 2743](https://www.ietf.org/rfc/rfc2743.txt), -the Generic Security Services Application Programming Interface. (GSSAPI) - -## Uses - -We use it to authenticate clients with our authentication server. Clients talk -to a Kerberos or Active Directory Domain Controller to retrieve a Kerberos -service ticket, which we verify with a keytab on our authentication server. - -When a user logs into Kerberos using `kinit`, they get a Kerberos TGT. During -Kerberos authentication, that TGT is used to retrieve a Service Ticket from the -Domain Controller. GSSAPI lets us authenticate without having to know where or -in what form the TGT is stored. Because each operating system vendor might -move that, this package wraps your system GSSAPI implementation. - -What do you use it for? Let us know! - -## Building - -This library is `go get` compatible. However, it also requires header files -to build against the GSSAPI C library on your platform. - -Golang needs to be able to find a gcc compiler (and one which is recent enough -to support gccgo). If the system compiler isn't gcc, then use `CC` in environ -to point the Golang build tools at your gcc. (LLVM's clang does not work and -Golang's diagnostics if it encounters clang are to spew a lot of -apparently-unrelated errors from trying to use it anyway). - -On MacOS, the default headers are too old; you can use newer headers for -building but still use the normal system libraries. - -* FreeBSD: `export CC=gcc48; go install` -* MacOS: `brew install homebrew/dupes/heimdal --without-x11` -* Ubuntu: see `apt-get` in `test/docker/client/Dockerfile` - -## Testing - -Tests in the main `gssapi` repository can be run using the built-in `go test`. - -To run an integrated test against a live Heimdal Kerberos Domain Controller, -`cd test` and bring up [Docker](https://www.docker.com/), (or -[boot2docker](http://boot2docker.io/)). Then, run `./run-heimdal.sh`. This will -run some go tests using three Docker images: a client, a service, and a domain -controller. The service will receive a generated keytab file, and the client -will point to the domain controller for authentication. - -**NOTE:** to run Docker tests, your `GOROOT` environment variable MUST be set. - -## TODO - -See our [TODO doc](TODO.md) on stuff you can do to help. We welcome -contributions! 
- -## Verified platforms - -We've tested that we can authenticate against: - -- Heimdal Kerberos -- Active Directory - -We suspect we can authenticate against: - -- MIT Kerberos - -We definitely cannot authenticate with: - -- Windows clients (because Windows uses SSPI instead of GSSAPI as the library - interface) - -[License-Url]: https://opensource.org/licenses/Apache-2.0 -[License-Image]: https://img.shields.io/hexpm/l/plug.svg -[Build-Status-Url]: http://travis-ci.org/apcera/gssapi -[Build-Status-Image]: https://travis-ci.org/apcera/gssapi.svg?branch=master -[Coverage-Url]: https://coveralls.io/r/apcera/gssapi?branch=master -[Coverage-image]: https://img.shields.io/coveralls/apcera/gssapi.svg?branch=master -[ReportCard-Url]: http://goreportcard.com/report/github.com/apcera/gssapi -[ReportCard-Image]: http://goreportcard.com/badge/github.com/apcera/gssapi -[Godoc-Url]: https://godoc.org/github.com/apcera/gssapi -[Godoc-Image]: https://godoc.org/github.com/apcera/gssapi?status.svg diff --git a/vendor/github.com/apcera/gssapi/TODO.md b/vendor/github.com/apcera/gssapi/TODO.md deleted file mode 100644 index b9afc7f5a7c8..000000000000 --- a/vendor/github.com/apcera/gssapi/TODO.md +++ /dev/null @@ -1,83 +0,0 @@ -# TODO - -We have stuff left outstanding, and would appreciate your help. - -## GSSAPI calls - -These are the remaining GSSAPI calls to implement, in no particular order. - -- [ ] gss_acquire_cred_from -- [ ] gss_acquire_cred_impersonate_name -- [ ] gss_acquire_cred_with_password -- [ ] gss_add_buffer_set_member -- [ ] gss_add_cred_from -- [ ] gss_add_cred_impersonate_name -- [ ] gss_authorize_localname -- [ ] gss_complete_auth_token -- [ ] gss_context_time -- [ ] gss_create_empty_buffer_set -- [ ] gss_decapsulate_token -- [ ] gss_delete_name_attribute -- [ ] gss_display_mech_attr -- [ ] gss_display_name_ext -- [ ] gss_encapsulate_token -- [ ] gss_export_cred -- [ ] gss_export_name_composite -- [ ] gss_export_sec_context -- [ ] gss_get_mic_iov -- [ ] gss_get_mic_iov_length -- [ ] gss_get_name_attribute -- [ ] gss_import_cred -- [ ] gss_import_sec_context -- [ ] gss_indicate_mechs_by_attrs -- [ ] gss_inquire_attrs_for_mech -- [ ] gss_inquire_cred_by_oid -- [ ] gss_inquire_mech_for_saslname -- [ ] gss_inquire_name -- [ ] gss_inquire_saslname_for_mech -- [ ] gss_inquire_sec_context_by_oid -- [ ] gss_krb5_ccache_name -- [ ] gss_krb5_copy_ccache -- [ ] gss_krb5_export_lucid_sec_context -- [ ] gss_krb5_free_lucid_sec_context -- [ ] gss_krb5_get_tkt_flags -- [ ] gss_krb5_import_cred -- [ ] gss_krb5_set_allowable_enctypes -- [ ] gss_krb5_set_cred_rcache -- [ ] gss_krb5int_make_seal_token_v3 -- [ ] gss_krb5int_unseal_token_v3 -- [ ] gss_localname -- [ ] gss_map_name_to_any -- [ ] gss_pname_to_uid -- [ ] gss_process_context_token -- [ ] gss_pseudo_random -- [ ] gss_release_any_name_mapping -- [ ] gss_release_buffer_set -- [ ] gss_release_iov_buffer -- [ ] gss_release_oid -- [ ] gss_seal -- [ ] gss_set_cred_option -- [ ] gss_set_name_attribute -- [ ] gss_set_neg_mechs -- [ ] gss_set_sec_context_option -- [ ] gss_sign -- [ ] gss_store_cred -- [ ] gss_store_cred_into -- [ ] gss_unseal -- [ ] gss_unwrap_aead -- [ ] gss_unwrap_iov -- [ ] gss_userok -- [ ] gss_verify -- [ ] gss_verify_mic_iov -- [ ] gss_wrap_aead -- [ ] gss_wrap_iov -- [ ] gss_wrap_iov_length -- [ ] gss_wrap_size_limit -- [ ] krb5_gss_register_acceptor_identity -- [ ] krb5_gss_use_kdc_context - -## Domain Controller compatibility - -We haven't tested against: - -- MIT Kerberos diff --git a/vendor/github.com/apcera/gssapi/buffer.go 
b/vendor/github.com/apcera/gssapi/buffer.go deleted file mode 100644 index 03b8bf420496..000000000000 --- a/vendor/github.com/apcera/gssapi/buffer.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. - -package gssapi - -/* -#include -#include - -#include - -const size_t gss_buffer_size=sizeof(gss_buffer_desc); - -OM_uint32 -wrap_gss_release_buffer(void *fp, - OM_uint32 *minor_status, - gss_buffer_t buf) -{ - return ((OM_uint32(*)( - OM_uint32*, - gss_buffer_t))fp) (minor_status, buf); -} - -OM_uint32 -wrap_gss_import_name(void *fp, - OM_uint32 *minor_status, - const gss_buffer_t input_name_buffer, - const gss_OID input_name_type, - gss_name_t *output_name) -{ - return ((OM_uint32(*)( - OM_uint32 *, - const gss_buffer_t, - const gss_OID, - gss_name_t *)) fp) ( - minor_status, - input_name_buffer, - input_name_type, - output_name); -} - -int -wrap_gss_buffer_equal( - gss_buffer_t b1, - gss_buffer_t b2) -{ - return - b1 != NULL && - b2 != NULL && - b1->length == b2->length && - (memcmp(b1->value,b2->value,b1->length) == 0); -} - -*/ -import "C" - -import ( - "errors" - "unsafe" -) - -// ErrMallocFailed is returned when the malloc call has failed. -var ErrMallocFailed = errors.New("malloc failed, out of memory?") - -// MakeBuffer returns a Buffer with an empty malloc-ed gss_buffer_desc in it. -// The return value must be .Release()-ed -func (lib *Lib) MakeBuffer(alloc int) (*Buffer, error) { - s := C.malloc(C.gss_buffer_size) - if s == nil { - return nil, ErrMallocFailed - } - C.memset(s, 0, C.gss_buffer_size) - - b := &Buffer{ - Lib: lib, - C_gss_buffer_t: C.gss_buffer_t(s), - alloc: alloc, - } - return b, nil -} - -// MakeBufferBytes makes a Buffer encapsulating a byte slice. -func (lib *Lib) MakeBufferBytes(data []byte) (*Buffer, error) { - if len(data) == 0 { - return lib.GSS_C_NO_BUFFER, nil - } - - // have to allocate the memory in C land and copy - b, err := lib.MakeBuffer(allocMalloc) - if err != nil { - return nil, err - } - - l := C.size_t(len(data)) - c := C.malloc(l) - if b == nil { - return nil, ErrMallocFailed - } - C.memmove(c, (unsafe.Pointer)(&data[0]), l) - - b.C_gss_buffer_t.length = l - b.C_gss_buffer_t.value = c - b.alloc = allocMalloc - - return b, nil -} - -// MakeBufferString makes a Buffer encapsulating the contents of a string. -func (lib *Lib) MakeBufferString(content string) (*Buffer, error) { - return lib.MakeBufferBytes([]byte(content)) -} - -// Release safely frees the contents of a Buffer. -func (b *Buffer) Release() error { - if b == nil || b.C_gss_buffer_t == nil { - return nil - } - - defer func() { - C.free(unsafe.Pointer(b.C_gss_buffer_t)) - b.C_gss_buffer_t = nil - b.alloc = allocNone - }() - - // free the value as needed - switch { - case b.C_gss_buffer_t.value == nil: - // do nothing - - case b.alloc == allocMalloc: - C.free(b.C_gss_buffer_t.value) - - case b.alloc == allocGSSAPI: - var min C.OM_uint32 - maj := C.wrap_gss_release_buffer(b.Fp_gss_release_buffer, &min, b.C_gss_buffer_t) - err := b.stashLastStatus(maj, min) - if err != nil { - return err - } - } - - return nil -} - -// Length returns the number of bytes in the Buffer. -func (b *Buffer) Length() int { - if b == nil || b.C_gss_buffer_t == nil || b.C_gss_buffer_t.length == 0 { - return 0 - } - return int(b.C_gss_buffer_t.length) -} - -// Bytes returns the contents of a Buffer as a byte slice. 
-func (b *Buffer) Bytes() []byte { - if b == nil || b.C_gss_buffer_t == nil || b.C_gss_buffer_t.length == 0 { - return make([]byte, 0) - } - return C.GoBytes(b.C_gss_buffer_t.value, C.int(b.C_gss_buffer_t.length)) -} - -// String returns the contents of a Buffer as a string. -func (b *Buffer) String() string { - if b == nil || b.C_gss_buffer_t == nil || b.C_gss_buffer_t.length == 0 { - return "" - } - return C.GoStringN((*C.char)(b.C_gss_buffer_t.value), C.int(b.C_gss_buffer_t.length)) -} - -// Name converts a Buffer representing a name into a Name (internal opaque -// representation) using the specified nametype. -func (b Buffer) Name(nametype *OID) (*Name, error) { - var min C.OM_uint32 - var result C.gss_name_t - - maj := C.wrap_gss_import_name(b.Fp_gss_import_name, &min, - b.C_gss_buffer_t, nametype.C_gss_OID, &result) - err := b.stashLastStatus(maj, min) - if err != nil { - return nil, err - } - - n := &Name{ - Lib: b.Lib, - C_gss_name_t: result, - } - return n, nil -} - -// Equal determines if a Buffer receiver is equivalent to the supplied Buffer. -func (b *Buffer) Equal(other *Buffer) bool { - isEqual := C.wrap_gss_buffer_equal(b.C_gss_buffer_t, other.C_gss_buffer_t) - return isEqual != 0 -} diff --git a/vendor/github.com/apcera/gssapi/buffer_test.go b/vendor/github.com/apcera/gssapi/buffer_test.go deleted file mode 100644 index fb3d5364a4c4..000000000000 --- a/vendor/github.com/apcera/gssapi/buffer_test.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. - -package gssapi - -import ( - "bytes" - "testing" -) - -func TestNewBuffer(t *testing.T) { - l, err := testLoad() - if err != nil { - t.Fatal(err) - } - defer l.Unload() - - for a := range []int{allocNone, allocMalloc, allocGSSAPI} { - b, err := l.MakeBuffer(a) - if err != nil { - t.Fatalf("alloc: %v: %s", a, err) - } - defer b.Release() - - if b == nil { - t.Fatalf("alloc: %v: Got nil, expected non-nil", a) - } - if b.Lib != l { - t.Fatalf("alloc: %v: b.Lib didn't get set correctly, got %p, expected %p", - a, b.Lib, l) - } - if b.C_gss_buffer_t == nil { - t.Fatalf("alloc: %v: Got nil buffer, expected non-nil", a) - } - if b.String() != "" { - t.Fatalf(`alloc: %v: String(): got %q, expected ""`, - a, b.String()) - } - } -} - -// Also tests MakeBufferBytes, implicitly -func TestMakeBufferString(t *testing.T) { - l, err := testLoad() - if err != nil { - t.Fatal(err) - } - defer l.Unload() - - test := "testing" - b, err := l.MakeBufferString(test) - if err != nil { - t.Fatal(err) - } - defer b.Release() - - if b == nil { - t.Fatal("Got nil, expected non-nil") - } - if b.Lib != l { - t.Fatalf("b.Lib didn't get set correctly, got %p, expected %p", b.Lib, l) - } - if b.String() != test { - t.Fatalf("Got %q, expected %q", b.String(), test) - } else if !bytes.Equal(b.Bytes(), []byte(test)) { - t.Fatalf("Got '%v'; expected '%v'", b.Bytes(), []byte(test)) - } -} diff --git a/vendor/github.com/apcera/gssapi/consts.go b/vendor/github.com/apcera/gssapi/consts.go deleted file mode 100644 index 1820fd0b904b..000000000000 --- a/vendor/github.com/apcera/gssapi/consts.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. - -// A number of constants for C binding of GSSAPI. -// -// Unless otherwise stated, values come from RFC 2744 Appendix A. -// -// See also the GSS_S_* values in status.go, together with some related GSS_C_* -// values. 
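The deleted buffer.go above owns the malloc/copy/release lifecycle for gss_buffer_desc and the conversion of a readable name to GSSAPI's internal form. A hedged usage sketch tying the two together: MakeBufferString, Release, Unload, and Buffer.Name appear in the deleted sources, while gssapi.Load, the nil-Options default, GSS_C_NT_HOSTBASED_SERVICE, and Name.Release are assumptions about parts of the package outside this diff.

```go
// +build linux darwin

package main

import (
	"log"

	"github.com/apcera/gssapi"
)

func main() {
	// Load dlopens the underlying GSSAPI C library; Load/Options live outside
	// the files shown in this diff, so treat the nil default as an assumption.
	lib, err := gssapi.Load(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer lib.Unload()

	// MakeBufferString mallocs a gss_buffer_desc in C memory and copies the
	// Go string into it; the buffer must be Release()-d.
	buf, err := lib.MakeBufferString("host@server.example.com")
	if err != nil {
		log.Fatal(err)
	}
	defer buf.Release()

	// Convert the readable name to internal form via gss_import_name; the
	// OID field name is an assumption from other files in the package.
	name, err := buf.Name(lib.GSS_C_NT_HOSTBASED_SERVICE)
	if err != nil {
		log.Fatal(err)
	}
	defer name.Release() // Release assumed by analogy with Buffer/CtxId/CredId
}
```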
- -package gssapi - -/* -#include -*/ -import "C" - -import ( - "time" -) - -// Flag bits for context-level services -const ( - GSS_C_DELEG_FLAG uint32 = 1 - GSS_C_MUTUAL_FLAG = 2 - GSS_C_REPLAY_FLAG = 4 - GSS_C_SEQUENCE_FLAG = 8 - GSS_C_CONF_FLAG = 16 - GSS_C_INTEG_FLAG = 32 - GSS_C_ANON_FLAG = 64 - GSS_C_PROT_READY_FLAG = 128 - GSS_C_TRANS_FLAG = 256 -) - -// Credential usage options -const ( - GSS_C_BOTH CredUsage = 0 - GSS_C_INITIATE = 1 - GSS_C_ACCEPT = 2 -) - -// Status code types for gss_display_status -const ( - GSS_C_GSS_CODE int = 1 - GSS_C_MECH_CODE = 2 -) - -// The constant definitions for channel-bindings address families -const ( - GSS_C_AF_UNSPEC ChannelBindingAddressFamily = 0 - GSS_C_AF_LOCAL = 1 - GSS_C_AF_INET = 2 - GSS_C_AF_IMPLINK = 3 - GSS_C_AF_PUP = 4 - GSS_C_AF_CHAOS = 5 - GSS_C_AF_NS = 6 - GSS_C_AF_NBS = 7 - GSS_C_AF_ECMA = 8 - GSS_C_AF_DATAKIT = 9 - GSS_C_AF_CCITT = 10 - GSS_C_AF_SNA = 11 - GSS_C_AF_DECnet = 12 - GSS_C_AF_DLI = 13 - GSS_C_AF_LAT = 14 - GSS_C_AF_HYLINK = 15 - GSS_C_AF_APPLETALK = 16 - GSS_C_AF_BSC = 17 - GSS_C_AF_DSS = 18 - GSS_C_AF_OSI = 19 - GSS_C_AF_X25 = 21 - GSS_C_AF_INET6 = 24 - GSS_C_AF_NULLADDR = 255 - - // Note: GSS_C_AF_INET6 is not in RFC2744 and not in MIT Kerberos. - // The value here is from Heimdal. - // Searching reveals that at IETF-64 the Kitten WG discussed the lack of - // GSS_C_AF_INET6 and problems with standardising, but I can find no - // further reference to standardising the value. - // MIT does not have such a value, there are suggestions that GSS_C_AF_INET - // is used instead. If this CB value is actually used, interoperability - // must be ... "limited". - // - // Fiat decision: adopt the Heimdal value. -) - -const ( - // Quality Of Protection - GSS_C_QOP_DEFAULT = 0 - - // Infinite Lifetime, defined as 2^32-1 - GSS_C_INDEFINITE = 0xffffffff * time.Second -) diff --git a/vendor/github.com/apcera/gssapi/context.go b/vendor/github.com/apcera/gssapi/context.go deleted file mode 100644 index 87d82fb04851..000000000000 --- a/vendor/github.com/apcera/gssapi/context.go +++ /dev/null @@ -1,372 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. 
- -package gssapi - -// This file provides GSSContext methods - -/* -#include - -OM_uint32 -wrap_gss_init_sec_context(void *fp, - OM_uint32 * minor_status, - const gss_cred_id_t initiator_cred_handle, - gss_ctx_id_t * context_handle, - const gss_name_t target_name, - const gss_OID mech_type, - OM_uint32 req_flags, - OM_uint32 time_req, - const gss_channel_bindings_t input_chan_bindings, - const gss_buffer_t input_token, - gss_OID * actual_mech_type, - gss_buffer_t output_token, - OM_uint32 * ret_flags, - OM_uint32 * time_rec) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - const gss_cred_id_t, - gss_ctx_id_t *, - const gss_name_t, - const gss_OID, - OM_uint32, - OM_uint32, - const gss_channel_bindings_t, - const gss_buffer_t, - gss_OID *, - gss_buffer_t, - OM_uint32 *, - OM_uint32 *) - ) fp)( - minor_status, - initiator_cred_handle, - context_handle, - target_name, - mech_type, - req_flags, - time_req, - input_chan_bindings, - input_token, - actual_mech_type, - output_token, - ret_flags, - time_rec); -} - -OM_uint32 -wrap_gss_accept_sec_context(void *fp, - OM_uint32 * minor_status, - gss_ctx_id_t * context_handle, - const gss_cred_id_t acceptor_cred_handle, - const gss_buffer_t input_token_buffer, - const gss_channel_bindings_t input_chan_bindings, - gss_name_t * src_name, - gss_OID * mech_type, - gss_buffer_t output_token, - OM_uint32 * ret_flags, - OM_uint32 * time_rec, - gss_cred_id_t * delegated_cred_handle) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - gss_ctx_id_t *, - const gss_cred_id_t, - const gss_buffer_t, - const gss_channel_bindings_t, - gss_name_t *, - gss_OID *, - gss_buffer_t, - OM_uint32 *, - OM_uint32 *, - gss_cred_id_t *) - ) fp)( - minor_status, - context_handle, - acceptor_cred_handle, - input_token_buffer, - input_chan_bindings, - src_name, - mech_type, - output_token, - ret_flags, - time_rec, - delegated_cred_handle); -} - -OM_uint32 -wrap_gss_delete_sec_context(void *fp, - OM_uint32 * minor_status, - gss_ctx_id_t * context_handle, - gss_buffer_t output_token) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - gss_ctx_id_t *, - gss_buffer_t) - ) fp)( - minor_status, - context_handle, - output_token); -} - -OM_uint32 -wrap_gss_inquire_context(void *fp, - OM_uint32 * minor_status, - const gss_ctx_id_t context_handle, - gss_name_t * src_name, - gss_name_t * targ_name, - OM_uint32 * lifetime_rec, - gss_OID * mech_type, - OM_uint32 * ctx_flags, - int * locally_initiated, - int * open) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - const gss_ctx_id_t, - gss_name_t *, - gss_name_t *, - OM_uint32 *, - gss_OID *, - OM_uint32 *, - int *, - int *) - ) fp)( - minor_status, - context_handle, - src_name, - targ_name, - lifetime_rec, - mech_type, - ctx_flags, - locally_initiated, - open); -} - - -*/ -import "C" - -import ( - "runtime" - "time" -) - -func (lib *Lib) NewCtxId() *CtxId { - return &CtxId{ - Lib: lib, - } -} - -// InitSecContext initiates a security context. Usually invoked by the client. -// A Context (CtxId) describes the state at one end of an authentication -// protocol. 
May return ErrContinueNeeded if the client is to make another -// iteration of exchanging token with the service -func (lib *Lib) InitSecContext(initiatorCredHandle *CredId, ctxIn *CtxId, - targetName *Name, mechType *OID, reqFlags uint32, timeReq time.Duration, - inputChanBindings ChannelBindings, inputToken *Buffer) ( - ctxOut *CtxId, actualMechType *OID, outputToken *Buffer, retFlags uint32, - timeRec time.Duration, err error) { - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - // prepare the input params - C_initiator := C.gss_cred_id_t(nil) - if initiatorCredHandle != nil { - C_initiator = initiatorCredHandle.C_gss_cred_id_t - } - - C_mechType := C.gss_OID(nil) - if mechType != nil { - C_mechType = mechType.C_gss_OID - } - - C_inputToken := C.gss_buffer_t(nil) - if inputToken != nil { - C_inputToken = inputToken.C_gss_buffer_t - } - - // prepare the outputs. - if ctxIn != nil { - ctxCopy := *ctxIn - ctxOut = &ctxCopy - } else { - ctxOut = lib.NewCtxId() - } - - min := C.OM_uint32(0) - actualMechType = lib.NewOID() - outputToken, err = lib.MakeBuffer(allocGSSAPI) - if err != nil { - return nil, nil, nil, 0, 0, err - } - - flags := C.OM_uint32(0) - timerec := C.OM_uint32(0) - - maj := C.wrap_gss_init_sec_context(lib.Fp_gss_init_sec_context, - &min, - C_initiator, - &ctxOut.C_gss_ctx_id_t, // used as both in and out param - targetName.C_gss_name_t, - C_mechType, - C.OM_uint32(reqFlags), - C.OM_uint32(timeReq.Seconds()), - C.gss_channel_bindings_t(inputChanBindings), - C_inputToken, - &actualMechType.C_gss_OID, - outputToken.C_gss_buffer_t, - &flags, - &timerec) - - err = lib.stashLastStatus(maj, min) - if err != nil { - return nil, nil, nil, 0, 0, err - } - - if MajorStatus(maj).ContinueNeeded() { - err = ErrContinueNeeded - } - - return ctxOut, actualMechType, outputToken, - uint32(flags), time.Duration(timerec) * time.Second, - err -} - -// AcceptSecContext accepts an initialized security context. Usually called by -// the server. 
May return ErrContinueNeeded if the client is to make another -// iteration of exchanging token with the service -func (lib *Lib) AcceptSecContext( - ctxIn *CtxId, acceptorCredHandle *CredId, inputToken *Buffer, - inputChanBindings ChannelBindings) ( - ctxOut *CtxId, srcName *Name, actualMechType *OID, outputToken *Buffer, - retFlags uint32, timeRec time.Duration, delegatedCredHandle *CredId, - err error) { - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - // prepare the inputs - C_acceptorCredHandle := C.gss_cred_id_t(nil) - if acceptorCredHandle != nil { - C_acceptorCredHandle = acceptorCredHandle.C_gss_cred_id_t - } - - C_inputToken := C.gss_buffer_t(nil) - if inputToken != nil { - C_inputToken = inputToken.C_gss_buffer_t - } - - // prepare the outputs - if ctxIn != nil { - ctxCopy := *ctxIn - ctxOut = &ctxCopy - } else { - ctxOut = lib.GSS_C_NO_CONTEXT - } - - min := C.OM_uint32(0) - srcName = lib.NewName() - actualMechType = lib.NewOID() - outputToken, err = lib.MakeBuffer(allocGSSAPI) - if err != nil { - return nil, nil, nil, nil, 0, 0, nil, err - } - flags := C.OM_uint32(0) - timerec := C.OM_uint32(0) - delegatedCredHandle = lib.NewCredId() - - maj := C.wrap_gss_accept_sec_context(lib.Fp_gss_accept_sec_context, - &min, - &ctxOut.C_gss_ctx_id_t, // used as both in and out param - C_acceptorCredHandle, - C_inputToken, - C.gss_channel_bindings_t(inputChanBindings), - &srcName.C_gss_name_t, - &actualMechType.C_gss_OID, - outputToken.C_gss_buffer_t, - &flags, - &timerec, - &delegatedCredHandle.C_gss_cred_id_t) - - err = lib.stashLastStatus(maj, min) - if err != nil { - lib.Err("AcceptSecContext: ", err) - return nil, nil, nil, nil, 0, 0, nil, err - } - - if MajorStatus(maj).ContinueNeeded() { - err = ErrContinueNeeded - } - - return ctxOut, srcName, actualMechType, outputToken, uint32(flags), - time.Duration(timerec) * time.Second, delegatedCredHandle, err -} - -// DeleteSecContext frees a security context. -// NB: I decided not to implement the outputToken parameter since its use is no -// longer recommended, and it would have to be Released by the caller -func (ctx *CtxId) DeleteSecContext() error { - if ctx == nil || ctx.C_gss_ctx_id_t == nil { - return nil - } - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - min := C.OM_uint32(0) - maj := C.wrap_gss_delete_sec_context(ctx.Fp_gss_delete_sec_context, - &min, &ctx.C_gss_ctx_id_t, nil) - - return ctx.stashLastStatus(maj, min) -} - -// Release is an alias for DeleteSecContext. -func (ctx *CtxId) Release() error { - return ctx.DeleteSecContext() -} - -// InquireContext returns fields about a security context. 
-func (ctx *CtxId) InquireContext() ( - srcName *Name, targetName *Name, lifetimeRec time.Duration, mechType *OID, - ctxFlags uint64, locallyInitiated bool, open bool, err error) { - - min := C.OM_uint32(0) - srcName = ctx.NewName() - targetName = ctx.NewName() - rec := C.OM_uint32(0) - mechType = ctx.NewOID() - flags := C.OM_uint32(0) - li := C.int(0) - opn := C.int(0) - - maj := C.wrap_gss_inquire_context(ctx.Fp_gss_inquire_context, - &min, - ctx.C_gss_ctx_id_t, - &srcName.C_gss_name_t, - &targetName.C_gss_name_t, - &rec, - &mechType.C_gss_OID, - &flags, - &li, - &opn) - - err = ctx.stashLastStatus(maj, min) - if err != nil { - ctx.Err("InquireContext: ", err) - return nil, nil, 0, nil, 0, false, false, err - } - - lifetimeRec = time.Duration(rec) * time.Second - ctxFlags = uint64(flags) - - if li != 0 { - locallyInitiated = true - } - if opn != 0 { - open = true - } - - return srcName, targetName, lifetimeRec, mechType, ctxFlags, locallyInitiated, open, nil -} diff --git a/vendor/github.com/apcera/gssapi/credential.go b/vendor/github.com/apcera/gssapi/credential.go deleted file mode 100644 index 62fa7a648f28..000000000000 --- a/vendor/github.com/apcera/gssapi/credential.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright 2013 Apcera Inc. All rights reserved. - -package gssapi - -/* -#include - -OM_uint32 -wrap_gss_acquire_cred(void *fp, - OM_uint32 * minor_status, - const gss_name_t desired_name, - OM_uint32 time_req, - const gss_OID_set desired_mechs, - gss_cred_usage_t cred_usage, - gss_cred_id_t * output_cred_handle, - gss_OID_set * actual_mechs, - OM_uint32 * time_rec) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - const gss_name_t, - OM_uint32, - const gss_OID_set, - gss_cred_usage_t, - gss_cred_id_t *, - gss_OID_set *, - OM_uint32 *) - ) fp)( - minor_status, - desired_name, - time_req, - desired_mechs, - cred_usage, - output_cred_handle, - actual_mechs, - time_rec); -} - -OM_uint32 -wrap_gss_add_cred(void *fp, - OM_uint32 * minor_status, - const gss_cred_id_t input_cred_handle, - const gss_name_t desired_name, - const gss_OID desired_mech, - gss_cred_usage_t cred_usage, - OM_uint32 initiator_time_req, - OM_uint32 acceptor_time_req, - gss_cred_id_t * output_cred_handle, - gss_OID_set * actual_mechs, - OM_uint32 * initiator_time_rec, - OM_uint32 * acceptor_time_rec) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - const gss_cred_id_t, - const gss_name_t, - const gss_OID, - gss_cred_usage_t, - OM_uint32, - OM_uint32, - gss_cred_id_t *, - gss_OID_set *, - OM_uint32 *, - OM_uint32 *) - ) fp)( - minor_status, - input_cred_handle, - desired_name, - desired_mech, - cred_usage, - initiator_time_req, - acceptor_time_req, - output_cred_handle, - actual_mechs, - initiator_time_rec, - acceptor_time_rec); -} - -OM_uint32 -wrap_gss_inquire_cred (void *fp, - OM_uint32 *minor_status, - const gss_cred_id_t cred_handle, - gss_name_t *name, - OM_uint32 *lifetime, - gss_cred_usage_t *cred_usage, - gss_OID_set *mechanisms ) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - const gss_cred_id_t, - gss_name_t *, - OM_uint32 *, - gss_cred_usage_t *, - gss_OID_set *) - ) fp)( - minor_status, - cred_handle, - name, - lifetime, - cred_usage, - mechanisms); -} - -OM_uint32 -wrap_gss_inquire_cred_by_mech (void *fp, - OM_uint32 *minor_status, - const gss_cred_id_t cred_handle, - const gss_OID mech_type, - gss_name_t *name, - OM_uint32 *initiator_lifetime, - OM_uint32 *acceptor_lifetime, - gss_cred_usage_t *cred_usage ) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - const gss_cred_id_t, - const gss_OID, - gss_name_t *, - 
OM_uint32 *, - OM_uint32 *, - gss_cred_usage_t *) - ) fp)( - minor_status, - cred_handle, - mech_type, - name, - initiator_lifetime, - acceptor_lifetime, - cred_usage); -} - -OM_uint32 -wrap_gss_release_cred(void *fp, - OM_uint32 * minor_status, - gss_cred_id_t * cred_handle) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - gss_cred_id_t *) - ) fp)( - minor_status, - cred_handle); -} - -*/ -import "C" - -import ( - "time" -) - -// NewCredId instantiates a new credential. -func (lib *Lib) NewCredId() *CredId { - return &CredId{ - Lib: lib, - } -} - -// AcquireCred implements gss_acquire_cred API, as per -// https://tools.ietf.org/html/rfc2743#page-31. outputCredHandle, actualMechs -// must be .Release()-ed by the caller -func (lib *Lib) AcquireCred(desiredName *Name, timeReq time.Duration, - desiredMechs *OIDSet, credUsage CredUsage) (outputCredHandle *CredId, - actualMechs *OIDSet, timeRec time.Duration, err error) { - - min := C.OM_uint32(0) - actualMechs = lib.NewOIDSet() - outputCredHandle = lib.NewCredId() - timerec := C.OM_uint32(0) - - maj := C.wrap_gss_acquire_cred(lib.Fp_gss_acquire_cred, - &min, - desiredName.C_gss_name_t, - C.OM_uint32(timeReq.Seconds()), - desiredMechs.C_gss_OID_set, - C.gss_cred_usage_t(credUsage), - &outputCredHandle.C_gss_cred_id_t, - &actualMechs.C_gss_OID_set, - &timerec) - - err = lib.stashLastStatus(maj, min) - if err != nil { - return nil, nil, 0, err - } - - return outputCredHandle, actualMechs, time.Duration(timerec) * time.Second, nil -} - -// AddCred implements gss_add_cred API, as per -// https://tools.ietf.org/html/rfc2743#page-36. outputCredHandle, actualMechs -// must be .Release()-ed by the caller -func (lib *Lib) AddCred(inputCredHandle *CredId, - desiredName *Name, desiredMech *OID, credUsage CredUsage, - initiatorTimeReq time.Duration, acceptorTimeReq time.Duration) ( - outputCredHandle *CredId, actualMechs *OIDSet, - initiatorTimeRec time.Duration, acceptorTimeRec time.Duration, - err error) { - - min := C.OM_uint32(0) - actualMechs = lib.NewOIDSet() - outputCredHandle = lib.NewCredId() - initSeconds := C.OM_uint32(0) - acceptSeconds := C.OM_uint32(0) - - maj := C.wrap_gss_add_cred(lib.Fp_gss_add_cred, - &min, - inputCredHandle.C_gss_cred_id_t, - desiredName.C_gss_name_t, - desiredMech.C_gss_OID, - C.gss_cred_usage_t(credUsage), - C.OM_uint32(initiatorTimeReq.Seconds()), - C.OM_uint32(acceptorTimeReq.Seconds()), - &outputCredHandle.C_gss_cred_id_t, - &actualMechs.C_gss_OID_set, - &initSeconds, - &acceptSeconds) - - err = lib.stashLastStatus(maj, min) - if err != nil { - return nil, nil, 0, 0, err - } - - return outputCredHandle, - actualMechs, - time.Duration(initSeconds) * time.Second, - time.Duration(acceptSeconds) * time.Second, - nil -} - -// InquireCred implements gss_inquire_cred API, as per -// https://tools.ietf.org/html/rfc2743#page-34. 
name and mechanisms must be -// .Release()-ed by the caller -func (lib *Lib) InquireCred(credHandle *CredId) ( - name *Name, lifetime time.Duration, credUsage CredUsage, mechanisms *OIDSet, - err error) { - - min := C.OM_uint32(0) - name = lib.NewName() - life := C.OM_uint32(0) - credUsage = CredUsage(0) - mechanisms = lib.NewOIDSet() - - maj := C.wrap_gss_inquire_cred(lib.Fp_gss_inquire_cred, - &min, - credHandle.C_gss_cred_id_t, - &name.C_gss_name_t, - &life, - (*C.gss_cred_usage_t)(&credUsage), - &mechanisms.C_gss_OID_set) - err = lib.stashLastStatus(maj, min) - if err != nil { - return nil, 0, 0, nil, err - } - - return name, - time.Duration(life) * time.Second, - credUsage, - mechanisms, - nil -} - -// InquireCredByMech implements gss_inquire_cred_by_mech API, as per -// https://tools.ietf.org/html/rfc2743#page-39. name must be .Release()-ed by -// the caller -func (lib *Lib) InquireCredByMech(credHandle *CredId, mechType *OID) ( - name *Name, initiatorLifetime time.Duration, acceptorLifetime time.Duration, - credUsage CredUsage, err error) { - - min := C.OM_uint32(0) - name = lib.NewName() - ilife := C.OM_uint32(0) - alife := C.OM_uint32(0) - credUsage = CredUsage(0) - - maj := C.wrap_gss_inquire_cred_by_mech(lib.Fp_gss_inquire_cred_by_mech, - &min, - credHandle.C_gss_cred_id_t, - mechType.C_gss_OID, - &name.C_gss_name_t, - &ilife, - &alife, - (*C.gss_cred_usage_t)(&credUsage)) - err = lib.stashLastStatus(maj, min) - if err != nil { - return nil, 0, 0, 0, err - } - - return name, - time.Duration(ilife) * time.Second, - time.Duration(alife) * time.Second, - credUsage, - nil -} - -// Release frees a credential. -func (c *CredId) Release() error { - if c == nil || c.C_gss_cred_id_t == nil { - return nil - } - - min := C.OM_uint32(0) - maj := C.wrap_gss_release_cred(c.Fp_gss_release_cred, - &min, - &c.C_gss_cred_id_t) - - return c.stashLastStatus(maj, min) -} - -//TODO: Test for AddCred with existing cred diff --git a/vendor/github.com/apcera/gssapi/doc.go b/vendor/github.com/apcera/gssapi/doc.go deleted file mode 100644 index 0a5ef0163ca1..000000000000 --- a/vendor/github.com/apcera/gssapi/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. - -/* -This is a GSSAPI provider for Go, which expects to be initialized with the name -of a dynamically loadable module which can be dlopen'd to get at a C language -binding GSSAPI library. - -The GSSAPI concepts are explained in RFC 2743, "Generic Security Service -Application Program Interface Version 2, Update 1". - -The API calls for C, together with a number of values for constants, come from -RFC 2744, "Generic Security Service API Version 2 : C-bindings". - -Note that the basic GSSAPI bindings for C use the Latin-1 character set. UTF-8 -interfaces are specified in RFC 5178, "Generic Security Service Application -Program Interface (GSS-API) Internationalization and Domain-Based Service Names -and Name Type", in 2008. Looking in 2013, this API does not appear to be -provided by either MIT or Heimdal. This API applies solely to hostnames -though, which can also be supplied in ACE encoding, bypassing the issue. - -For now, we assume that hostnames and usercodes are all ASCII-ish and pass -UTF-8 into the library. Patches for more comprehensive support welcome. 
-*/
-package gssapi
diff --git a/vendor/github.com/apcera/gssapi/gss_types.go b/vendor/github.com/apcera/gssapi/gss_types.go
deleted file mode 100644
index 0b4c2aaf725e..000000000000
--- a/vendor/github.com/apcera/gssapi/gss_types.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2013-2015 Apcera Inc. All rights reserved.
-
-// Wrappers for the main gssapi types, all in one file for consistency.
-
-package gssapi
-
-/*
-#include <gssapi/gssapi.h>
-*/
-import "C"
-
-// Struct types. The structs themselves are allocated in Go and are therefore
-// GCed; the contents may come from C/gssapi calls, and therefore must be
-// explicitly released. Calling the Release method is safe on uninitialized
-// objects and nil pointers.
-
-const (
-	allocNone = iota
-	allocMalloc
-	allocGSSAPI
-)
-
-// A Buffer is an underlying C buffer represented in Go. Must be .Release'd.
-type Buffer struct {
-	*Lib
-	C_gss_buffer_t C.gss_buffer_t
-
-	// indicates if the contents of the buffer must be released with
-	// gss_release_buffer (allocGSSAPI) or free-ed (allocMalloc)
-	alloc int
-}
-
-// A Name represents a binary string labeling a security principal. In the case
-// of Kerberos, this could be a name like 'user@EXAMPLE.COM'.
-type Name struct {
-	*Lib
-	C_gss_name_t C.gss_name_t
-}
-
-// An OID is the wrapper for the gss_OID_desc type. IMPORTANT: In gssapi, OIDs
-// are not released explicitly, only as part of an OIDSet. However we malloc the
-// OID bytes ourselves, so we need to free them. To keep it simple, assume that
-// OIDs obtained from gssapi must be Release()-ed; Release is safely ignored on
-// those allocated by gssapi itself.
-type OID struct {
-	*Lib
-	C_gss_OID C.gss_OID
-
-	// indicates if the contents of the OID must be released with
-	// gss_release_buffer (allocGSSAPI) or free-ed (allocMalloc)
-	alloc int
-}
-
-// An OIDSet is a set of OIDs.
-type OIDSet struct {
-	*Lib
-	C_gss_OID_set C.gss_OID_set
-}
-
-// A CredId represents information like a cryptographic secret. In Kerberos,
-// this likely represents a keytab.
-type CredId struct {
-	*Lib
-	C_gss_cred_id_t C.gss_cred_id_t
-}
-
-// A CtxId represents a security context. Contexts maintain the state of one end
-// of an authentication protocol.
-type CtxId struct {
-	*Lib
-	C_gss_ctx_id_t C.gss_ctx_id_t
-}
-
-// Aliases for the simple types
-type CredUsage C.gss_cred_usage_t // C.int
-type ChannelBindingAddressFamily uint32
-type QOP C.OM_uint32
-
-// A struct pointer technically, but not really used yet, and it's a static,
-// non-releasable struct, so an alias will suffice
-type ChannelBindings C.gss_channel_bindings_t
diff --git a/vendor/github.com/apcera/gssapi/lib.go b/vendor/github.com/apcera/gssapi/lib.go
deleted file mode 100644
index 4dd01dca1c53..000000000000
--- a/vendor/github.com/apcera/gssapi/lib.go
+++ /dev/null
@@ -1,394 +0,0 @@
-// Copyright 2013-2015 Apcera Inc. All rights reserved.
-
-// +build darwin linux freebsd
-
-package gssapi
-
-/*
-#cgo linux LDFLAGS: -ldl
-#cgo freebsd pkg-config: heimdal-gssapi
-
-#include <dlfcn.h>
-#include <stdlib.h>
-#include <gssapi/gssapi.h>
-
-// Name-Types. These are standardized in the RFCs. The library requires that
-// a given name be usable for resolution, but it's typically a macro, so there
-// is no guarantee about the name exported from the library. But since they're
-// static, and well-defined, we can just define them ourselves.
- -// RFC2744-mandated values, mapping from as-near-as-possible to cut&paste -const gss_OID_desc *_GSS_C_NT_USER_NAME = & (gss_OID_desc) { 10, "\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x01" }; -const gss_OID_desc *_GSS_C_NT_MACHINE_UID_NAME = & (gss_OID_desc) { 10, "\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x02" }; -const gss_OID_desc *_GSS_C_NT_STRING_UID_NAME = & (gss_OID_desc) { 10, "\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x03" }; -const gss_OID_desc *_GSS_C_NT_HOSTBASED_SERVICE_X = & (gss_OID_desc) { 6, "\x2b\x06\x01\x05\x06\x02" }; -const gss_OID_desc *_GSS_C_NT_HOSTBASED_SERVICE = & (gss_OID_desc) { 10, "\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x04" }; -const gss_OID_desc *_GSS_C_NT_ANONYMOUS = & (gss_OID_desc) { 6, "\x2b\x06\x01\x05\x06\x03" }; // original had \01 -const gss_OID_desc *_GSS_C_NT_EXPORT_NAME = & (gss_OID_desc) { 6, "\x2b\x06\x01\x05\x06\x04" }; - -// from gssapi_krb5.h: This name form shall be represented by the Object -// Identifier {iso(1) member-body(2) United States(840) mit(113554) infosys(1) -// gssapi(2) krb5(2) krb5_name(1)}. The recommended symbolic name for this -// type is "GSS_KRB5_NT_PRINCIPAL_NAME". -const gss_OID_desc *_GSS_KRB5_NT_PRINCIPAL_NAME = & (gss_OID_desc) { 10, "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x01" }; - -// { 1 2 840 113554 1 2 2 2 } -const gss_OID_desc *_GSS_KRB5_NT_PRINCIPAL = & (gss_OID_desc) { 10, "\x2A\x86\x48\x86\xF7\x12\x01\x02\x02\x02" }; - -// known mech OIDs -const gss_OID_desc *_GSS_MECH_KRB5 = & (gss_OID_desc) { 9, "\x2A\x86\x48\x86\xF7\x12\x01\x02\x02" }; -const gss_OID_desc *_GSS_MECH_KRB5_LEGACY = & (gss_OID_desc) { 9, "\x2A\x86\x48\x82\xF7\x12\x01\x02\x02" }; -const gss_OID_desc *_GSS_MECH_KRB5_OLD = & (gss_OID_desc) { 5, "\x2B\x05\x01\x05\x02" }; -const gss_OID_desc *_GSS_MECH_SPNEGO = & (gss_OID_desc) { 6, "\x2b\x06\x01\x05\x05\x02" }; -const gss_OID_desc *_GSS_MECH_IAKERB = & (gss_OID_desc) { 6, "\x2b\x06\x01\x05\x02\x05" }; -const gss_OID_desc *_GSS_MECH_NTLMSSP = & (gss_OID_desc) { 10, "\x2b\x06\x01\x04\x01\x82\x37\x02\x02\x0a" }; - -*/ -import "C" - -import ( - "fmt" - "os" - "reflect" - "runtime" - "strings" - "unsafe" -) - -// Values for Options.LoadDefault -const ( - MIT = iota - Heimdal -) - -type Severity uint - -// Values for Options.Log severity indices -const ( - Emerg = Severity(iota) - Alert - Crit - Err - Warn - Notice - Info - Debug - MaxSeverity -) - -var severityNames = []string{ - "Emerg", - "Alert", - "Crit", - "Err", - "Warn", - "Notice", - "Info", - "Debug", -} - -// String returns the string name of a log Severity. -func (s Severity) String() string { - if s >= MaxSeverity { - return "" - } - return severityNames[s] -} - -// Printer matches the log package, not fmt -type Printer interface { - Print(a ...interface{}) -} - -// Options denote the options used to load a GSSAPI library. If a user supplies -// a LibPath, we use that. Otherwise, based upon the default and the current OS, -// we try to construct the library path. -type Options struct { - LibPath string - Krb5Config string - Krb5Ktname string - LoadDefault int - - Printers []Printer `json:"-"` -} - -// ftable fields will be initialized to the corresponding function pointers from -// the GSSAPI library. They must be of form Fp_function_name (Capital 'F' so -// that we can use reflect. 
-type ftable struct { - // buffer.go - Fp_gss_release_buffer unsafe.Pointer - Fp_gss_import_name unsafe.Pointer - - // context.go - Fp_gss_init_sec_context unsafe.Pointer - Fp_gss_accept_sec_context unsafe.Pointer - Fp_gss_delete_sec_context unsafe.Pointer - Fp_gss_process_context_token unsafe.Pointer - Fp_gss_context_time unsafe.Pointer - Fp_gss_inquire_context unsafe.Pointer - Fp_gss_wrap_size_limit unsafe.Pointer - Fp_gss_export_sec_context unsafe.Pointer - Fp_gss_import_sec_context unsafe.Pointer - - // credential.go - Fp_gss_acquire_cred unsafe.Pointer - Fp_gss_add_cred unsafe.Pointer - Fp_gss_inquire_cred unsafe.Pointer - Fp_gss_inquire_cred_by_mech unsafe.Pointer - Fp_gss_release_cred unsafe.Pointer - - // message.go - Fp_gss_get_mic unsafe.Pointer - Fp_gss_verify_mic unsafe.Pointer - Fp_gss_wrap unsafe.Pointer - Fp_gss_unwrap unsafe.Pointer - - // misc.go - Fp_gss_indicate_mechs unsafe.Pointer - - // name.go - Fp_gss_canonicalize_name unsafe.Pointer - Fp_gss_compare_name unsafe.Pointer - Fp_gss_display_name unsafe.Pointer - Fp_gss_duplicate_name unsafe.Pointer - Fp_gss_export_name unsafe.Pointer - Fp_gss_inquire_mechs_for_name unsafe.Pointer - Fp_gss_inquire_names_for_mech unsafe.Pointer - Fp_gss_release_name unsafe.Pointer - - // oid_set.go - Fp_gss_create_empty_oid_set unsafe.Pointer - Fp_gss_add_oid_set_member unsafe.Pointer - Fp_gss_release_oid_set unsafe.Pointer - Fp_gss_test_oid_set_member unsafe.Pointer - - // status.go - Fp_gss_display_status unsafe.Pointer - - // krb5_keytab.go -- where does this come from? - // Fp_gsskrb5_register_acceptor_identity unsafe.Pointer -} - -// constants are a number of constant initialized in initConstants. -type constants struct { - GSS_C_NO_BUFFER *Buffer - GSS_C_NO_OID *OID - GSS_C_NO_OID_SET *OIDSet - GSS_C_NO_CONTEXT *CtxId - GSS_C_NO_CREDENTIAL *CredId - - // when adding new OID constants also need to update OID.DebugString - GSS_C_NT_USER_NAME *OID - GSS_C_NT_MACHINE_UID_NAME *OID - GSS_C_NT_STRING_UID_NAME *OID - GSS_C_NT_HOSTBASED_SERVICE_X *OID - GSS_C_NT_HOSTBASED_SERVICE *OID - GSS_C_NT_ANONYMOUS *OID - GSS_C_NT_EXPORT_NAME *OID - GSS_KRB5_NT_PRINCIPAL_NAME *OID - GSS_KRB5_NT_PRINCIPAL *OID - GSS_MECH_KRB5 *OID - GSS_MECH_KRB5_LEGACY *OID - GSS_MECH_KRB5_OLD *OID - GSS_MECH_SPNEGO *OID - GSS_MECH_IAKERB *OID - GSS_MECH_NTLMSSP *OID - - GSS_C_NO_CHANNEL_BINDINGS ChannelBindings // implicitly initialized as nil -} - -// Lib encapsulates both the GSSAPI and the library dlopen()'d for it. The -// handle represents the dynamically-linked gssapi library handle. -type Lib struct { - LastStatus *Error - - // Should contain a gssapi.Printer for each severity level to be - // logged, up to gssapi.MaxSeverity items - Printers []Printer - - handle unsafe.Pointer - - ftable - constants -} - -const ( - fpPrefix = "Fp_" -) - -// Path returns the chosen gssapi library path that we're looking for. -func (o *Options) Path() string { - switch { - case o.LibPath != "": - return o.LibPath - - case o.LoadDefault == MIT: - return appendOSExt("libgssapi_krb5") - - case o.LoadDefault == Heimdal: - return appendOSExt("libgssapi") - } - return "" -} - -// Load attempts to load a dynamically-linked gssapi library from the path -// specified by the supplied Options. 
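// A minimal load/unload sketch from a caller's perspective (editorial
// illustration; the choice of the MIT default is an assumption, not a
// requirement):
//
//	lib, err := gssapi.Load(&gssapi.Options{LoadDefault: gssapi.MIT})
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer lib.Unload()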
-func Load(o *Options) (*Lib, error) { - if o == nil { - o = &Options{} - } - - // We get the error in a separate call, so we need to lock OS thread - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - lib := &Lib{ - Printers: o.Printers, - } - - if o.Krb5Config != "" { - err := os.Setenv("KRB5_CONFIG", o.Krb5Config) - if err != nil { - return nil, err - } - } - - if o.Krb5Ktname != "" { - err := os.Setenv("KRB5_KTNAME", o.Krb5Ktname) - if err != nil { - return nil, err - } - } - - path := o.Path() - lib.Debug(fmt.Sprintf("Loading %q", path)) - lib_cs := C.CString(path) - defer C.free(unsafe.Pointer(lib_cs)) - - // we don't use RTLD_FIRST, it might be the case that the GSSAPI lib - // delegates symbols to other libs it links against (eg, Kerberos) - lib.handle = C.dlopen(lib_cs, C.RTLD_NOW|C.RTLD_LOCAL) - if lib.handle == nil { - return nil, fmt.Errorf("%s", C.GoString(C.dlerror())) - } - - err := lib.populateFunctions() - if err != nil { - lib.Unload() - return nil, err - } - - lib.initConstants() - - return lib, nil -} - -// Unload closes the handle to the dynamically-linked gssapi library. -func (lib *Lib) Unload() error { - if lib == nil || lib.handle == nil { - return nil - } - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - i := C.dlclose(lib.handle) - if i == -1 { - return fmt.Errorf("%s", C.GoString(C.dlerror())) - } - - lib.handle = nil - return nil -} - -func appendOSExt(path string) string { - ext := ".so" - if runtime.GOOS == "darwin" { - ext = ".dylib" - } - if !strings.HasSuffix(path, ext) { - path += ext - } - return path -} - -// populateFunctions ranges over the library's ftable, initializing each -// function inside. Assumes that the caller executes runtime.LockOSThread. -func (lib *Lib) populateFunctions() error { - libT := reflect.TypeOf(lib.ftable) - functionsV := reflect.ValueOf(lib).Elem().FieldByName("ftable") - - n := libT.NumField() - for i := 0; i < n; i++ { - // Get the field name, and make sure it's an Fp_. - f := libT.FieldByIndex([]int{i}) - - if !strings.HasPrefix(f.Name, fpPrefix) { - return fmt.Errorf( - "Unexpected: field %q does not start with %q", - f.Name, fpPrefix) - } - - // Resolve the symbol. - cfname := C.CString(f.Name[len(fpPrefix):]) - v := C.dlsym(lib.handle, cfname) - C.free(unsafe.Pointer(cfname)) - if v == nil { - return fmt.Errorf("%s", C.GoString(C.dlerror())) - } - - // Save the value into the struct - functionsV.FieldByIndex([]int{i}).SetPointer(v) - } - - return nil -} - -// initConstants sets the initial values of a library's set of 'constants'. 
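// Once the constants below are initialized, the usual GSSAPI "no value"
// placeholders can be passed to the calls that accept them, e.g. (editorial
// sketch; cred and token are assumed to exist):
//
//	ctx, _, _, _, _, _, _, err := lib.AcceptSecContext(
//		lib.GSS_C_NO_CONTEXT, cred, token, lib.GSS_C_NO_CHANNEL_BINDINGS)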
-func (lib *Lib) initConstants() {
-	lib.GSS_C_NO_BUFFER = &Buffer{
-		Lib: lib,
-		// C_gss_buffer_t: C.GSS_C_NO_BUFFER, already nil
-		// alloc: allocNone, already 0
-	}
-	lib.GSS_C_NO_OID = lib.NewOID()
-	lib.GSS_C_NO_OID_SET = lib.NewOIDSet()
-	lib.GSS_C_NO_CONTEXT = lib.NewCtxId()
-	lib.GSS_C_NO_CREDENTIAL = lib.NewCredId()
-
-	lib.GSS_C_NT_USER_NAME = &OID{Lib: lib, C_gss_OID: C._GSS_C_NT_USER_NAME}
-	lib.GSS_C_NT_MACHINE_UID_NAME = &OID{Lib: lib, C_gss_OID: C._GSS_C_NT_MACHINE_UID_NAME}
-	lib.GSS_C_NT_STRING_UID_NAME = &OID{Lib: lib, C_gss_OID: C._GSS_C_NT_STRING_UID_NAME}
-	lib.GSS_C_NT_HOSTBASED_SERVICE_X = &OID{Lib: lib, C_gss_OID: C._GSS_C_NT_HOSTBASED_SERVICE_X}
-	lib.GSS_C_NT_HOSTBASED_SERVICE = &OID{Lib: lib, C_gss_OID: C._GSS_C_NT_HOSTBASED_SERVICE}
-	lib.GSS_C_NT_ANONYMOUS = &OID{Lib: lib, C_gss_OID: C._GSS_C_NT_ANONYMOUS}
-	lib.GSS_C_NT_EXPORT_NAME = &OID{Lib: lib, C_gss_OID: C._GSS_C_NT_EXPORT_NAME}
-
-	lib.GSS_KRB5_NT_PRINCIPAL_NAME = &OID{Lib: lib, C_gss_OID: C._GSS_KRB5_NT_PRINCIPAL_NAME}
-	lib.GSS_KRB5_NT_PRINCIPAL = &OID{Lib: lib, C_gss_OID: C._GSS_KRB5_NT_PRINCIPAL}
-
-	lib.GSS_MECH_KRB5 = &OID{Lib: lib, C_gss_OID: C._GSS_MECH_KRB5}
-	lib.GSS_MECH_KRB5_LEGACY = &OID{Lib: lib, C_gss_OID: C._GSS_MECH_KRB5_LEGACY}
-	lib.GSS_MECH_KRB5_OLD = &OID{Lib: lib, C_gss_OID: C._GSS_MECH_KRB5_OLD}
-	lib.GSS_MECH_SPNEGO = &OID{Lib: lib, C_gss_OID: C._GSS_MECH_SPNEGO}
-	lib.GSS_MECH_IAKERB = &OID{Lib: lib, C_gss_OID: C._GSS_MECH_IAKERB}
-	lib.GSS_MECH_NTLMSSP = &OID{Lib: lib, C_gss_OID: C._GSS_MECH_NTLMSSP}
-}
-
-// Print outputs a log line at the specified severity.
-func (lib *Lib) Print(level Severity, a ...interface{}) {
-	if lib == nil || lib.Printers == nil || level >= Severity(len(lib.Printers)) {
-		return
-	}
-	lib.Printers[level].Print(a...)
-}
-
-func (lib *Lib) Emerg(a ...interface{})  { lib.Print(Emerg, a...) }
-func (lib *Lib) Alert(a ...interface{})  { lib.Print(Alert, a...) }
-func (lib *Lib) Crit(a ...interface{})   { lib.Print(Crit, a...) }
-func (lib *Lib) Err(a ...interface{})    { lib.Print(Err, a...) }
-func (lib *Lib) Warn(a ...interface{})   { lib.Print(Warn, a...) }
-func (lib *Lib) Notice(a ...interface{}) { lib.Print(Notice, a...) }
-func (lib *Lib) Info(a ...interface{})   { lib.Print(Info, a...) }
-func (lib *Lib) Debug(a ...interface{})  { lib.Print(Debug, a...) }
diff --git a/vendor/github.com/apcera/gssapi/lib_test.go b/vendor/github.com/apcera/gssapi/lib_test.go
deleted file mode 100644
index 2ced40b81ded..000000000000
--- a/vendor/github.com/apcera/gssapi/lib_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2013-2015 Apcera Inc. All rights reserved.
-
-package gssapi
-
-import (
-	"fmt"
-	"log"
-	"os"
-	"testing"
-)
-
-func testLoad() (lib *Lib, err error) {
-	pp := make([]Printer, 0, MaxSeverity)
-	for i := Severity(0); i < MaxSeverity; i++ {
-		pp = append(pp, log.New(os.Stderr,
-			fmt.Sprintf("%s gssapi-test:\t", i),
-			log.LstdFlags))
-	}
-	return Load(&Options{
-		Printers: pp,
-	})
-}
-
-func TestLoadLib(t *testing.T) {
-	l, err := testLoad()
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer l.Unload()
-
-	if l.Fp_gss_export_name == nil {
-		t.Error("Fp_gss_export_name did not get initialized")
-	}
-
-	// TODO: maybe use reflect to enumerate all Fp's
-}
diff --git a/vendor/github.com/apcera/gssapi/message.go b/vendor/github.com/apcera/gssapi/message.go
deleted file mode 100644
index f3273d7e02d4..000000000000
--- a/vendor/github.com/apcera/gssapi/message.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2013-2015 Apcera Inc. All rights reserved.
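// An editorial sketch of the MIC round trip this file implements (ctx is an
// established *CtxId, msg a *Buffer; error handling elided):
//
//	token, _ := ctx.GetMIC(QOP(0), msg) // sign with the default QOP
//	_, err := ctx.VerifyMIC(msg, token) // err == nil means the MIC checks out
//	token.Release()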
- -package gssapi - -/* -#include - -OM_uint32 -wrap_gss_get_mic(void *fp, - OM_uint32 * minor_status, - const gss_ctx_id_t context_handle, - gss_qop_t qop_req, - const gss_buffer_t message_buffer, - gss_buffer_t message_token) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - const gss_ctx_id_t, - gss_qop_t, - const gss_buffer_t, - gss_buffer_t) - ) fp)( - minor_status, - context_handle, - qop_req, - message_buffer, - message_token); -} - -OM_uint32 -wrap_gss_verify_mic(void *fp, - OM_uint32 * minor_status, - const gss_ctx_id_t context_handle, - const gss_buffer_t message_buffer, - const gss_buffer_t token_buffer, - gss_qop_t * qop_state) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - const gss_ctx_id_t, - const gss_buffer_t, - const gss_buffer_t, - gss_qop_t *) - ) fp)( - minor_status, - context_handle, - message_buffer, - token_buffer, - qop_state); -} - -OM_uint32 -wrap_gss_wrap(void *fp, - OM_uint32 * minor_status, - const gss_ctx_id_t context_handle, - int conf_req_flag, - gss_qop_t qop_req, - const gss_buffer_t input_message_buffer, - int * conf_state, - gss_buffer_t output_message_buffer) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - const gss_ctx_id_t, - int, - gss_qop_t, - const gss_buffer_t, - int *, - gss_buffer_t) - ) fp)( - minor_status, - context_handle, - conf_req_flag, - qop_req, - input_message_buffer, - conf_state, - output_message_buffer); -} - -OM_uint32 -wrap_gss_unwrap(void *fp, - OM_uint32 * minor_status, - const gss_ctx_id_t context_handle, - const gss_buffer_t input_message_buffer, - gss_buffer_t output_message_buffer, - int * conf_state, - gss_qop_t * qop_state) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - const gss_ctx_id_t, - const gss_buffer_t, - gss_buffer_t, - int *, - gss_qop_t *) - ) fp)( - minor_status, - context_handle, - input_message_buffer, - output_message_buffer, - conf_state, - qop_state); -} - -*/ -import "C" - -// GetMIC implements gss_GetMIC API, as per https://tools.ietf.org/html/rfc2743#page-63. -// messageToken must be .Release()-ed by the caller. -func (ctx *CtxId) GetMIC(qopReq QOP, messageBuffer *Buffer) ( - messageToken *Buffer, err error) { - - min := C.OM_uint32(0) - - token, err := ctx.MakeBuffer(allocGSSAPI) - if err != nil { - return nil, err - } - - maj := C.wrap_gss_get_mic(ctx.Fp_gss_get_mic, - &min, - ctx.C_gss_ctx_id_t, - C.gss_qop_t(qopReq), - messageBuffer.C_gss_buffer_t, - token.C_gss_buffer_t) - - err = ctx.stashLastStatus(maj, min) - if err != nil { - return nil, err - } - - return token, nil -} - -// VerifyMIC implements gss_VerifyMIC API, as per https://tools.ietf.org/html/rfc2743#page-64. -func (ctx *CtxId) VerifyMIC(messageBuffer *Buffer, tokenBuffer *Buffer) ( - qopState QOP, err error) { - - min := C.OM_uint32(0) - qop := C.gss_qop_t(0) - - maj := C.wrap_gss_verify_mic(ctx.Fp_gss_verify_mic, - &min, - ctx.C_gss_ctx_id_t, - messageBuffer.C_gss_buffer_t, - tokenBuffer.C_gss_buffer_t, - &qop) - - err = ctx.stashLastStatus(maj, min) - if err != nil { - return 0, err - } - - return QOP(qop), nil -} - -// Wrap implements gss_wrap API, as per https://tools.ietf.org/html/rfc2743#page-65. 
-// outputMessageBuffer must be .Release()-ed by the caller -func (ctx *CtxId) Wrap( - confReq bool, qopReq QOP, inputMessageBuffer *Buffer) ( - confState bool, outputMessageBuffer *Buffer, err error) { - - min := C.OM_uint32(0) - - encrypt := C.int(0) - if confReq { - encrypt = 1 - } - - outputMessageBuffer, err = ctx.MakeBuffer(allocGSSAPI) - if err != nil { - return false, nil, err - } - - encrypted := C.int(0) - - maj := C.wrap_gss_wrap(ctx.Fp_gss_wrap, - &min, - ctx.C_gss_ctx_id_t, - encrypt, - C.gss_qop_t(qopReq), - inputMessageBuffer.C_gss_buffer_t, - &encrypted, - outputMessageBuffer.C_gss_buffer_t) - - err = ctx.stashLastStatus(maj, min) - if err != nil { - return false, nil, err - } - - return encrypted != 0, - outputMessageBuffer, - nil -} - -// Unwrap implements gss_unwrap API, as per https://tools.ietf.org/html/rfc2743#page-66. -// outputMessageBuffer must be .Release()-ed by the caller -func (ctx *CtxId) Unwrap( - inputMessageBuffer *Buffer) ( - outputMessageBuffer *Buffer, confState bool, qopState QOP, err error) { - - min := C.OM_uint32(0) - - outputMessageBuffer, err = ctx.MakeBuffer(allocGSSAPI) - if err != nil { - return nil, false, 0, err - } - - encrypted := C.int(0) - qop := C.gss_qop_t(0) - - maj := C.wrap_gss_unwrap(ctx.Fp_gss_unwrap, - &min, - ctx.C_gss_ctx_id_t, - inputMessageBuffer.C_gss_buffer_t, - outputMessageBuffer.C_gss_buffer_t, - &encrypted, - &qop) - - err = ctx.stashLastStatus(maj, min) - if err != nil { - return nil, false, 0, err - } - - return outputMessageBuffer, - encrypted != 0, - QOP(qop), - nil -} diff --git a/vendor/github.com/apcera/gssapi/misc.go b/vendor/github.com/apcera/gssapi/misc.go deleted file mode 100644 index bb57a4632480..000000000000 --- a/vendor/github.com/apcera/gssapi/misc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. - -package gssapi - -/* -#include -#include - -OM_uint32 -wrap_gss_indicate_mechs(void *fp, - OM_uint32 *minor_status, - gss_OID_set * mech_set) -{ - gss_OID_set_desc *ms = NULL; - OM_uint32 maj; - maj = ((OM_uint32(*)( - OM_uint32 *, - gss_OID_set *))fp) ( - minor_status, - mech_set); - - return maj; -} - -*/ -import "C" - -// IndicateMechs implements the gss_Indicate_mechs call, according to https://tools.ietf.org/html/rfc2743#page-69. -// This returns an OIDSet of the Mechs supported on the current OS. -func (lib *Lib) IndicateMechs() (*OIDSet, error) { - - mechs := lib.NewOIDSet() - - var min C.OM_uint32 - maj := C.wrap_gss_indicate_mechs( - lib.Fp_gss_indicate_mechs, - &min, - &mechs.C_gss_OID_set) - err := lib.stashLastStatus(maj, min) - if err != nil { - return nil, err - } - - return mechs, nil -} diff --git a/vendor/github.com/apcera/gssapi/name.go b/vendor/github.com/apcera/gssapi/name.go deleted file mode 100644 index 751d32e46938..000000000000 --- a/vendor/github.com/apcera/gssapi/name.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. - -package gssapi - -// Side-note: gss_const_name_t is defined in RFC5587 as a bug-fix over RFC2744, -// since "const gss_name_t foo" says that the foo pointer is const, not the item -// pointed to is const. Ideally, we'd be able to detect that, or have a macro -// which indicates availability of the 5587 extensions. Instead, we're stuck with -// the ancient system GSSAPI headers on MacOS not supporting this. 
-// -// Choosing between "correctness" on the target platform and losing that for others, -// I've chosen to pull in /opt/local/include for MacPorts on MacOS; that should get -// us a functioning type; it's a pointer, at the ABI level the typing doesn't matter, -// so once we compile we're good. If modern (correct) headers are available in other -// locations, just add them to the search path for the relevant OS below. -// -// Using "MacPorts" on MacOS gives us: -I/opt/local/include -// Using "brew" on MacOS gives us: -I/usr/local/opt/heimdal/include - -/* -#cgo darwin CFLAGS: -I/opt/local/include -I/usr/local/opt/heimdal/include -#include - -#include - -OM_uint32 -wrap_gss_display_name(void *fp, - OM_uint32 *minor_status, - const gss_name_t input_name, - gss_buffer_t output_name_buffer, - gss_OID *output_name_type) -{ - return ((OM_uint32(*)( - OM_uint32 *, const gss_name_t, gss_buffer_t, gss_OID *) - )fp)( - minor_status, input_name, output_name_buffer, output_name_type); -} - -OM_uint32 -wrap_gss_compare_name(void *fp, - OM_uint32 *minor_status, - const gss_name_t name1, - const gss_name_t name2, - int * name_equal) -{ - return ((OM_uint32(*)( - OM_uint32 *, const gss_name_t, const gss_name_t, int *) - )fp)( - minor_status, name1, name2, name_equal); -} - -OM_uint32 -wrap_gss_release_name(void *fp, - OM_uint32 *minor_status, - gss_name_t *input_name) -{ - return ((OM_uint32(*)( - OM_uint32 *, gss_name_t *) - )fp)( - minor_status, input_name); -} - -OM_uint32 -wrap_gss_inquire_mechs_for_name(void *fp, - OM_uint32 *minor_status, - const gss_name_t input_name, - gss_OID_set *mech_types) -{ - return ((OM_uint32(*)( - OM_uint32 *, const gss_name_t, gss_OID_set *) - )fp)( - minor_status, input_name, mech_types); -} - -OM_uint32 -wrap_gss_inquire_names_for_mech(void *fp, - OM_uint32 *minor_status, - const gss_OID mechanism, - gss_OID_set * name_types) -{ - return ((OM_uint32(*)( - OM_uint32 *, const gss_OID, gss_OID_set *) - )fp)( - minor_status, mechanism, name_types); -} - -OM_uint32 -wrap_gss_canonicalize_name(void *fp, - OM_uint32 *minor_status, - gss_const_name_t input_name, - const gss_OID mech_type, - gss_name_t *output_name) -{ - return ((OM_uint32(*)( - OM_uint32 *, gss_const_name_t, const gss_OID, gss_name_t *) - )fp)( - minor_status, input_name, mech_type, output_name); -} - -OM_uint32 -wrap_gss_export_name(void *fp, - OM_uint32 *minor_status, - const gss_name_t input_name, - gss_buffer_t exported_name) -{ - OM_uint32 maj; - - maj = ((OM_uint32(*)( - OM_uint32 *, const gss_name_t, gss_buffer_t) - )fp)( - minor_status, input_name, exported_name); - - return maj; -} - -OM_uint32 -wrap_gss_duplicate_name(void *fp, - OM_uint32 *minor_status, - const gss_name_t src_name, - gss_name_t *dest_name) -{ - return ((OM_uint32(*)( - OM_uint32 *, const gss_name_t, gss_name_t *) - )fp)( - minor_status, src_name, dest_name); -} - -*/ -import "C" - -// NewName initializes a new principal name. -func (lib *Lib) NewName() *Name { - return &Name{ - Lib: lib, - } -} - -// GSS_C_NO_NAME is a Name where the value is NULL, used to request special -// behavior in some GSSAPI calls. -func (lib *Lib) GSS_C_NO_NAME() *Name { - return lib.NewName() -} - -// Release frees the memory associated with an internal representation of the -// name. 
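// Since names hold C memory, a typical lifecycle pairs creation with Release
// (editorial sketch; assumes lib was obtained from Load, the service name is
// illustrative, and error handling is elided):
//
//	b, _ := lib.MakeBufferString("HTTP@www.example.com")
//	defer b.Release()
//	name, _ := b.Name(lib.GSS_C_NT_HOSTBASED_SERVICE)
//	defer name.Release()
//	fmt.Println(name.String())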
-func (n *Name) Release() error { - if n == nil || n.C_gss_name_t == nil { - return nil - } - - var min C.OM_uint32 - maj := C.wrap_gss_release_name(n.Fp_gss_release_name, &min, &n.C_gss_name_t) - err := n.stashLastStatus(maj, min) - if err == nil { - n.C_gss_name_t = nil - } - return err -} - -// Equal tests 2 names for semantic equality (refer to the same entity) -func (n Name) Equal(other Name) (equal bool, err error) { - var min C.OM_uint32 - var isEqual C.int - - maj := C.wrap_gss_compare_name(n.Fp_gss_compare_name, &min, - n.C_gss_name_t, other.C_gss_name_t, &isEqual) - err = n.stashLastStatus(maj, min) - if err != nil { - return false, err - } - - return isEqual != 0, nil -} - -// Display "allows an application to obtain a textual representation of an -// opaque internal-form name for display purposes" -func (n Name) Display() (name string, oid *OID, err error) { - var min C.OM_uint32 - b, err := n.MakeBuffer(allocGSSAPI) - if err != nil { - return "", nil, err - } - defer b.Release() - - oid = n.NewOID() - - maj := C.wrap_gss_display_name(n.Fp_gss_display_name, &min, - n.C_gss_name_t, b.C_gss_buffer_t, &oid.C_gss_OID) - - err = n.stashLastStatus(maj, min) - if err != nil { - oid.Release() - return "", nil, err - } - - return b.String(), oid, err -} - -// String displays a Go-friendly version of a name. ("" on error) -func (n Name) String() string { - s, _, _ := n.Display() - return s -} - -// Canonicalize returns a copy of this name, canonicalized for the specified -// mechanism -func (n Name) Canonicalize(mech_type *OID) (canonical *Name, err error) { - canonical = &Name{ - Lib: n.Lib, - } - - var min C.OM_uint32 - maj := C.wrap_gss_canonicalize_name(n.Fp_gss_canonicalize_name, &min, - n.C_gss_name_t, mech_type.C_gss_OID, &canonical.C_gss_name_t) - err = n.stashLastStatus(maj, min) - if err != nil { - return nil, err - } - - return canonical, nil -} - -// Duplicate creates a new independent imported name; after this, both the original and -// the duplicate will need to be .Released(). 
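// Sketch (editorial): the copy owns its own C memory, so both names must be
// released:
//
//	dup, err := name.Duplicate()
//	if err == nil {
//		defer dup.Release()
//	}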
-func (n *Name) Duplicate() (duplicate *Name, err error) {
-	duplicate = &Name{
-		Lib: n.Lib,
-	}
-
-	var min C.OM_uint32
-	maj := C.wrap_gss_duplicate_name(n.Fp_gss_duplicate_name, &min,
-		n.C_gss_name_t, &duplicate.C_gss_name_t)
-	err = n.stashLastStatus(maj, min)
-	if err != nil {
-		return nil, err
-	}
-
-	return duplicate, nil
-}
-
-// Export makes a text (Buffer) version from an internal representation.
-func (n *Name) Export() (b *Buffer, err error) {
-	b, err = n.MakeBuffer(allocGSSAPI)
-	if err != nil {
-		return nil, err
-	}
-
-	var min C.OM_uint32
-	maj := C.wrap_gss_export_name(n.Fp_gss_export_name, &min,
-		n.C_gss_name_t, b.C_gss_buffer_t)
-	err = n.stashLastStatus(maj, min)
-	if err != nil {
-		b.Release()
-		return nil, err
-	}
-
-	return b, nil
-}
-
-// InquireMechs returns the set of mechanisms supported by the GSS-API
-// implementation that may be able to process the specified name.
-func (n *Name) InquireMechs() (oids *OIDSet, err error) {
-	oidset := n.NewOIDSet()
-
-	var min C.OM_uint32
-	maj := C.wrap_gss_inquire_mechs_for_name(n.Fp_gss_inquire_mechs_for_name, &min,
-		n.C_gss_name_t, &oidset.C_gss_OID_set)
-	err = n.stashLastStatus(maj, min)
-	if err != nil {
-		return nil, err
-	}
-
-	return oidset, nil
-}
-
-// InquireNamesForMechs returns the set of name types supported by the
-// specified mechanism.
-func (lib *Lib) InquireNamesForMechs(mech *OID) (name_types *OIDSet, err error) {
-	oidset := lib.NewOIDSet()
-
-	var min C.OM_uint32
-	maj := C.wrap_gss_inquire_names_for_mech(lib.Fp_gss_inquire_names_for_mech, &min,
-		mech.C_gss_OID, &oidset.C_gss_OID_set)
-	err = lib.stashLastStatus(maj, min)
-	if err != nil {
-		return nil, err
-	}
-
-	return oidset, nil
-}
diff --git a/vendor/github.com/apcera/gssapi/name_test.go b/vendor/github.com/apcera/gssapi/name_test.go
deleted file mode 100644
index 984f93618bcf..000000000000
--- a/vendor/github.com/apcera/gssapi/name_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2014 Apcera Inc. All rights reserved.
-
-package gssapi
-
-import (
-	"testing"
-)
-
-// Tests importing and exporting names.
-func TestNameImportExport(t *testing.T) {
-	l, err := testLoad()
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer l.Unload()
-
-	names := []string{
-		`test@corp.example.com`,
-		`test@corp.ExAmple.com`,
-		`test@CORP.EXAMPLE.COM`,
-	}
-
-	makeName := func(n string) (name *Name) {
-		b, err := l.MakeBufferString(n)
-		if err != nil {
-			t.Fatalf("%q: Got error %v, expected nil", n, err)
-		}
-		if b == nil {
-			t.Fatalf("%q: Got nil, expected non-nil", n)
-		}
-		defer b.Release()
-
-		name, err = b.Name(l.GSS_C_NT_HOSTBASED_SERVICE)
-		if err != nil {
-			t.Fatalf("%q: Got error %v, expected nil", n, err)
-		}
-		if name == nil {
-			t.Fatalf("%q: Got nil, expected non-nil", n)
-		}
-		return name
-	}
-
-	// Make the reference name
-	n0 := makeName(names[0])
-	defer n0.Release()
-
-	// Make sure we have the krb mechanism, and normalize the reference
-	// name using it
-	mechs, err := n0.InquireMechs()
-	if err != nil {
-		//TODO: need a better test for OS X since InquireMechs doesn't
-		// seem to work there
-		t.Skipf("Couldn't get mechs for %q, error: %v", names[0], err.Error())
-	}
-
-	// This OID seems to be an available mech on Linux
-	kerbOID, err := l.MakeOIDBytes([]byte{'\x2a', '\x86', '\x48', '\x86', '\xf7', '\x12', '\x01', '\x02', '\x02'})
-	if err != nil {
-		t.Fatalf("Got error %v, expected nil", err)
-	}
-	defer kerbOID.Release()
-
-	if !mechs.Contains(kerbOID) {
-		t.Fatalf("Expected %q to be in %q", kerbOID.DebugString(), mechs.DebugString())
-	}
-
-	makeNames := func(n string) (
-		name *Name, canonical *Name, display string, exported *Buffer) {
-
-		name = makeName(n)
-		if name == nil {
-			return nil, nil, "", nil
-		}
-
-		origDisplay, _, err := name.Display()
-		if err != nil {
-			t.Fatalf("Got error %q, expected nil", err.Error())
-		}
-		if origDisplay != n {
-			t.Fatalf("Got %q, expected %q", origDisplay, n)
-		}
-
-		canonical, err = name.Canonicalize(kerbOID)
-		if err != nil {
-			t.Fatalf("Got error %q, expected nil", err.Error())
-		}
-		if canonical == nil {
-			t.Fatal("Got nil, expected non-nil")
-		}
-
-		display, _, err = canonical.Display()
-		if err != nil {
-			t.Fatalf("Got error %q, expected nil", err.Error())
-		}
-
-		exported, err = canonical.Export()
-		if err != nil {
-			t.Fatalf("Got error %q, expected nil", err.Error())
-		}
-		if exported == nil {
-			t.Fatal("Got nil, expected non-nil")
-		}
-
-		return name, canonical, display, exported
-	}
-
-	n0, _, d0, e0 := makeNames(names[0])
-	if n0 == nil {
-		t.Fatal("Got nil, expected non-nil")
-	}
-
-	for _, n := range names {
-		n, _, d, e := makeNames(n)
-		if n == nil {
-			t.Fatalf("%s: Got nil, expected non-nil", n)
-		}
-		if d != d0 {
-			t.Fatalf("%s: Got %q, expected %q", n, d, d0)
-		}
-		if !e.Equal(e0) {
-			t.Fatalf("%s: Got %q, expected %q", n, e.String(), e0.String())
-		}
-	}
-}
diff --git a/vendor/github.com/apcera/gssapi/oid.go b/vendor/github.com/apcera/gssapi/oid.go
deleted file mode 100644
index f63f42e99e95..000000000000
--- a/vendor/github.com/apcera/gssapi/oid.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2013-2015 Apcera Inc. All rights reserved.
- -package gssapi - -/* -#include -#include - -#include - -const size_t gss_OID_size=sizeof(gss_OID_desc); - -void helper_gss_OID_desc_free_elements(gss_OID oid) { - free(oid->elements); -} - -void helper_gss_OID_desc_set_elements(gss_OID oid, OM_uint32 l, void *p) { - oid->length = l; - oid->elements = p; -} - -void helper_gss_OID_desc_get_elements(gss_OID oid, OM_uint32 *l, char **p) { - *l = oid->length; - *p = oid->elements; -} - -int -wrap_gss_oid_equal(void *fp, gss_OID oid1, gss_OID oid2) -{ - return ((int(*) (gss_OID, gss_OID)) fp)(oid1, oid2); -} - -*/ -import "C" - -import ( - "bytes" - "fmt" - "unsafe" -) - -// NewOID initializes a new OID. (Object Identifier) -func (lib *Lib) NewOID() *OID { - return &OID{Lib: lib} -} - -// MakeOIDBytes makes an OID encapsulating a byte slice. Note that it does not -// duplicate the data, but rather it points to it directly. -func (lib *Lib) MakeOIDBytes(data []byte) (*OID, error) { - oid := lib.NewOID() - - s := C.malloc(C.gss_OID_size) // s for struct - if s == nil { - return nil, ErrMallocFailed - } - C.memset(s, 0, C.gss_OID_size) - - l := C.size_t(len(data)) - e := C.malloc(l) // c for contents - if e == nil { - return nil, ErrMallocFailed - } - C.memmove(e, (unsafe.Pointer)(&data[0]), l) - - oid.C_gss_OID = C.gss_OID(s) - oid.alloc = allocMalloc - - // because of the alignment issues I can't access o.oid's fields from go, - // so invoking a C function to do the same as: - // oid.C_gss_OID.length = l - // oid.C_gss_OID.elements = c - C.helper_gss_OID_desc_set_elements(oid.C_gss_OID, C.OM_uint32(l), e) - - return oid, nil -} - -// MakeOIDString makes an OID from a string. -func (lib *Lib) MakeOIDString(data string) (*OID, error) { - return lib.MakeOIDBytes([]byte(data)) -} - -// Release safely frees the contents of an OID if it's allocated with malloc by -// MakeOIDBytes. -func (oid *OID) Release() error { - if oid == nil || oid.C_gss_OID == nil { - return nil - } - - switch oid.alloc { - case allocMalloc: - // same as with get and set, use a C helper to free(oid.C_gss_OID.elements) - C.helper_gss_OID_desc_free_elements(oid.C_gss_OID) - C.free(unsafe.Pointer(oid.C_gss_OID)) - oid.C_gss_OID = nil - oid.alloc = allocNone - } - - return nil -} - -// Bytes displays the bytes of an OID. -func (oid OID) Bytes() []byte { - var l C.OM_uint32 - var p *C.char - - C.helper_gss_OID_desc_get_elements(oid.C_gss_OID, &l, &p) - - return C.GoBytes(unsafe.Pointer(p), C.int(l)) -} - -// String displays a string representation of an OID. -func (oid *OID) String() string { - var l C.OM_uint32 - var p *C.char - - C.helper_gss_OID_desc_get_elements(oid.C_gss_OID, &l, &p) - - return fmt.Sprintf(`%x`, C.GoStringN(p, C.int(l))) -} - -// Returns a symbolic name for a known OID, or the string. Note that this -// function is intended for debugging and is not at all performant. 
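// For example (editorial sketch): lib.GSS_MECH_KRB5.DebugString() yields
// "GSS_MECH_KRB5", while an OID that matches none of the known constants
// falls through to the hex form produced by String().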
-func (oid *OID) DebugString() string { - switch { - case bytes.Equal(oid.Bytes(), oid.GSS_C_NT_USER_NAME.Bytes()): - return "GSS_C_NT_USER_NAME" - case bytes.Equal(oid.Bytes(), oid.GSS_C_NT_MACHINE_UID_NAME.Bytes()): - return "GSS_C_NT_MACHINE_UID_NAME" - case bytes.Equal(oid.Bytes(), oid.GSS_C_NT_STRING_UID_NAME.Bytes()): - return "GSS_C_NT_STRING_UID_NAME" - case bytes.Equal(oid.Bytes(), oid.GSS_C_NT_HOSTBASED_SERVICE_X.Bytes()): - return "GSS_C_NT_HOSTBASED_SERVICE_X" - case bytes.Equal(oid.Bytes(), oid.GSS_C_NT_HOSTBASED_SERVICE.Bytes()): - return "GSS_C_NT_HOSTBASED_SERVICE" - case bytes.Equal(oid.Bytes(), oid.GSS_C_NT_ANONYMOUS.Bytes()): - return "GSS_C_NT_ANONYMOUS" - case bytes.Equal(oid.Bytes(), oid.GSS_C_NT_EXPORT_NAME.Bytes()): - return "GSS_C_NT_EXPORT_NAME" - case bytes.Equal(oid.Bytes(), oid.GSS_KRB5_NT_PRINCIPAL_NAME.Bytes()): - return "GSS_KRB5_NT_PRINCIPAL_NAME" - case bytes.Equal(oid.Bytes(), oid.GSS_KRB5_NT_PRINCIPAL.Bytes()): - return "GSS_KRB5_NT_PRINCIPAL" - case bytes.Equal(oid.Bytes(), oid.GSS_MECH_KRB5.Bytes()): - return "GSS_MECH_KRB5" - case bytes.Equal(oid.Bytes(), oid.GSS_MECH_KRB5_LEGACY.Bytes()): - return "GSS_MECH_KRB5_LEGACY" - case bytes.Equal(oid.Bytes(), oid.GSS_MECH_KRB5_OLD.Bytes()): - return "GSS_MECH_KRB5_OLD" - case bytes.Equal(oid.Bytes(), oid.GSS_MECH_SPNEGO.Bytes()): - return "GSS_MECH_SPNEGO" - case bytes.Equal(oid.Bytes(), oid.GSS_MECH_IAKERB.Bytes()): - return "GSS_MECH_IAKERB" - case bytes.Equal(oid.Bytes(), oid.GSS_MECH_NTLMSSP.Bytes()): - return "GSS_MECH_NTLMSSP" - } - - return oid.String() -} diff --git a/vendor/github.com/apcera/gssapi/oid_set.go b/vendor/github.com/apcera/gssapi/oid_set.go deleted file mode 100644 index 45beb5a92aef..000000000000 --- a/vendor/github.com/apcera/gssapi/oid_set.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. - -package gssapi - -/* -#include - -OM_uint32 -wrap_gss_create_empty_oid_set(void *fp, - OM_uint32 *minor_status, - gss_OID_set * set) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - gss_OID_set *)) fp)( - minor_status, - set); -} - -OM_uint32 -wrap_gss_release_oid_set(void *fp, - OM_uint32 *minor_status, - gss_OID_set * set) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - gss_OID_set *)) fp)( - minor_status, set); -} - -OM_uint32 -wrap_gss_add_oid_set_member(void *fp, - OM_uint32 *minor_status, - const gss_OID member_oid, - gss_OID_set * set) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - const gss_OID, - gss_OID_set *)) fp)( - minor_status, member_oid, set); -} - -OM_uint32 -wrap_gss_test_oid_set_member(void *fp, - OM_uint32 *minor_status, - const gss_OID member_oid, - const gss_OID_set set, - int * present) -{ - return ((OM_uint32(*) ( - OM_uint32 *, - const gss_OID, - const gss_OID_set, - int *)) fp)( - minor_status, member_oid, set, present); -} - -gss_OID -get_oid_set_member( - gss_OID_set set, - int index) -{ - return &(set->elements[index]); -} - -*/ -import "C" - -import ( - "fmt" - "strings" -) - -// NewOIDSet constructs a new empty OID set. -func (lib *Lib) NewOIDSet() *OIDSet { - return &OIDSet{ - Lib: lib, - // C_gss_OID_set: (C.gss_OID_set)(unsafe.Pointer(nil)), - } -} - -// MakeOIDSet makes an OIDSet prepopulated with the given OIDs. 
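// A small sketch (editorial illustration; assumes lib was obtained from Load):
//
//	set, err := lib.MakeOIDSet(lib.GSS_MECH_KRB5, lib.GSS_MECH_SPNEGO)
//	if err == nil {
//		defer set.Release()
//		fmt.Println(set.Contains(lib.GSS_MECH_KRB5)) // true
//		fmt.Println(set.DebugString())               // [GSS_MECH_KRB5, GSS_MECH_SPNEGO]
//	}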
-func (lib *Lib) MakeOIDSet(oids ...*OID) (s *OIDSet, err error) {
-	s = &OIDSet{
-		Lib: lib,
-	}
-
-	var min C.OM_uint32
-	maj := C.wrap_gss_create_empty_oid_set(s.Fp_gss_create_empty_oid_set,
-		&min, &s.C_gss_OID_set)
-	err = s.stashLastStatus(maj, min)
-	if err != nil {
-		return nil, err
-	}
-
-	err = s.Add(oids...)
-	if err != nil {
-		return nil, err
-	}
-
-	return s, nil
-}
-
-// Release frees all C memory associated with an OIDSet.
-func (s *OIDSet) Release() (err error) {
-	if s == nil || s.C_gss_OID_set == nil {
-		return nil
-	}
-
-	var min C.OM_uint32
-	maj := C.wrap_gss_release_oid_set(s.Fp_gss_release_oid_set, &min, &s.C_gss_OID_set)
-	return s.stashLastStatus(maj, min)
-}
-
-// Add adds OIDs to an OIDSet.
-func (s *OIDSet) Add(oids ...*OID) (err error) {
-	var min C.OM_uint32
-	for _, oid := range oids {
-		maj := C.wrap_gss_add_oid_set_member(s.Fp_gss_add_oid_set_member,
-			&min, oid.C_gss_OID, &s.C_gss_OID_set)
-		err = s.stashLastStatus(maj, min)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// TestOIDSetMember is a wrapper used to determine whether an OIDSet contains
-// an OID.
-func (s *OIDSet) TestOIDSetMember(oid *OID) (contains bool, err error) {
-	var min C.OM_uint32
-	var isPresent C.int
-
-	maj := C.wrap_gss_test_oid_set_member(s.Fp_gss_test_oid_set_member,
-		&min, oid.C_gss_OID, s.C_gss_OID_set, &isPresent)
-	err = s.stashLastStatus(maj, min)
-	if err != nil {
-		return false, err
-	}
-
-	return isPresent != 0, nil
-}
-
-// Contains (gss_test_oid_set_member) checks if an OID is present in an OIDSet.
-func (s *OIDSet) Contains(oid *OID) bool {
-	contains, _ := s.TestOIDSetMember(oid)
-	return contains
-}
-
-// Length returns the number of OIDs in a set.
-func (s *OIDSet) Length() int {
-	if s == nil {
-		return 0
-	}
-	return int(s.C_gss_OID_set.count)
-}
-
-// Get returns a specific OID from the set. The memory will be released when
-// the set itself is released.
-func (s *OIDSet) Get(index int) (*OID, error) {
-	if s == nil || index < 0 || index >= int(s.C_gss_OID_set.count) {
-		return nil, fmt.Errorf("index %d out of bounds", index)
-	}
-	oid := s.NewOID()
-	oid.C_gss_OID = C.get_oid_set_member(s.C_gss_OID_set, C.int(index))
-	return oid, nil
-}
-
-// DebugString returns a bracketed list of the symbolic names (or hex forms)
-// of the OIDs in the set.
-func (s *OIDSet) DebugString() string {
-	names := make([]string, 0)
-	for i := 0; i < s.Length(); i++ {
-		oid, _ := s.Get(i)
-		names = append(names, oid.DebugString())
-	}
-
-	return "[" + strings.Join(names, ", ") + "]"
-}
diff --git a/vendor/github.com/apcera/gssapi/spnego/spnego_server.go b/vendor/github.com/apcera/gssapi/spnego/spnego_server.go
deleted file mode 100644
index 977e6a970966..000000000000
--- a/vendor/github.com/apcera/gssapi/spnego/spnego_server.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2015 Apcera Inc. All rights reserved.
-
-// Package spnego gives Kerberized servers an interface for negotiating with
-// clients using SPNEGO. A reference implementation is provided below.
-package spnego
-
-import (
-	"errors"
-	"net/http"
-
-	"github.com/apcera/gssapi"
-)
-
-// A ServerNegotiator is an interface that defines minimal functionality for
-// SPNEGO and credential issuance using GSSAPI from the server side.
-type ServerNegotiator interface {
-	// AcquireCred acquires a credential from the server's environment.
-	AcquireCred(string) (*gssapi.CredId, error)
-
-	// Negotiate handles the negotiation with the client.
-	Negotiate(*gssapi.CredId, http.Header, http.Header) (string, int, error)
-}
-
-// A KerberizedServer allows a server to negotiate authentication over SPNEGO
-// with a client.
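// An editorial sketch of wiring this into net/http (the "HTTP" service name
// and the surrounding setup are assumptions; error handling abbreviated):
//
//	k := spnego.KerberizedServer{Lib: lib}
//	cred, err := k.AcquireCred("HTTP")
//	if err != nil {
//		log.Fatal(err)
//	}
//	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
//		user, code, err := k.Negotiate(cred, r.Header, w.Header())
//		if err != nil {
//			w.WriteHeader(code) // a 401 carries the Negotiate challenge
//			return
//		}
//		fmt.Fprintf(w, "hello, %s\n", user)
//	})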
-type KerberizedServer struct {
-	*gssapi.Lib
-	UseProxyAuthentication bool
-}
-
-var _ ServerNegotiator = KerberizedServer{}
-
-// AcquireCred acquires a Kerberos credential (keytab) from the environment.
-// The CredId MUST be released by the caller.
-func (k KerberizedServer) AcquireCred(serviceName string) (*gssapi.CredId, error) {
-	nameBuf, err := k.MakeBufferString(serviceName)
-	if err != nil {
-		return nil, err
-	}
-	defer nameBuf.Release()
-
-	name, err := nameBuf.Name(k.GSS_KRB5_NT_PRINCIPAL_NAME)
-	if err != nil {
-		return nil, err
-	}
-	defer name.Release()
-
-	cred, actualMechs, _, err := k.Lib.AcquireCred(name,
-		gssapi.GSS_C_INDEFINITE, k.GSS_C_NO_OID_SET, gssapi.GSS_C_ACCEPT)
-	if err != nil {
-		return nil, err
-	}
-	defer actualMechs.Release()
-
-	return cred, nil
-}
-
-// Negotiate handles the SPNEGO client-server negotiation. Negotiate will
-// likely be invoked multiple times; a 200 or 400 response code is a
-// terminating condition, whereas a 401 or 407 means that the client should
-// respond to the challenge that we send.
-func (k KerberizedServer) Negotiate(cred *gssapi.CredId, inHeader, outHeader http.Header) (string, int, error) {
-	var challengeHeader, authHeader string
-	var challengeStatus int
-	if k.UseProxyAuthentication {
-		challengeHeader = "Proxy-Authenticate"
-		challengeStatus = http.StatusProxyAuthRequired
-		authHeader = "Proxy-Authorization"
-	} else {
-		challengeHeader = "WWW-Authenticate"
-		challengeStatus = http.StatusUnauthorized
-		authHeader = "Authorization"
-	}
-
-	negotiate, inputToken := CheckSPNEGONegotiate(k.Lib, inHeader, authHeader)
-	defer inputToken.Release()
-
-	// Here, challenge the client to initiate the security context. The first
-	// request a client makes is often unauthenticated, so we return a 401
-	// (or 407), which the client handles.
-	if !negotiate || inputToken.Length() == 0 {
-		AddSPNEGONegotiate(outHeader, challengeHeader, inputToken)
-		return "", challengeStatus, errors.New("SPNEGO: unauthorized")
-	}
-
-	// FIXME: GSS_S_CONTINUE_NEEDED handling?
-	ctx, srcName, _, outputToken, _, _, delegatedCredHandle, err :=
-		k.AcceptSecContext(k.GSS_C_NO_CONTEXT,
-			cred, inputToken, k.GSS_C_NO_CHANNEL_BINDINGS)
-	if err != nil {
-		return "", http.StatusBadRequest, err
-	}
-	delegatedCredHandle.Release()
-	ctx.DeleteSecContext()
-	outputToken.Release()
-	defer srcName.Release()
-
-	return srcName.String(), http.StatusOK, nil
-}
diff --git a/vendor/github.com/apcera/gssapi/spnego/spnego_transport.go b/vendor/github.com/apcera/gssapi/spnego/spnego_transport.go
deleted file mode 100644
index 624d1a4ad1fd..000000000000
--- a/vendor/github.com/apcera/gssapi/spnego/spnego_transport.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2013-2015 Apcera Inc. All rights reserved.
-
-package spnego
-
-import (
-	"encoding/base64"
-	"fmt"
-	"net/http"
-	"strings"
-
-	"github.com/apcera/gssapi"
-)
-
-const negotiateScheme = "Negotiate"
-
-// AddSPNEGONegotiate adds a Negotiate header with the value of a serialized
-// token to an http header.
-func AddSPNEGONegotiate(h http.Header, name string, token *gssapi.Buffer) {
-	if name == "" {
-		return
-	}
-
-	v := negotiateScheme
-	if token.Length() != 0 {
-		data := token.Bytes()
-		v = v + " " + base64.StdEncoding.EncodeToString(data)
-	}
-	h.Set(name, v)
-}
-
-// CheckSPNEGONegotiate checks for the presence of a Negotiate header. If
-// present, we return a gssapi Token created from the header value sent to us.
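// Sketch (editorial): given a request header such as
//
//	Authorization: Negotiate aGVsbG8=
//
// CheckSPNEGONegotiate(lib, r.Header, "Authorization") returns true plus a
// *gssapi.Buffer holding the decoded token, which the caller must Release().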
-func CheckSPNEGONegotiate(lib *gssapi.Lib, h http.Header, name string) (bool, *gssapi.Buffer) { - var err error - defer func() { - if err != nil { - lib.Debug(fmt.Sprintf("CheckSPNEGONegotiate: %v", err)) - } - }() - - for _, header := range h[http.CanonicalHeaderKey(name)] { - if len(header) < len(negotiateScheme) { - continue - } - if !strings.EqualFold(header[:len(negotiateScheme)], negotiateScheme) { - continue - } - - // Remove the "Negotiate" prefix - normalizedToken := header[len(negotiateScheme):] - // Trim leading and trailing whitespace - normalizedToken = strings.TrimSpace(normalizedToken) - // Remove internal whitespace (some servers insert whitespace every 76 chars) - normalizedToken = strings.Replace(normalizedToken, " ", "", -1) - // Pad to a multiple of 4 chars for base64 (some servers strip trailing padding) - if unpaddedChars := len(normalizedToken) % 4; unpaddedChars != 0 { - normalizedToken += strings.Repeat("=", 4-unpaddedChars) - } - - tbytes, err := base64.StdEncoding.DecodeString(normalizedToken) - if err != nil { - continue - } - - if len(tbytes) == 0 { - return true, nil - } - - token, err := lib.MakeBufferBytes(tbytes) - if err != nil { - continue - } - return true, token - } - - return false, nil -} diff --git a/vendor/github.com/apcera/gssapi/spnego/spnego_transport_test.go b/vendor/github.com/apcera/gssapi/spnego/spnego_transport_test.go deleted file mode 100644 index ae428cc6bd6b..000000000000 --- a/vendor/github.com/apcera/gssapi/spnego/spnego_transport_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package spnego - -import ( - "net/http" - "testing" - - "github.com/apcera/gssapi" -) - -func TestCheckSPNEGONegotiate(t *testing.T) { - lib, err := gssapi.Load(nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - name := "WWW-Authenticate" - canonicalName := http.CanonicalHeaderKey(name) - - testcases := map[string]struct { - Headers http.Header - Name string - ExpectedPresent bool - ExpectedToken string - }{ - "empty": { - Headers: http.Header{}, - Name: name, - ExpectedPresent: false, - ExpectedToken: "", - }, - - "non-negotiate": { - Headers: http.Header{canonicalName: []string{"Basic"}}, - Name: name, - ExpectedPresent: false, - ExpectedToken: "", - }, - - "negotiate, no token": { - Headers: http.Header{canonicalName: []string{"Negotiate"}}, - Name: name, - ExpectedPresent: true, - ExpectedToken: "", - }, - "negotiate, case-insensitive": { - Headers: http.Header{canonicalName: []string{"negotiate"}}, - Name: name, - ExpectedPresent: true, - ExpectedToken: "", - }, - "negotiate, fallback from basic-auth": { - Headers: http.Header{canonicalName: []string{"Basic", "Negotiate"}}, - Name: name, - ExpectedPresent: true, - ExpectedToken: "", - }, - - "negotiate, with token": { - Headers: http.Header{canonicalName: []string{"Negotiate aGVsbG8="}}, - Name: name, - ExpectedPresent: true, - ExpectedToken: "hello", - }, - "negotiate, with token with whitespace": { - Headers: http.Header{canonicalName: []string{"Negotiate aGVs bG8="}}, - Name: name, - ExpectedPresent: true, - ExpectedToken: "hello", - }, - - "negotiate, with token needing no padding": { - Headers: http.Header{canonicalName: []string{"Negotiate cGFk"}}, - Name: name, - ExpectedPresent: true, - ExpectedToken: "pad", - }, - "negotiate, with token with 1 end-padding =": { - Headers: http.Header{canonicalName: []string{"Negotiate cGFkXzE="}}, - Name: name, - ExpectedPresent: true, - ExpectedToken: "pad_1", - }, - "negotiate, with token missing 1 end-padding =": { - Headers: 
http.Header{canonicalName: []string{"Negotiate cGFkXzE"}}, - Name: name, - ExpectedPresent: true, - ExpectedToken: "pad_1", - }, - "negotiate, with token with 2 end-padding =": { - Headers: http.Header{canonicalName: []string{"Negotiate cGFkX19fMg=="}}, - Name: name, - ExpectedPresent: true, - ExpectedToken: "pad___2", - }, - "negotiate, with token missing 2 end-padding =": { - Headers: http.Header{canonicalName: []string{"Negotiate cGFkX19fMg"}}, - Name: name, - ExpectedPresent: true, - ExpectedToken: "pad___2", - }, - - "negotiate, with invalid token": { - Headers: http.Header{canonicalName: []string{"Negotiate !@#$%"}}, - Name: name, - ExpectedPresent: false, - ExpectedToken: "", - }, - } - - for k, tc := range testcases { - present, token := CheckSPNEGONegotiate(lib, tc.Headers, tc.Name) - if present != tc.ExpectedPresent { - t.Errorf("%s: expected present=%v, got %v", k, tc.ExpectedPresent, present) - continue - } - if token.String() != tc.ExpectedToken { - t.Errorf("%s: expected token=%q, got %q", k, tc.ExpectedToken, token) - continue - } - } -} diff --git a/vendor/github.com/apcera/gssapi/status.go b/vendor/github.com/apcera/gssapi/status.go deleted file mode 100644 index ce14c2a143ca..000000000000 --- a/vendor/github.com/apcera/gssapi/status.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. - -// GSS status and errors - -package gssapi - -/* -#include - -OM_uint32 -wrap_gss_display_status(void *fp, - OM_uint32 *minor_status, - OM_uint32 status_value, - int status_type, - const gss_OID mech_type, - OM_uint32 *message_context, - gss_buffer_t status_string) -{ - return ((OM_uint32(*)( - OM_uint32 *, - OM_uint32, - int, - const gss_OID, - OM_uint32 *, - gss_buffer_t) - )fp)(minor_status, - status_value, - status_type, - mech_type, - message_context, - status_string); -} - -*/ -import "C" - -import ( - "errors" - "fmt" - "strings" -) - -// Constant values are specified for C-language bindings in RFC 2744. -/* -""" - These errors are encoded into the 32-bit GSS status code as follows: - - MSB LSB - |------------------------------------------------------------| - | Calling Error | Routine Error | Supplementary Info | - |------------------------------------------------------------| - Bit 31 24 23 16 15 0 -""" - -Note that the first two fields hold integer consts, whereas Supplementary Info -is a bit-field. -*/ - -const ( - shiftCALLING = 24 - shiftROUTINE = 16 - maskCALLING = 0xFF000000 - maskROUTINE = 0x00FF0000 - maskSUPPINFO = 0x0000FFFF -) - -// Status values are returned by gssapi calls to indicate the result of a call. 
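// For example (editorial sketch), a word with both a routine error and the
// continue bit set decodes as:
//
//	st := MajorStatus(GSS_S_FAILURE | field_GSS_S_CONTINUE_NEEDED)
//	st.IsError()        // true
//	st.ContinueNeeded() // true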
-// Declared according to: https://tools.ietf.org/html/rfc2743#page-17 -const ( - GSS_S_COMPLETE MajorStatus = 0 - - GSS_S_CALL_INACCESSIBLE_READ MajorStatus = 1 << shiftCALLING - GSS_S_CALL_INACCESSIBLE_WRITE = 2 << shiftCALLING - GSS_S_CALL_BAD_STRUCTURE = 3 << shiftCALLING - - GSS_S_BAD_MECH MajorStatus = 1 << shiftROUTINE - GSS_S_BAD_NAME = 2 << shiftROUTINE - GSS_S_BAD_NAMETYPE = 3 << shiftROUTINE - GSS_S_BAD_BINDINGS = 4 << shiftROUTINE - GSS_S_BAD_STATUS = 5 << shiftROUTINE - GSS_S_BAD_MIC = 6 << shiftROUTINE - GSS_S_BAD_SIG = 6 << shiftROUTINE // duplication deliberate - GSS_S_NO_CRED = 7 << shiftROUTINE - GSS_S_NO_CONTEXT = 8 << shiftROUTINE - GSS_S_DEFECTIVE_TOKEN = 9 << shiftROUTINE - GSS_S_DEFECTIVE_CREDENTIAL = 10 << shiftROUTINE - GSS_S_CREDENTIALS_EXPIRED = 11 << shiftROUTINE - GSS_S_CONTEXT_EXPIRED = 12 << shiftROUTINE - GSS_S_FAILURE = 13 << shiftROUTINE - GSS_S_BAD_QOP = 14 << shiftROUTINE - GSS_S_UNAUTHORIZED = 15 << shiftROUTINE - GSS_S_UNAVAILABLE = 16 << shiftROUTINE - GSS_S_DUPLICATE_ELEMENT = 17 << shiftROUTINE - GSS_S_NAME_NOT_MN = 18 << shiftROUTINE - - field_GSS_S_CONTINUE_NEEDED = 1 << 0 - field_GSS_S_DUPLICATE_TOKEN = 1 << 1 - field_GSS_S_OLD_TOKEN = 1 << 2 - field_GSS_S_UNSEQ_TOKEN = 1 << 3 - field_GSS_S_GAP_TOKEN = 1 << 4 -) - -// These are GSSAPI-defined: -// TODO: should MajorStatus be defined as C.OM_uint32? -type MajorStatus uint32 - -// CallingError is equivalent to C GSS_CALLING_ERROR() macro. -func (st MajorStatus) CallingError() MajorStatus { - return st & maskCALLING -} - -// RoutineError is equivalent to C GSS_ROUTINE_ERROR() macro. -func (st MajorStatus) RoutineError() MajorStatus { - return st & maskROUTINE -} - -// SupplementaryInfo is equivalent to C GSS_SUPPLEMENTARY_INFO() macro. -func (st MajorStatus) SupplementaryInfo() MajorStatus { - return st & maskSUPPINFO -} - -// IsError is equivalent to C GSS_ERROR() macro. Not written as 'Error' because -// that's special in Go conventions. (i.e. conforming to error interface) -func (st MajorStatus) IsError() bool { - return st&(maskCALLING|maskROUTINE) != 0 -} - -// ContinueNeeded is equivalent to a C bitfield set test against the -// GSS_S_CONTINUE_NEEDED macro. -func (st MajorStatus) ContinueNeeded() bool { - return st&field_GSS_S_CONTINUE_NEEDED != 0 -} - -// DuplicateToken is equivalent to a C bitfield set test against the -// GSS_S_DUPLICATE_TOKEN macro. -func (st MajorStatus) DuplicateToken() bool { - return st&field_GSS_S_DUPLICATE_TOKEN != 0 -} - -// OldToken is equivalent to a C bitfield set test against the -// GSS_S_OLD_TOKEN macro. -func (st MajorStatus) OldToken() bool { - return st&field_GSS_S_OLD_TOKEN != 0 -} - -// UnseqToken is equivalent to a C bitfield set test against the -// GSS_S_UNSEQ_TOKEN macro. -func (st MajorStatus) UnseqToken() bool { - return st&field_GSS_S_UNSEQ_TOKEN != 0 -} - -// GapToken is equivalent to a C bitfield set test against the -// GSS_S_GAP_TOKEN macro. -func (st MajorStatus) GapToken() bool { - return st&field_GSS_S_GAP_TOKEN != 0 -} - -// Error is designed to serve both as an error, and as a general gssapi status -// container. If Major is GSS_S_FAILURE, then information will be in Minor. -// The GoError method will return a nil if it doesn't represent a real error. -type Error struct { - // gssapi lib binding, so that we can convert the results of an - // operation to a string for diagnosis. 
- *Lib - - // Specified by gssapi - Major MajorStatus - - // Mechanism-specific: - Minor C.OM_uint32 -} - -// MakeError creates a golang Error object from a gssapi major & minor status. -func (lib *Lib) MakeError(major, minor C.OM_uint32) *Error { - return &Error{ - Lib: lib, - Major: MajorStatus(major), - Minor: minor, - } -} - -// ErrContinueNeeded may be returned by InitSecContext or AcceptSecContext to -// indicate that another iteration is needed -var ErrContinueNeeded = errors.New("continue needed") - -func (lib *Lib) stashLastStatus(major, minor C.OM_uint32) error { - lib.LastStatus = lib.MakeError(major, minor) - return lib.LastStatus.GoError() -} - -// GoError returns an untyped error interface object. -func (e *Error) GoError() error { - if e.Major.IsError() { - return e - } - return nil -} - -// Error returns a string representation of an Error object. -func (e *Error) Error() string { - messages := []string{} - nOther := 0 - context := C.OM_uint32(0) - inquiry := C.OM_uint32(0) - code_type := 0 - first := true - - if e.Major.RoutineError() == GSS_S_FAILURE { - inquiry = e.Minor - code_type = GSS_C_MECH_CODE - } else { - inquiry = C.OM_uint32(e.Major) - code_type = GSS_C_GSS_CODE - } - - for first || context != C.OM_uint32(0) { - first = false - min := C.OM_uint32(0) - - b, err := e.MakeBuffer(allocGSSAPI) - if err != nil { - break - } - - // TODO: store a mech_type at the lib level? Or context? For now GSS_C_NO_OID... - maj := C.wrap_gss_display_status( - e.Fp_gss_display_status, - &min, - inquiry, - C.int(code_type), - nil, - &context, - b.C_gss_buffer_t) - - err = e.MakeError(maj, min).GoError() - if err != nil { - nOther = nOther + 1 - } - messages = append(messages, b.String()) - b.Release() - } - if nOther > 0 { - messages = append(messages, fmt.Sprintf("additionally, %d conversions failed", nOther)) - } - messages = append(messages, "") - return strings.Join(messages, "\n") -} diff --git a/vendor/github.com/apcera/gssapi/status_test.go b/vendor/github.com/apcera/gssapi/status_test.go deleted file mode 100644 index d2b127192995..000000000000 --- a/vendor/github.com/apcera/gssapi/status_test.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. - -package gssapi - -import ( - "testing" -) - -func TestStatus(t *testing.T) { - l, err := testLoad() - if err != nil { - t.Error(err) - return - } - defer l.Unload() - -} diff --git a/vendor/github.com/apcera/gssapi/test/client_access_test.go b/vendor/github.com/apcera/gssapi/test/client_access_test.go deleted file mode 100644 index 7a4a50066019..000000000000 --- a/vendor/github.com/apcera/gssapi/test/client_access_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. 
- -// +build clienttest - -package test - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" - "net/http/httputil" - "strings" - "testing" - - "github.com/apcera/gssapi" - "github.com/apcera/gssapi/spnego" -) - -func initClientContext(t *testing.T, method, path string, - bodyf func(ctx *gssapi.CtxId) string) ( - ctx *gssapi.CtxId, r *http.Request) { - // establish a context - ctx, _, token, _, _, err := c.InitSecContext( - c.GSS_C_NO_CREDENTIAL, - nil, - prepareServiceName(t), - c.GSS_C_NO_OID, - 0, - 0, - c.GSS_C_NO_CHANNEL_BINDINGS, - c.GSS_C_NO_BUFFER) - defer token.Release() - if err != nil { - e, ok := err.(*gssapi.Error) - if ok && e.Major.ContinueNeeded() { - t.Fatal("Unexpected GSS_S_CONTINUE_NEEDED") - } - t.Fatal(err) - } - - u := c.ServiceAddress + path - if !strings.HasPrefix(u, "http://") { - u = "http://" + u - } - - body := io.Reader(nil) - if bodyf != nil { - body = bytes.NewBufferString(bodyf(ctx)) - } - - r, err = http.NewRequest(method, u, body) - if err != nil { - t.Fatal(err) - } - spnego.AddSPNEGONegotiate(r.Header, "Authorization", token) - - return ctx, r -} - -func TestClientAccess(t *testing.T) { - // establish a context - ctx, r := initClientContext(t, "GET", "/access/", nil) - defer ctx.Release() - - resp, err := http.DefaultClient.Do(r) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - - out, err := httputil.DumpResponse(resp, true) - if err != nil { - t.Fatal(err) - } - - bodybytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - if string(bodybytes) != "OK" { - t.Fatalf( - "Test failed: unexpected response: url:%s, code:%v, response:\n%s", - r.URL.String(), resp.StatusCode, string(out)) - } -} diff --git a/vendor/github.com/apcera/gssapi/test/client_inquire_context_test.go b/vendor/github.com/apcera/gssapi/test/client_inquire_context_test.go deleted file mode 100644 index 4cbec83691c0..000000000000 --- a/vendor/github.com/apcera/gssapi/test/client_inquire_context_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. 
- -// +build clienttest - -package test - -import ( - "fmt" - "io/ioutil" - "net/http" - "regexp" - "strings" - "testing" -) - -func verifyInquireContextResult(t *testing.T, result string, regexps []string) { - rr := strings.Split(result, " ") - if len(rr) != len(regexps) { - t.Fatalf("got %v fragments, expected %v (%s)", len(rr), len(regexps), result) - } - - for i, r := range rr { - rx := regexp.MustCompile(regexps[i]) - if !rx.MatchString(r) { - t.Errorf("%s does not match %s", r, regexps[i]) - } - } -} - -func TestClientInquireContext(t *testing.T) { - ctx, r := initClientContext(t, "GET", "/inquire_context/", nil) - defer ctx.Release() - - srcName, targetName, lifetimeRec, mechType, ctxFlags, - locallyInitiated, open, err := ctx.InquireContext() - if err != nil { - t.Fatal(err) - } - defer srcName.Release() - defer targetName.Release() - - verifyInquireContextResult(t, - fmt.Sprintf("%q %q %v %q %x %v %v", - srcName, targetName, lifetimeRec, mechType.DebugString(), ctxFlags, - locallyInitiated, open), - []string{ - `"[a-zA-Z_-]+@[[:graph:]]+"`, - `"HTTP/[[:graph:]]+@[[:graph:]]+"`, - `[0-9a-z]+`, - `[A-Z]+`, - "1b0", - "true", - "true", - }) - - resp, err := http.DefaultClient.Do(r) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - t.Fatalf("Expected status 200, got %v", resp.StatusCode) - } - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - verifyInquireContextResult(t, string(body), - []string{ - `"[a-zA-Z_-]+@[[:graph:]]+"`, - `"HTTP/[[:graph:]]+@[[:graph:]]+"`, - `[0-9a-z]+`, - `[A-Z]+`, - "130", - "false", - "true", - }) -} diff --git a/vendor/github.com/apcera/gssapi/test/client_message_test.go b/vendor/github.com/apcera/gssapi/test/client_message_test.go deleted file mode 100644 index 18f75b0a477d..000000000000 --- a/vendor/github.com/apcera/gssapi/test/client_message_test.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. 
- -// +build clienttest - -package test - -import ( - "encoding/base64" - "io/ioutil" - "net/http" - "testing" - - "github.com/apcera/gssapi" -) - -func TestClientWrap(t *testing.T) { - b := "test message in body" - - bodyf := func(ctx *gssapi.CtxId) string { - // Wrap and send a message to the service - buf, err := c.MakeBufferString(b) - if err != nil { - t.Fatal(err) - } - defer buf.Release() - - _, wrapped, err := ctx.Wrap(true, gssapi.GSS_C_QOP_DEFAULT, buf) - if err != nil { - t.Fatal(err) - } - defer wrapped.Release() - - return base64.StdEncoding.EncodeToString(wrapped.Bytes()) - } - - ctx, r := initClientContext(t, "POST", "/unwrap/", bodyf) - defer ctx.Release() - - resp, err := http.DefaultClient.Do(r) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - - // if successful, the response body is the same message, re-wrapped by - // the service, unwrap and compare - wrapped64bytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - wrappedbytes, err := base64.StdEncoding.DecodeString(string(wrapped64bytes)) - if err != nil { - t.Fatal(err) - } - wrapped, err := c.MakeBufferBytes(wrappedbytes) - if err != nil { - t.Fatal(err) - } - defer wrapped.Release() - unwrapped, _, _, err := ctx.Unwrap(wrapped) - if err != nil { - t.Fatal(err) - } - defer unwrapped.Release() - - if unwrapped.String() != b { - t.Fatalf("Got %q, expected %q", unwrapped.String(), b) - } -} - -func TestClientMIC(t *testing.T) { - b := "test message in body" - - ctx, r := initClientContext(t, "POST", "/verify_mic/", - func(ctx *gssapi.CtxId) string { - return b - }) - defer ctx.Release() - - body, err := c.MakeBufferString(b) - if err != nil { - t.Fatal(err) - } - defer body.Release() - - mic, err := ctx.GetMIC(gssapi.GSS_C_QOP_DEFAULT, body) - if err != nil { - t.Fatal(err) - } - defer mic.Release() - - r.Header.Set(micHeader, - base64.StdEncoding.EncodeToString(mic.Bytes())) - - resp, err := http.DefaultClient.Do(r) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - t.Fatalf("Expected %v, got %v", http.StatusOK, resp.StatusCode) - } -} diff --git a/vendor/github.com/apcera/gssapi/test/docker/client/Dockerfile b/vendor/github.com/apcera/gssapi/test/docker/client/Dockerfile deleted file mode 100644 index 65b1b98239c4..000000000000 --- a/vendor/github.com/apcera/gssapi/test/docker/client/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2013-2015 Apcera Inc. All rights reserved. - -FROM ubuntu:14.04 -RUN apt-get -y update -RUN apt-get -y install \ - gcc \ - krb5-user \ - libgssapi-krb5-2 \ - libkrb5-dev \ - libsasl2-modules-gssapi-mit \ - wget -RUN (cd /tmp && wget https://storage.googleapis.com/golang/go1.5.linux-amd64.tar.gz && tar xvf go1.5.linux-amd64.tar.gz && mv go/ /opt) -ENV GOROOT /opt/go - -ADD krb5.conf.template /tmp/krb5.conf.template -ENV KRB5_CONFIG_TEMPLATE /tmp/krb5.conf.template -ENV KRB5_CONFIG /opt/go-gssapi-test-client/krb5.conf -ENV GSSAPI_PATH /usr/lib/x86_64-linux-gnu/libgssapi_krb5.so.2 -ENV TEST_DIR /opt/go-gssapi-test-client -ADD entrypoint.sh /opt/go-gssapi-test-client/entrypoint.sh -ENTRYPOINT /opt/go-gssapi-test-client/entrypoint.sh diff --git a/vendor/github.com/apcera/gssapi/test/docker/client/entrypoint.sh b/vendor/github.com/apcera/gssapi/test/docker/client/entrypoint.sh deleted file mode 100755 index 4eda1257723a..000000000000 --- a/vendor/github.com/apcera/gssapi/test/docker/client/entrypoint.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash -eu - -# Copyright 2013-2015 Apcera Inc. 
All rights reserved. - -# This script is used in the context of a docker VM when running the linux -# client test, and in the context of OS X when running on the Macintosh. The -# following variables must be set (either via --link or explicitly) -# KDC_PORT_88_TCP_ADDR -# KDC_PORT_88_TCP_PORT -# KDC address and port -# -# SERVICE_PORT_80_TCP_ADDR -# SERVICE_PORT_80_TCP_PORT -# HTTP service address and port -# -# KRB5_CONFIG_TEMPLATE -# KRB5_CONFIG -# The locations of the krb5.conf template, and where the -# processed file must go -# -# GSSAPI_PATH -# The gssapi .so -# -# TEST_DIR -# The directory to build the client test app in -# -# SERVICE_NAME -# REALM_NAME -# DOMAIN_NAME -# USER_NAME -# USER_PASSWORD - -export PATH=$PATH:$GOROOT/bin - -cat $KRB5_CONFIG_TEMPLATE \ - | sed -e "s/KDC_ADDRESS/$KDC_PORT_88_TCP_ADDR:$KDC_PORT_88_TCP_PORT/g" \ - | sed -e "s/DOMAIN_NAME/${DOMAIN_NAME}/g" \ - | sed -e "s/REALM_NAME/${REALM_NAME}/g" \ - > $KRB5_CONFIG - -echo ${USER_PASSWORD} | kinit -V ${USER_NAME}@${REALM_NAME} >/dev/null - -(cd $TEST_DIR && go test -c -o test -tags 'clienttest' github.com/apcera/gssapi/test) - -# --test.bench=. -# --test.benchtime=2s -$TEST_DIR/test \ - --test.v=true \ - --debug=true \ - --service-name=${SERVICE_NAME} \ - --service-address=$SERVICE_PORT_80_TCP_ADDR:$SERVICE_PORT_80_TCP_PORT \ - --gssapi-path=$GSSAPI_PATH \ - 2>&1 diff --git a/vendor/github.com/apcera/gssapi/test/docker/client/krb5.conf.template b/vendor/github.com/apcera/gssapi/test/docker/client/krb5.conf.template deleted file mode 100644 index 39f16a58e24e..000000000000 --- a/vendor/github.com/apcera/gssapi/test/docker/client/krb5.conf.template +++ /dev/null @@ -1,14 +0,0 @@ -# REALM_NAME, KDC_ADDRESS, and DOMAIN_NAME will be replaced by the script - -[libdefaults] - default_realm = REALM_NAME - noaddresses = true - -[realms] - REALM_NAME = { - kdc = KDC_ADDRESS - } - -[domain_realm] - DOMAIN_NAME = REALM_NAME - .DOMAIN_NAME = REALM_NAME diff --git a/vendor/github.com/apcera/gssapi/test/docker/kdc/Dockerfile b/vendor/github.com/apcera/gssapi/test/docker/kdc/Dockerfile deleted file mode 100644 index debf95df6963..000000000000 --- a/vendor/github.com/apcera/gssapi/test/docker/kdc/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM ubuntu:14.04 -ADD krb5.conf /etc/krb5.conf - -RUN apt-get -y update -RUN apt-get -y install heimdal-kdc - -ADD entrypoint.sh /etc/docker-kdc/entrypoint.sh -EXPOSE 88 -ENTRYPOINT /etc/docker-kdc/entrypoint.sh - diff --git a/vendor/github.com/apcera/gssapi/test/docker/kdc/entrypoint.sh b/vendor/github.com/apcera/gssapi/test/docker/kdc/entrypoint.sh deleted file mode 100755 index 146ce4958b8c..000000000000 --- a/vendor/github.com/apcera/gssapi/test/docker/kdc/entrypoint.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -eu - -# Add kerberos principals. -echo -e "\n\n\n\n\n\n${USER_PASSWORD}\n${USER_PASSWORD}\n" | kadmin -l add ${SERVICE_NAME}@${REALM_NAME} -echo -e "\n\n\n\n\n\n${USER_PASSWORD}\n${USER_PASSWORD}\n" | kadmin -l add ${USER_NAME}@${REALM_NAME} -#kadmin -l list --long ${USER_NAME}@${REALM_NAME} -#kadmin -l list --long ${SERVICE_NAME}@${REALM_NAME} - -# Export keytab. -kadmin -l ext_keytab -k /etc/docker-kdc/krb5.keytab ${SERVICE_NAME}@${REALM_NAME} - -# KDC daemon startup. -#TODO -- what's relevant in this config? Need to provide my own? 
-exec /usr/lib/heimdal-servers/kdc --config-file=/etc/heimdal-kdc/kdc.conf -P 88 - - diff --git a/vendor/github.com/apcera/gssapi/test/docker/kdc/krb5.conf.template b/vendor/github.com/apcera/gssapi/test/docker/kdc/krb5.conf.template deleted file mode 100644 index 39f16a58e24e..000000000000 --- a/vendor/github.com/apcera/gssapi/test/docker/kdc/krb5.conf.template +++ /dev/null @@ -1,14 +0,0 @@ -# REALM_NAME, KDC_ADDRESS, and DOMAIN_NAME will be replaced by the script - -[libdefaults] - default_realm = REALM_NAME - noaddresses = true - -[realms] - REALM_NAME = { - kdc = KDC_ADDRESS - } - -[domain_realm] - DOMAIN_NAME = REALM_NAME - .DOMAIN_NAME = REALM_NAME diff --git a/vendor/github.com/apcera/gssapi/test/docker/service/Dockerfile b/vendor/github.com/apcera/gssapi/test/docker/service/Dockerfile deleted file mode 100644 index 0e896df40fd9..000000000000 --- a/vendor/github.com/apcera/gssapi/test/docker/service/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM ubuntu:14.04 -RUN apt-get -y update -RUN apt-get -y install \ - gcc \ - libgssapi-krb5-2 \ - libkrb5-dev \ - libsasl2-modules-gssapi-mit \ - wget - -RUN (cd /tmp && wget https://storage.googleapis.com/golang/go1.5.linux-amd64.tar.gz && tar xvf go1.5.linux-amd64.tar.gz && mv go/ /opt) -ENV GOROOT="/opt/go" -ADD krb5.keytab /opt/go-gssapi-test-service/krb5.keytab -ADD krb5.conf.template /tmp/krb5.conf.template -ADD entrypoint.sh /opt/go-gssapi-test-service/entrypoint.sh - -EXPOSE 80 -ENTRYPOINT /opt/go-gssapi-test-service/entrypoint.sh diff --git a/vendor/github.com/apcera/gssapi/test/docker/service/entrypoint.sh b/vendor/github.com/apcera/gssapi/test/docker/service/entrypoint.sh deleted file mode 100755 index c13a500fc10d..000000000000 --- a/vendor/github.com/apcera/gssapi/test/docker/service/entrypoint.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -eu - -export PATH=$PATH:$GOROOT/bin - -cat /tmp/krb5.conf.template \ - | sed -e "s/KDC_ADDRESS/$KDC_PORT_88_TCP_ADDR:$KDC_PORT_88_TCP_PORT/g" \ - | sed -e "s/DOMAIN_NAME/${DOMAIN_NAME}/g" \ - | sed -e "s/REALM_NAME/${REALM_NAME}/g" \ - > /opt/go-gssapi-test-service/krb5.conf - -(cd /opt/go-gssapi-test-service && go test -c -o test -tags 'servicetest' github.com/apcera/gssapi/test) - -exec /opt/go-gssapi-test-service/test \ - --test.v=true \ - --debug=true \ - --service=true \ - --service-name=${SERVICE_NAME} \ - --service-address=:80 \ - --gssapi-path=/usr/lib/x86_64-linux-gnu/libgssapi_krb5.so.2 \ - --krb5-ktname=/opt/go-gssapi-test-service/krb5.keytab \ - --krb5-config=/opt/go-gssapi-test-service/krb5.conf \ - 2>&1 diff --git a/vendor/github.com/apcera/gssapi/test/docker/service/krb5.conf.template b/vendor/github.com/apcera/gssapi/test/docker/service/krb5.conf.template deleted file mode 100644 index 39f16a58e24e..000000000000 --- a/vendor/github.com/apcera/gssapi/test/docker/service/krb5.conf.template +++ /dev/null @@ -1,14 +0,0 @@ -# REALM_NAME, KDC_ADDRESS, and DOMAIN_NAME will be replaced by the script - -[libdefaults] - default_realm = REALM_NAME - noaddresses = true - -[realms] - REALM_NAME = { - kdc = KDC_ADDRESS - } - -[domain_realm] - DOMAIN_NAME = REALM_NAME - .DOMAIN_NAME = REALM_NAME diff --git a/vendor/github.com/apcera/gssapi/test/main_test.go b/vendor/github.com/apcera/gssapi/test/main_test.go deleted file mode 100644 index b94b04605b11..000000000000 --- a/vendor/github.com/apcera/gssapi/test/main_test.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. 
- -package test - -import ( - "encoding/json" - "flag" - "fmt" - "log" - "os" - "sync" - "testing" - - "github.com/apcera/gssapi" -) - -const ( - micHeader = "X-go-gssapi-test-mic" -) - -type Context struct { - DebugLog bool - RunAsService bool - ServiceName string - ServiceAddress string - - gssapi.Options - - *gssapi.Lib `json:"-"` - loadonce sync.Once - - // Service credentials loaded from keytab - credential *gssapi.CredId -} - -var c = &Context{} - -func init() { - flag.BoolVar(&c.DebugLog, "debug", false, "Output debug log") - flag.BoolVar(&c.RunAsService, "service", false, "Stay running as sample service after executing the tests") - flag.StringVar(&c.ServiceName, "service-name", "SampleService", "service name") - flag.StringVar(&c.ServiceAddress, "service-address", ":8080", "service address hostname:port") - flag.StringVar(&c.Options.LibPath, "gssapi-path", "", "use the specified path to libgssapi shared object") - flag.StringVar(&c.Options.Krb5Ktname, "krb5-ktname", "", "path to the keytab file") - flag.StringVar(&c.Options.Krb5Config, "krb5-config", "", "path to krb5.config file") -} - -func TestMain(m *testing.M) { - flag.Parse() - prefix := "go-gssapi-test-client" - if c.RunAsService { - prefix = "go-gssapi-test-service" - } - lib, err := loadlib(c.DebugLog, prefix) - if err != nil { - log.Fatal(err) - } - c.Lib = lib - - j, _ := json.MarshalIndent(c, "", " ") - c.Debug(fmt.Sprintf("Config: %s", string(j))) - - code := m.Run() - if code != 0 { - os.Exit(code) - } - - if c.RunAsService { - log.Fatal(Service(c)) - } -} - -func loadlib(debug bool, prefix string) (*gssapi.Lib, error) { - max := gssapi.Err + 1 - if debug { - max = gssapi.MaxSeverity - } - pp := make([]gssapi.Printer, 0, max) - for i := gssapi.Severity(0); i < max; i++ { - p := log.New(os.Stderr, - fmt.Sprintf("%s: %s\t", prefix, i), - log.LstdFlags) - pp = append(pp, p) - } - c.Options.Printers = pp - - lib, err := gssapi.Load(&c.Options) - if err != nil { - return nil, err - } - return lib, nil -} - -func prepareServiceName(t *testing.T) *gssapi.Name { - if c.ServiceName == "" { - t.Fatal("Need a --service-name") - } - - nameBuf, err := c.MakeBufferString(c.ServiceName) - if err != nil { - t.Fatal(err) - } - defer nameBuf.Release() - - name, err := nameBuf.Name(c.GSS_KRB5_NT_PRINCIPAL_NAME) - if err != nil { - t.Fatal(err) - } - if name.String() != c.ServiceName { - t.Fatalf("name: got %q, expected %q", name.String(), c.ServiceName) - } - - return name -} diff --git a/vendor/github.com/apcera/gssapi/test/run-heimdal.sh b/vendor/github.com/apcera/gssapi/test/run-heimdal.sh deleted file mode 100755 index 603214a7bacd..000000000000 --- a/vendor/github.com/apcera/gssapi/test/run-heimdal.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -eu - -# Copyright 2013-2015 Apcera Inc. All rights reserved. - -REUSE_DOCKER_IMAGES="" \ -SERVICE_LOG_FILTER="" \ -EXT_KDC_HOST="" \ -EXT_KDC_PORT="" \ -KEYTAB_FILE="" \ -SERVICE_NAME="HTTP/auth.www.xample.test" \ -REALM_NAME="XAMPLE.TEST" \ -DOMAIN_NAME="xample.test" \ -USER_NAME="testuser" \ -USER_PASSWORD="P@ssword!" \ -CLIENT_IN_CONTAINER="yes" \ - ./run.sh - - diff --git a/vendor/github.com/apcera/gssapi/test/run.sh b/vendor/github.com/apcera/gssapi/test/run.sh deleted file mode 100755 index ecda43a0390a..000000000000 --- a/vendor/github.com/apcera/gssapi/test/run.sh +++ /dev/null @@ -1,209 +0,0 @@ -#!/bin/bash -eu - -# Copyright 2013-2015 Apcera Inc. All rights reserved. 
- -result="NOT OK FAILED" - -# boot2docker doesn't seem to like /tmp so use the home directory for the build -BASE_DIR="$(cd .. && pwd)" -export TEST_DIR="$HOME/tmp/$(uuidgen)" -mkdir -p -- "$TEST_DIR" -cp -R "$BASE_DIR" "$TEST_DIR" -DOCKER_DIR="$TEST_DIR/gssapi/test/docker" - -if [[ "$OSTYPE" == "darwin"* ]]; then - DOCKER=docker -else - DOCKER='sudo docker' -fi - -function log() { - printf "go-gssapi-test: %s\n" "$*" >&2 -} - -function cleanup_containers() { - log "Clean up running containers" - running=`$DOCKER ps --all | grep 'go-gssapi-test' | awk '{print $1}'` - if [[ "$running" != "" ]]; then - echo $running | xargs $DOCKER stop >/dev/null - echo $running | xargs $DOCKER rm >/dev/null - fi -} - -function cleanup() { - set +e - - if [[ "${EXT_KDC_HOST:-}" == "" ]]; then - log "kdc logs: - -" - $DOCKER logs kdc 2>&1 - fi - - log "service logs: - -" - if [[ "${SERVICE_LOG_FILTER:-}" != "" ]]; then - $DOCKER logs service 2>&1 | egrep -v "gssapi-sample:\t[0-9 /:]+ ACCESS " - else - $DOCKER logs service 2>&1 - fi - - cleanup_containers - - log "Clean up build directory" - rm -rf -- "${TEST_DIR:?}" - - log $result -} - -function build_image() { - comp="$1" - name="$2" - func="$3" - img="go-gssapi-test-${name}" - image="$($DOCKER images --quiet ${img})" - - if [[ "${REUSE_DOCKER_IMAGES:-}" != "" && "$image" != "" ]]; then - log "Reuse cached docker image ${img} ${image}" - else - log "Build docker image ${img}" - if [[ "$func" != "" ]]; then - (${func}) - fi - - $DOCKER build \ - --rm \ - --quiet \ - --tag=${img} \ - "$DOCKER_DIR/${comp}" - fi -} - -# Caveat: Quote characters in USER_PASSWORD may cause Severe Pain. -# Don't do that. -# This only has to handle Docker tests, not quite the Real World, -# so we can get away with this restriction. -# -function run_image() { - comp="$1" - name="$2" - options="$3" - img="go-gssapi-test-${name}" - log "Run docker image ${img}" - options="${options} \ - --hostname=${comp} \ - --name=${comp} \ - --env SERVICE_NAME=${SERVICE_NAME} \ - --env USER_NAME=${USER_NAME} \ - --env USER_PASSWORD=${USER_PASSWORD} \ - --env REALM_NAME=${REALM_NAME} \ - --env DOMAIN_NAME=${DOMAIN_NAME}" - $DOCKER run -P ${options} ${img} -} - -function map_ports() { - comp="$1" - port="$2" - COMP="$(printf "%s\n" "$comp" | tr '[:lower:]' '[:upper:]')" - if [[ "${OSTYPE}" == "darwin"* ]]; then - b2d_ip=$(docker-machine ip default) - export ${COMP}_PORT_${port}_TCP_ADDR=${b2d_ip} - else - export ${COMP}_PORT_${port}_TCP_ADDR=127.0.0.1 - fi - export ${COMP}_PORT_${port}_TCP_PORT=$($DOCKER port ${comp} ${port} | cut -f2 -d ':') -} - -function wait_until_available() { - comp="$1" - addr="$2" - port="$3" - - let i=1 - while ! 
echo exit | nc $addr $port >/dev/null; do - echo "Waiting for $comp to start" - sleep 1 - let i++ - if (( i > 10 )); then - echo "Timed out waiting for ${comp} to start at ${addr}:${port}" - exit 1 - fi - done -} - -# Cleanup -trap 'cleanup' INT TERM EXIT -cleanup_containers - -env_suffix=$(/bin/echo "${REALM_NAME}-${SERVICE_NAME}" | shasum | cut -f1 -d ' ') - -# KDC -if [[ "${EXT_KDC_HOST}" == "" ]]; then - cat "$DOCKER_DIR/kdc/krb5.conf.template" \ - | sed -e "s/KDC_ADDRESS/0.0.0.0:88/g" \ - | sed -e "s/DOMAIN_NAME/${DOMAIN_NAME}/g" \ - | sed -e "s/REALM_NAME/${REALM_NAME}/g" \ - > "$DOCKER_DIR/kdc/krb5.conf" - - build_image "kdc" "kdc-${env_suffix}" "" >/dev/null - run_image "kdc" "kdc-${env_suffix}" "--detach" >/dev/null - map_ports "kdc" 88 -else - export KDC_PORT_88_TCP_ADDR=${EXT_KDC_HOST} - export KDC_PORT_88_TCP_PORT=${EXT_KDC_PORT} -fi -wait_until_available "kdc" $KDC_PORT_88_TCP_ADDR $KDC_PORT_88_TCP_PORT - -function keytab_from_kdc() { - $DOCKER cp kdc:/etc/docker-kdc/krb5.keytab "$DOCKER_DIR/service" -} - -function keytab_from_options() { - cp "${KEYTAB_FILE}" "$DOCKER_DIR/service/krb5.keytab" -} - -if [[ "${EXT_KDC_HOST:-}" == "" ]]; then - DOCKER_KDC_OPTS='--link=kdc:kdc' - KEYTAB_FUNCTION='keytab_from_kdc' -else - DOCKER_KDC_OPTS="--env KDC_PORT_88_TCP_ADDR=${EXT_KDC_HOST} \ - --env KDC_PORT_88_TCP_PORT=${EXT_KDC_PORT}" - KEYTAB_FUNCTION='keytab_from_options' -fi - -# GSSAPI service -log "Build and unit-test gssapi on host" -go test github.com/apcera/gssapi - -build_image "service" "service-${env_suffix}" "$KEYTAB_FUNCTION" >/dev/null -run_image "service" \ - "service-${env_suffix}" \ - "--detach \ - $DOCKER_KDC_OPTS \ - --volume $TEST_DIR/gssapi:/opt/go/src/github.com/apcera/gssapi" >/dev/null -map_ports "service" 80 -wait_until_available "service" $SERVICE_PORT_80_TCP_ADDR $SERVICE_PORT_80_TCP_PORT - -# GSSAPI client -if [[ "$OSTYPE" != "darwin"* || "$CLIENT_IN_CONTAINER" != "" ]]; then - build_image "client" "client" "" >/dev/null - run_image "client" \ - "client" \ - "--link=service:service \ - $DOCKER_KDC_OPTS \ - --volume $TEST_DIR/gssapi:/opt/go/src/github.com/apcera/gssapi" -else - log "Run gssapi sample client on host" - KRB5_CONFIG_TEMPLATE=${DOCKER_DIR}/client/krb5.conf.template \ - DOMAIN_NAME="${DOMAIN_NAME}" \ - GSSAPI_PATH=/opt/local/lib/libgssapi_krb5.dylib \ - KRB5_CONFIG="${TEST_DIR}/krb5.conf" \ - REALM_NAME="${REALM_NAME}" \ - SERVICE_NAME="${SERVICE_NAME}" \ - USER_NAME="${USER_NAME}" \ - USER_PASSWORD="${USER_PASSWORD}" \ - "${DOCKER_DIR}/client/entrypoint.sh" -fi - -result="OK TEST PASSED" diff --git a/vendor/github.com/apcera/gssapi/test/service.go b/vendor/github.com/apcera/gssapi/test/service.go deleted file mode 100644 index d6bd78a7873a..000000000000 --- a/vendor/github.com/apcera/gssapi/test/service.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. 
- -package test - -import ( - "fmt" - "net/http" - "os" - - "github.com/apcera/gssapi" -) - -type loggingHandler struct { - *Context - handler func(*Context, http.ResponseWriter, *http.Request) (code int, message string) -} - -func (h loggingHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - code, message := h.handler(h.Context, rw, r) - - severity := gssapi.Info - if code != http.StatusOK { - severity = gssapi.Err - rw.WriteHeader(code) - } - h.Print(severity, fmt.Sprintf( - "%d %q %q %q", code, r.Method, r.URL.String(), message)) -} - -func Service(c *Context) error { - if c.ServiceName == "" { - return fmt.Errorf("Must provide a non-empty value for --service-name") - } - c.Debug(fmt.Sprintf("Starting service %q", c.ServiceName)) - - nameBuf, err := c.MakeBufferString(c.ServiceName) - if err != nil { - return err - } - defer nameBuf.Release() - - name, err := nameBuf.Name(c.GSS_KRB5_NT_PRINCIPAL_NAME) - if err != nil { - return err - } - defer name.Release() - - cred, actualMechs, _, err := c.AcquireCred(name, - gssapi.GSS_C_INDEFINITE, c.GSS_C_NO_OID_SET, gssapi.GSS_C_ACCEPT) - actualMechs.Release() - if err != nil { - return err - } - c.credential = cred - - keytab := os.Getenv("KRB5_KTNAME") - if keytab == "" { - keytab = "default /etc/krb5.keytab" - } - c.Debug(fmt.Sprintf("Acquired credentials using %v", keytab)) - - http.Handle("/access/", loggingHandler{c, HandleAccess}) - http.Handle("/verify_mic/", loggingHandler{c, HandleVerifyMIC}) - http.Handle("/unwrap/", loggingHandler{c, HandleUnwrap}) - http.Handle("/inquire_context/", loggingHandler{c, HandleInquireContext}) - - err = http.ListenAndServe(c.ServiceAddress, nil) - if err != nil { - return err - } - - // this isn't executed since the entire container is killed, but for - // illustration purposes - c.credential.Release() - - return nil -} diff --git a/vendor/github.com/apcera/gssapi/test/service_access.go b/vendor/github.com/apcera/gssapi/test/service_access.go deleted file mode 100644 index ae959163e9a2..000000000000 --- a/vendor/github.com/apcera/gssapi/test/service_access.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. - -package test - -import ( - "net/http" - - "github.com/apcera/gssapi" - "github.com/apcera/gssapi/spnego" -) - -func HandleAccess(c *Context, w http.ResponseWriter, r *http.Request) (code int, message string) { - ctx, code, message := allowed(c, w, r) - if ctx == nil { - return code, message - } - - w.Write([]byte("OK")) - return http.StatusOK, "OK" -} - -// allowed implements the SPNEGO protocol. When the request is to be passed -// through, it returns http.StatusOK and a valid gssapi CtxId object. -// Otherwise, it sets the WWW-Authenticate header as applicable, and returns -// http.StatusUnauthorized. 
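The function below is the server half of the SPNEGO handshake this removal drops: an unauthenticated request is answered with a bare `WWW-Authenticate: Negotiate` challenge, and the client retries with a base64-encoded GSSAPI token in `Authorization`. For orientation, here is a minimal sketch of the client's half of that exchange using only the standard library; `acquireToken` and the endpoint URL are placeholders standing in for the `InitSecContext` call the deleted gssapi bindings provided, not real APIs.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
	"strings"
)

// acquireToken stands in for gssapi InitSecContext; it is a placeholder,
// not a real library call.
func acquireToken() []byte { return []byte("opaque-gssapi-token") }

func main() {
	const url = "http://service.example.test/access/" // hypothetical endpoint

	// Leg 1: send no credentials; a SPNEGO-aware server answers 401 with
	// a bare "WWW-Authenticate: Negotiate" challenge.
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusUnauthorized ||
		!strings.EqualFold(resp.Header.Get("WWW-Authenticate"), "Negotiate") {
		fmt.Println("server did not ask for SPNEGO")
		return
	}

	// Leg 2: retry with the base64 GSSAPI token, mirroring what
	// spnego.AddSPNEGONegotiate did in the deleted code.
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization",
		"Negotiate "+base64.StdEncoding.EncodeToString(acquireToken()))
	resp2, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp2.Body.Close()
	fmt.Println("status:", resp2.Status)
}
```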
-func allowed(c *Context, w http.ResponseWriter, r *http.Request) ( - ctx *gssapi.CtxId, code int, message string) { - - // returning a 401 with a challenge, but no token will make the client - // initiate security context and re-submit with a non-empty Authorization - negotiate, inputToken := spnego.CheckSPNEGONegotiate(c.Lib, r.Header, "Authorization") - if !negotiate || inputToken.Length() == 0 { - spnego.AddSPNEGONegotiate(w.Header(), "WWW-Authenticate", nil) - return nil, http.StatusUnauthorized, "no input token provided" - } - - ctx, srcName, _, outputToken, _, _, delegatedCredHandle, err := - c.AcceptSecContext(c.GSS_C_NO_CONTEXT, - c.credential, inputToken, c.GSS_C_NO_CHANNEL_BINDINGS) - - //TODO: special case handling of GSS_S_CONTINUE_NEEDED - // but it doesn't change the logic, still fail - if err != nil { - //TODO: differentiate invalid tokens here and return a 403 - //TODO: add a test for bad and maybe expired auth tokens - return nil, http.StatusInternalServerError, err.Error() - } - - srcName.Release() - delegatedCredHandle.Release() - - spnego.AddSPNEGONegotiate(w.Header(), "WWW-Authenticate", outputToken) - return ctx, http.StatusOK, "pass" -} diff --git a/vendor/github.com/apcera/gssapi/test/service_credential_test.go b/vendor/github.com/apcera/gssapi/test/service_credential_test.go deleted file mode 100644 index dae8b40aa17f..000000000000 --- a/vendor/github.com/apcera/gssapi/test/service_credential_test.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. - -//+build servicetest - -package test - -// test the credentials APIs with a keytab, configured against a real KDC - -import ( - "strings" - "testing" - "time" - - "github.com/apcera/gssapi" -) - -func TestAcquireCredential(t *testing.T) { - name := prepareServiceName(t) - defer name.Release() - if name.String() != c.ServiceName { - t.Fatalf("name: got %q, expected %q", name.String(), c.ServiceName) - } - - mechs, err := c.MakeOIDSet(c.GSS_MECH_KRB5) - if err != nil { - t.Fatal(err) - } - defer mechs.Release() - - cred, actualMechs, timeRec, err := c.AcquireCred(name, - gssapi.GSS_C_INDEFINITE, mechs, gssapi.GSS_C_ACCEPT) - defer cred.Release() - defer actualMechs.Release() - verifyCred(t, cred, actualMechs, timeRec, err) -} - -func TestAddCredential(t *testing.T) { - name := prepareServiceName(t) - defer name.Release() - if name.String() != c.ServiceName { - t.Fatalf("name: got %q, expected %q", name.String(), c.ServiceName) - } - - mechs, err := c.MakeOIDSet(c.GSS_MECH_KRB5) - if err != nil { - t.Fatal(err) - } - defer mechs.Release() - - cred := c.NewCredId() - cred, actualMechs, _, acceptorTimeRec, err := c.AddCred( - cred, name, c.GSS_MECH_KRB5, gssapi.GSS_C_ACCEPT, - gssapi.GSS_C_INDEFINITE, gssapi.GSS_C_INDEFINITE) - defer cred.Release() - defer actualMechs.Release() - verifyCred(t, cred, actualMechs, acceptorTimeRec, err) -} - -func verifyCred(t *testing.T, cred *gssapi.CredId, - actualMechs *gssapi.OIDSet, timeRec time.Duration, err error) { - - if err != nil { - t.Fatal(err) - } - if cred == nil { - t.Fatal("Got nil cred, expected non-nil") - } - if actualMechs == nil { - t.Fatal("Got nil actualMechs, expected non-nil") - } - contains, _ := actualMechs.TestOIDSetMember(c.GSS_MECH_KRB5) - if !contains { - t.Fatalf("Expected mechs to contain %q, got %q", - c.GSS_MECH_KRB5.DebugString(), - actualMechs.DebugString) - } - name, lifetime, credUsage, _, err := c.InquireCred(cred) - if err != nil { - t.Fatal(err) - } - parts := strings.Split(name.String(), "@") - if 
len(parts) != 2 || parts[0] != c.ServiceName { - t.Fatalf("name: got %q, expected %q", name.String(), c.ServiceName+"@") - } - if credUsage != gssapi.GSS_C_ACCEPT { - t.Fatalf("credUsage: got %v, expected gssapi.GSS_C_ACCEPT", credUsage) - } - if timeRec != lifetime { - t.Fatalf("timeRec:%v != lifetime:%v", timeRec, lifetime) - } -} diff --git a/vendor/github.com/apcera/gssapi/test/service_inquire_context.go b/vendor/github.com/apcera/gssapi/test/service_inquire_context.go deleted file mode 100644 index cc920cf2c502..000000000000 --- a/vendor/github.com/apcera/gssapi/test/service_inquire_context.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. - -package test - -import ( - "fmt" - "net/http" -) - -// HandleInquireContext accepts the context, unwraps, and then outputs its -// parameters obtained with InquireContext -func HandleInquireContext( - c *Context, w http.ResponseWriter, r *http.Request) ( - code int, message string) { - - ctx, code, message := allowed(c, w, r) - if ctx == nil { - return code, message - } - - srcName, targetName, lifetimeRec, mechType, ctxFlags, - locallyInitiated, open, err := ctx.InquireContext() - if err != nil { - return http.StatusInternalServerError, err.Error() - } - defer srcName.Release() - defer targetName.Release() - - body := fmt.Sprintf("%q %q %v %q %x %v %v", - srcName, targetName, lifetimeRec, mechType.DebugString(), ctxFlags, - locallyInitiated, open) - - w.Write([]byte(body)) - return http.StatusOK, "OK" -} diff --git a/vendor/github.com/apcera/gssapi/test/service_message.go b/vendor/github.com/apcera/gssapi/test/service_message.go deleted file mode 100644 index 6de2c0bc4aff..000000000000 --- a/vendor/github.com/apcera/gssapi/test/service_message.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. 
- -package test - -import ( - "encoding/base64" - "io/ioutil" - "net/http" - - "github.com/apcera/gssapi" -) - -// This test handler accepts the context, unwraps, and then re-wraps the request body -func HandleUnwrap(c *Context, w http.ResponseWriter, r *http.Request) (code int, message string) { - ctx, code, message := allowed(c, w, r) - if ctx == nil { - return code, message - } - - // Unwrap the request - wrappedbytes, err := ioutil.ReadAll( - base64.NewDecoder(base64.StdEncoding, r.Body)) - if err != nil { - return http.StatusInternalServerError, err.Error() - } - wrapped, err := c.MakeBufferBytes(wrappedbytes) - if err != nil { - return http.StatusInternalServerError, err.Error() - } - defer wrapped.Release() - - unwrapped, _, _, err := ctx.Unwrap(wrapped) - if err != nil { - return http.StatusInternalServerError, err.Error() - } - defer unwrapped.Release() - - // Re-wrap for the response - _, wrapped, err = ctx.Wrap(true, gssapi.GSS_C_QOP_DEFAULT, unwrapped) - if err != nil { - return http.StatusInternalServerError, err.Error() - } - defer wrapped.Release() - - wrapped64 := base64.StdEncoding.EncodeToString(wrapped.Bytes()) - w.Write([]byte(wrapped64)) - return http.StatusOK, "OK" -} - -func HandleVerifyMIC(c *Context, w http.ResponseWriter, r *http.Request) (code int, message string) { - ctx, code, message := allowed(c, w, r) - if ctx == nil { - return code, message - } - - mic64 := r.Header.Get(micHeader) - if mic64 == "" { - return http.StatusInternalServerError, "No " + micHeader + " header" - } - micbytes, err := base64.StdEncoding.DecodeString(mic64) - if err != nil { - return http.StatusInternalServerError, err.Error() - } - mic, err := c.MakeBufferBytes(micbytes) - if err != nil { - return http.StatusInternalServerError, err.Error() - } - - bodybytes, err := ioutil.ReadAll(r.Body) - if err != nil { - return http.StatusInternalServerError, err.Error() - } - body, err := c.MakeBufferBytes(bodybytes) - if err != nil { - return http.StatusInternalServerError, err.Error() - } - - _, err = ctx.VerifyMIC(body, mic) - if err != nil { - return http.StatusInternalServerError, err.Error() - } - - w.Write([]byte("OK")) - return http.StatusOK, "OK" -} diff --git a/vendor/github.com/apcera/gssapi/test/service_misc_test.go b/vendor/github.com/apcera/gssapi/test/service_misc_test.go deleted file mode 100644 index 9deac3f6f35e..000000000000 --- a/vendor/github.com/apcera/gssapi/test/service_misc_test.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. - -//+build servicetest - -package test - -import ( - "testing" - - "github.com/apcera/gssapi" -) - -func TestIndicateMechs(t *testing.T) { - expectedMechs := []*gssapi.OID{ - c.GSS_MECH_KRB5, - // c.GSS_MECH_KRB5_OLD, - // c.GSS_MECH_KRB5_LEGACY, - // c.GSS_MECH_IAKERB, - c.GSS_MECH_SPNEGO, - } - mechs, err := c.IndicateMechs() - if err != nil { - t.Fatal(err) - } - defer mechs.Release() - - for _, oid := range expectedMechs { - if !mechs.Contains(oid) { - t.Errorf("Expected to find %s in mechs %s", oid.DebugString(), mechs.DebugString()) - } - } -} diff --git a/vendor/github.com/apcera/gssapi/test/service_name_test.go b/vendor/github.com/apcera/gssapi/test/service_name_test.go deleted file mode 100644 index 91f257580a92..000000000000 --- a/vendor/github.com/apcera/gssapi/test/service_name_test.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2013-2015 Apcera Inc. All rights reserved. 
- -//+build servicetest - -package test - -// test the credentials APIs with a keytab, configured against a real KDC - -import ( - "strings" - "testing" -) - -func TestInquireMechsForName(t *testing.T) { - name := prepareServiceName(t) - defer name.Release() - - mechs, err := name.InquireMechs() - if err != nil { - t.Fatal(err) - } - defer mechs.Release() - contains, _ := mechs.TestOIDSetMember(c.GSS_MECH_KRB5) - if !contains { - t.Fatalf("Expected mechs to contain %s, got %s", - c.GSS_MECH_KRB5.DebugString(), mechs.DebugString()) - } -} - -func TestCanonicalizeName(t *testing.T) { - name := prepareServiceName(t) - defer name.Release() - - name, err := name.Canonicalize(c.GSS_MECH_KRB5) - if err != nil { - t.Fatal(err) - } - defer name.Release() - parts := strings.Split(name.String(), "@") - if len(parts) != 2 || parts[0] != c.ServiceName { - t.Fatalf("name: got %q, expected %q", name.String(), c.ServiceName+"@") - } -} diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go index 0c9c3d392d4d..c292db0ce07e 100644 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go @@ -129,11 +129,7 @@ func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error { if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) { return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String()) } - for j := 0; j < len(subnets); j++ { - if i == j { - continue - } - + for j := i + 1; j < len(subnets); j++ { first := firstLastIP[j][0] last := firstLastIP[j][1] if s.Contains(first) || s.Contains(last) { diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr_test.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr_test.go index 07de6f154019..64fdce8e1d13 100644 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr_test.go +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr_test.go @@ -397,15 +397,6 @@ func TestVerifyNetowrk(t *testing.T) { "192.168.12.128/26", }, }, - &testVerifyNetwork{ - CIDRBlock: "10.42.0.0/24", - CIDRList: []string{ - - "10.42.0.16/28", - "10.42.0.32/28", - "10.42.0.0/24", - }, - }, } for _, tc := range testCases { diff --git a/vendor/github.com/containerd/continuity/fs/fstest/compare.go b/vendor/github.com/containerd/continuity/fs/fstest/compare.go index b61d830828fd..0d100b624ff6 100644 --- a/vendor/github.com/containerd/continuity/fs/fstest/compare.go +++ b/vendor/github.com/containerd/continuity/fs/fstest/compare.go @@ -49,15 +49,7 @@ func CheckDirectoryEqual(d1, d2 string) error { diff := diffResourceList(m1.Resources, m2.Resources) if diff.HasDiff() { - if len(diff.Deletions) != 0 { - return errors.Errorf("directory diff between %s and %s\n%s", d1, d2, diff.String()) - } - // TODO: Also skip Recycle Bin contents in Windows layers which is used to store deleted files in some cases - for _, add := range diff.Additions { - if ok, _ := metadataFiles[add.Path()]; !ok { - return errors.Errorf("directory diff between %s and %s\n%s", d1, d2, diff.String()) - } - } + return errors.Errorf("directory diff between %s and %s\n%s", d1, d2, diff.String()) } return nil diff --git a/vendor/github.com/containerd/continuity/fs/fstest/compare_windows.go b/vendor/github.com/containerd/continuity/fs/fstest/compare_windows.go index 6b9104de0fcc..a3578199963c 100644 --- a/vendor/github.com/containerd/continuity/fs/fstest/compare_windows.go +++ 
b/vendor/github.com/containerd/continuity/fs/fstest/compare_windows.go @@ -17,6 +17,7 @@ package fstest // TODO: Any more metadata files generated by Windows layers? +// TODO: Also skip Recycle Bin contents in Windows layers which is used to store deleted files in some cases var metadataFiles = map[string]bool{ "\\System Volume Information": true, "\\WcSandboxState": true, diff --git a/vendor/github.com/containerd/continuity/fs/fstest/continuity_util.go b/vendor/github.com/containerd/continuity/fs/fstest/continuity_util.go index 9cbfc0b6c384..4d30dd01fe7d 100644 --- a/vendor/github.com/containerd/continuity/fs/fstest/continuity_util.go +++ b/vendor/github.com/containerd/continuity/fs/fstest/continuity_util.go @@ -42,7 +42,17 @@ type resourceListDifference struct { } func (l resourceListDifference) HasDiff() bool { - return len(l.Additions) > 0 || len(l.Deletions) > 0 || len(l.Updates) > 0 + if len(l.Deletions) > 0 || len(l.Updates) > 0 || (len(metadataFiles) == 0 && len(l.Additions) > 0) { + return true + } + + for _, add := range l.Additions { + if ok, _ := metadataFiles[add.Path()]; !ok { + return true + } + } + + return false } func (l resourceListDifference) String() string { diff --git a/vendor/github.com/containers/image/.gitignore b/vendor/github.com/containers/image/.gitignore deleted file mode 100644 index aa9517589139..000000000000 --- a/vendor/github.com/containers/image/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -vendor -tools.timestamp diff --git a/vendor/github.com/containers/image/.pullapprove.yml b/vendor/github.com/containers/image/.pullapprove.yml deleted file mode 100644 index 0da2fcfacb3c..000000000000 --- a/vendor/github.com/containers/image/.pullapprove.yml +++ /dev/null @@ -1,9 +0,0 @@ -approve_by_comment: true -approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)' -reject_regex: ^Rejected -reset_on_push: false -reviewers: - teams: - - image-maintainers - name: default - required: 2 diff --git a/vendor/github.com/containers/image/.travis.Dockerfile b/vendor/github.com/containers/image/.travis.Dockerfile deleted file mode 100644 index c66e32d5e939..000000000000 --- a/vendor/github.com/containers/image/.travis.Dockerfile +++ /dev/null @@ -1,4 +0,0 @@ -FROM ubuntu:zesty - -RUN apt-get -qq update && \ - apt-get install -y sudo docker.io git make golang golint btrfs-tools libdevmapper-dev libgpgme-dev libostree-dev diff --git a/vendor/github.com/containers/image/.travis.yml b/vendor/github.com/containers/image/.travis.yml deleted file mode 100644 index b79c8c2f1fa1..000000000000 --- a/vendor/github.com/containers/image/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -language: go -sudo: required -notifications: - email: false -dist: trusty -services: - - docker -os: - - linux -before_install: - - sudo docker build -t image-test -f .travis.Dockerfile . 
- - sudo chown -R $(id -u):$(id -g) $HOME/gopath - -env: - - BUILDTAGS='btrfs_noversion libdm_no_deferred_remove' - - BUILDTAGS='btrfs_noversion libdm_no_deferred_remove containers_image_openpgp' - -script: > - sudo docker run --privileged -ti --rm --user $(id -u):$(id -g) - -e TRAVIS=$TRAVIS -e TRAVIS_COMMIT_RANGE=$TRAVIS_COMMIT_RANGE - -e TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST -e TRAVIS_REPO_SLUG=$TRAVIS_REPO_SLUG - -e TRAVIS_BRANCH=$TRAVIS_BRANCH -e TRAVIS_COMMIT=$TRAVIS_COMMIT - -e GOPATH=/gopath -e TRASH_CACHE=/gopath/.trashcache - -v /etc/passwd:/etc/passwd -v /etc/sudoers:/etc/sudoers -v /etc/sudoers.d:/etc/sudoers.d - -v /var/run:/var/run:z -v $HOME/gopath:/gopath:Z - -w /gopath/src/github.com/containers/image image-test bash -c "PATH=$PATH:/gopath/bin make tools .gitvalidation validate test test-skopeo SUDO=sudo BUILDTAGS=\"$BUILDTAGS\"" diff --git a/vendor/github.com/containers/image/LICENSE b/vendor/github.com/containers/image/LICENSE deleted file mode 100644 index 953563530606..000000000000 --- a/vendor/github.com/containers/image/LICENSE +++ /dev/null @@ -1,189 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containers/image/MAINTAINERS b/vendor/github.com/containers/image/MAINTAINERS deleted file mode 100644 index e23cea9b6ce9..000000000000 --- a/vendor/github.com/containers/image/MAINTAINERS +++ /dev/null @@ -1,3 +0,0 @@ -Antonio Murdaca (@runcom) -Brandon Philips (@philips) -Miloslav Trmac (@mtrmac) diff --git a/vendor/github.com/containers/image/Makefile b/vendor/github.com/containers/image/Makefile deleted file mode 100644 index a8b7457fa1df..000000000000 --- a/vendor/github.com/containers/image/Makefile +++ /dev/null @@ -1,72 +0,0 @@ -.PHONY: all tools test validate lint - -# Which github repository and branch to use for testing with skopeo -SKOPEO_REPO = projectatomic/skopeo -SKOPEO_BRANCH = master -# Set SUDO=sudo to run container integration tests using sudo. -SUDO = -BUILDTAGS = btrfs_noversion libdm_no_deferred_remove -BUILDFLAGS := -tags "$(BUILDTAGS)" - -PACKAGES := $(shell go list ./... | grep -v github.com/containers/image/vendor) - -all: tools .gitvalidation test validate - -tools: tools.timestamp - -tools.timestamp: Makefile - @go get -u $(BUILDFLAGS) github.com/golang/lint/golint - @go get $(BUILDFLAGS) github.com/vbatts/git-validation - @go get -u github.com/rancher/trash - @touch tools.timestamp - -vendor: tools.timestamp vendor.conf - @trash - @touch vendor - -clean: - rm -rf vendor tools.timestamp - -test: vendor - @go test $(BUILDFLAGS) -cover $(PACKAGES) - -# This is not run as part of (make all), but Travis CI does run this. -# Demonstrating a working version of skopeo (possibly with modified SKOPEO_REPO/SKOPEO_BRANCH, e.g. 
-# make test-skopeo SKOPEO_REPO=runcom/skopeo-1 SKOPEO_BRANCH=oci-3 SUDO=sudo
-# ) is a requirement before merging; note that Travis will only test
-# the master branch of the upstream repo.
-test-skopeo:
-	@echo === Testing skopeo build
-	@export GOPATH=$$(mktemp -d) && \
-	skopeo_path=$${GOPATH}/src/github.com/projectatomic/skopeo && \
-	vendor_path=$${skopeo_path}/vendor/github.com/containers/image && \
-	git clone -b $(SKOPEO_BRANCH) https://github.com/$(SKOPEO_REPO) $${skopeo_path} && \
-	rm -rf $${vendor_path} && cp -r . $${vendor_path} && rm -rf $${vendor_path}/vendor && \
-	cd $${skopeo_path} && \
-	make BUILDTAGS="$(BUILDTAGS)" binary-local test-all-local && \
-	$(SUDO) make BUILDTAGS="$(BUILDTAGS)" check && \
-	rm -rf $${skopeo_path}
-
-validate: lint
-	@go vet $(PACKAGES)
-	@test -z "$$(gofmt -s -l . | grep -ve '^vendor' | tee /dev/stderr)"
-
-lint:
-	@out="$$(golint $(PACKAGES))"; \
-	if [ -n "$$out" ]; then \
-		echo "$$out"; \
-		exit 1; \
-	fi
-
-.PHONY: .gitvalidation
-
-EPOCH_TEST_COMMIT ?= e68e0e1110e64f906f9b482e548f17d73e02e6b1
-
-# When this is running in travis, it will only check the travis commit range
-.gitvalidation:
-	@which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found. Consider 'make clean && make tools'" && false)
-ifeq ($(TRAVIS),true)
-	@git-validation -q -run DCO,short-subject,dangling-whitespace
-else
-	@git-validation -q -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..HEAD
-endif
diff --git a/vendor/github.com/containers/image/README.md b/vendor/github.com/containers/image/README.md
deleted file mode 100644
index 8e812bb72038..000000000000
--- a/vendor/github.com/containers/image/README.md
+++ /dev/null
@@ -1,80 +0,0 @@
-[![GoDoc](https://godoc.org/github.com/containers/image?status.svg)](https://godoc.org/github.com/containers/image) [![Build Status](https://travis-ci.org/containers/image.svg?branch=master)](https://travis-ci.org/containers/image)
-=
-
-`image` is a set of Go libraries aimed at working in various ways with
-containers' images and container image registries.
-
-The containers/image library allows applications to pull and push images from
-container image registries, like the upstream docker registry. It also
-implements "simple image signing".
-
-The containers/image library also allows you to inspect a repository on a
-container registry without pulling down the image. This means it fetches the
-repository's manifest and it is able to show you a `docker inspect`-like JSON
-output about a whole repository or a tag. This library, in contrast to `docker
-inspect`, helps you gather useful information about a repository or a tag
-without requiring you to run `docker pull`.
-
-The containers/image library also allows you to translate from one image format
-to another, for example docker container images to OCI images. It also allows
-you to copy container images between various registries, possibly converting
-them as necessary, and to sign and verify images.
-
-## Command-line usage
-
-The containers/image project is only a library with no user interface;
-you can either incorporate it into your Go programs, or use the `skopeo` tool:
-
-The [skopeo](https://github.com/projectatomic/skopeo) tool uses the
-containers/image library and takes advantage of many of its features,
-e.g. `skopeo copy` exposes the `containers/image/copy.Image` functionality.
-
-## Dependencies
-
-This library does not ship a committed version of its dependencies in a `vendor`
-subdirectory. This is so you can make well-informed decisions about which
-libraries you should use with this package in your own projects, and because
-types defined in the `vendor` directory would be impossible to use from your projects.
-
-The dependency versions this project is tested against are listed
-[in vendor.conf](https://github.com/containers/image/blob/master/vendor.conf).
-
-## Building
-
-If you want to see what the library can do, or an example of how it is called,
-consider starting with the [skopeo](https://github.com/projectatomic/skopeo) tool
-instead.
-
-To integrate this library into your project, put it into `$GOPATH` or use
-your preferred vendoring tool to include a copy in your project.
-Ensure that the dependencies documented [in vendor.conf](https://github.com/containers/image/blob/master/vendor.conf)
-are also available
-(using those exact versions or different versions of your choosing).
-
-This library, by default, also depends on the GpgME and libostree C libraries. Either install them:
-```sh
-Fedora$ dnf install gpgme-devel libassuan-devel libostree-devel
-macOS$ brew install gpgme
-```
-or use the build tags described below to avoid the dependencies (e.g. using `go build -tags …`).
-
-### Supported build tags
-
-- `containers_image_openpgp`: Use a Golang-only OpenPGP implementation for signature verification instead of the default cgo/gpgme-based implementation;
-the primary downside is that creating new signatures with the Golang-only implementation is not supported.
-- `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries.
-
-  (Note that explicitly importing `github.com/containers/image/ostree` will still depend on the `libostree` library; this build tag only affects generic users of …`/alltransports`.)
-
-## Contributing
-
-When developing this library, please use `make` (or `make … BUILDTAGS=…`) to take advantage of the tests and validation.
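
For orientation while reviewing this removal: the `copy.Image` entry point deleted below (see `copy/copy.go`) was typically driven roughly as follows. This is a minimal sketch against the vendored API as of this commit; the image references, the permissive policy, and the output directory are illustrative assumptions, not values taken from this repository:

```go
package main

import (
	"log"
	"os"

	"github.com/containers/image/copy"
	"github.com/containers/image/signature"
	"github.com/containers/image/transports/alltransports"
)

func main() {
	// A permissive policy for illustration only; real callers would load
	// the host policy (e.g. /etc/containers/policy.json) instead.
	policy := &signature.Policy{
		Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()},
	}
	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		log.Fatal(err)
	}
	defer policyContext.Destroy()

	// Transport-prefixed names, parsed by the alltransports package.
	srcRef, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
	if err != nil {
		log.Fatal(err)
	}
	// The dir: destination writes into an existing directory.
	if err := os.MkdirAll("/tmp/busybox", 0755); err != nil {
		log.Fatal(err)
	}
	destRef, err := alltransports.ParseImageName("dir:/tmp/busybox")
	if err != nil {
		log.Fatal(err)
	}

	// copy.Image validates the source against policyContext, then streams
	// the config, layers, and signatures to the destination transport.
	if err := copy.Image(policyContext, destRef, srcRef, &copy.Options{
		ReportWriter: os.Stdout,
	}); err != nil {
		log.Fatal(err)
	}
}
```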
- -## License - -ASL 2.0 - -## Contact - -- Mailing list: [containers-dev](https://groups.google.com/forum/?hl=en#!forum/containers-dev) -- IRC: #[container-projects](irc://irc.freenode.net:6667/#container-projects) on freenode.net diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go deleted file mode 100644 index bb52ea7630ff..000000000000 --- a/vendor/github.com/containers/image/copy/copy.go +++ /dev/null @@ -1,663 +0,0 @@ -package copy - -import ( - "bytes" - "compress/gzip" - "context" - "fmt" - "io" - "io/ioutil" - "reflect" - "runtime" - "strings" - "time" - - pb "gopkg.in/cheggaaa/pb.v1" - - "github.com/containers/image/image" - "github.com/containers/image/pkg/compression" - "github.com/containers/image/signature" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type digestingReader struct { - source io.Reader - digester digest.Digester - expectedDigest digest.Digest - validationFailed bool -} - -// imageCopier allows us to keep track of diffID values for blobs, and other -// data, that we're copying between images, and cache other information that -// might allow us to take some shortcuts -type imageCopier struct { - copiedBlobs map[digest.Digest]digest.Digest - cachedDiffIDs map[digest.Digest]digest.Digest - manifestUpdates *types.ManifestUpdateOptions - dest types.ImageDestination - src types.Image - rawSource types.ImageSource - diffIDsAreNeeded bool - canModifyManifest bool - reportWriter io.Writer - progressInterval time.Duration - progress chan types.ProgressProperties -} - -// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error -// and set validationFailed to true if the source stream does not match expectedDigest. -func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { - if err := expectedDigest.Validate(); err != nil { - return nil, errors.Errorf("Invalid digest specification %s", expectedDigest) - } - digestAlgorithm := expectedDigest.Algorithm() - if !digestAlgorithm.Available() { - return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm) - } - return &digestingReader{ - source: source, - digester: digestAlgorithm.Digester(), - expectedDigest: expectedDigest, - validationFailed: false, - }, nil -} - -func (d *digestingReader) Read(p []byte) (int, error) { - n, err := d.source.Read(p) - if n > 0 { - if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil { - // Coverage: This should not happen, the hash.Hash interface requires - // d.digest.Write to never return an error, and the io.Writer interface - // requires n2 == len(input) if no error is returned. - return 0, errors.Wrapf(err, "Error updating digest during verification: %d vs. %d", n2, n) - } - } - if err == io.EOF { - actualDigest := d.digester.Digest() - if actualDigest != d.expectedDigest { - d.validationFailed = true - return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest) - } - } - return n, err -} - -// Options allows supplying non-default configuration modifying the behavior of CopyImage. -type Options struct { - RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. 
- SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), - ReportWriter io.Writer - SourceCtx *types.SystemContext - DestinationCtx *types.SystemContext - ProgressInterval time.Duration // time to wait between reports to signal the progress channel - Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset. -} - -// Image copies image from srcRef to destRef, using policyContext to validate -// source image admissibility. -func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (retErr error) { - // NOTE this function uses an output parameter for the error return value. - // Setting this and returning is the ideal way to return an error. - // - // the defers in this routine will wrap the error return with its own errors - // which can be valuable context in the middle of a multi-streamed copy. - if options == nil { - options = &Options{} - } - - reportWriter := ioutil.Discard - - if options.ReportWriter != nil { - reportWriter = options.ReportWriter - } - - writeReport := func(f string, a ...interface{}) { - fmt.Fprintf(reportWriter, f, a...) - } - - dest, err := destRef.NewImageDestination(options.DestinationCtx) - if err != nil { - return errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef)) - } - defer func() { - if err := dest.Close(); err != nil { - retErr = errors.Wrapf(retErr, " (dest: %v)", err) - } - }() - - destSupportedManifestMIMETypes := dest.SupportedManifestMIMETypes() - - rawSource, err := srcRef.NewImageSource(options.SourceCtx, destSupportedManifestMIMETypes) - if err != nil { - return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef)) - } - unparsedImage := image.UnparsedFromSource(rawSource) - defer func() { - if unparsedImage != nil { - if err := unparsedImage.Close(); err != nil { - retErr = errors.Wrapf(retErr, " (unparsed: %v)", err) - } - } - }() - - // Please keep this policy check BEFORE reading any other information about the image. - if allowed, err := policyContext.IsRunningImageAllowed(unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. 
- return errors.Wrap(err, "Source image rejected") - } - src, err := image.FromUnparsedImage(unparsedImage) - if err != nil { - return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(srcRef)) - } - unparsedImage = nil - defer func() { - if err := src.Close(); err != nil { - retErr = errors.Wrapf(retErr, " (source: %v)", err) - } - }() - - if err := checkImageDestinationForCurrentRuntimeOS(src, dest); err != nil { - return err - } - - if src.IsMultiImage() { - return errors.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef)) - } - - var sigs [][]byte - if options.RemoveSignatures { - sigs = [][]byte{} - } else { - writeReport("Getting image source signatures\n") - s, err := src.Signatures(context.TODO()) - if err != nil { - return errors.Wrap(err, "Error reading signatures") - } - sigs = s - } - if len(sigs) != 0 { - writeReport("Checking if image destination supports signatures\n") - if err := dest.SupportsSignatures(); err != nil { - return errors.Wrap(err, "Can not copy signatures") - } - } - - canModifyManifest := len(sigs) == 0 - manifestUpdates := types.ManifestUpdateOptions{} - manifestUpdates.InformationOnly.Destination = dest - - if err := updateEmbeddedDockerReference(&manifestUpdates, dest, src, canModifyManifest); err != nil { - return err - } - - // We compute preferredManifestMIMEType only to show it in error messages. - // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed. - preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := determineManifestConversion(&manifestUpdates, src, destSupportedManifestMIMETypes, canModifyManifest) - if err != nil { - return err - } - - // If src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates) will be true, it needs to be true by the time we get here. - ic := imageCopier{ - copiedBlobs: make(map[digest.Digest]digest.Digest), - cachedDiffIDs: make(map[digest.Digest]digest.Digest), - manifestUpdates: &manifestUpdates, - dest: dest, - src: src, - rawSource: rawSource, - diffIDsAreNeeded: src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates), - canModifyManifest: canModifyManifest, - reportWriter: reportWriter, - progressInterval: options.ProgressInterval, - progress: options.Progress, - } - - if err := ic.copyLayers(); err != nil { - return err - } - - // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; - // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support - // without actually trying to upload something and getting a types.ManifestTypeRejectedError. - // So, try the preferred manifest MIME type. If the process succeeds, fine… - manifest, err := ic.copyUpdatedConfigAndManifest() - if err != nil { - logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err) - // … if it fails, _and_ the failure is because the manifest is rejected, we may have other options. - if _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError); !isManifestRejected || len(otherManifestMIMETypeCandidates) == 0 { - // We don’t have other options. - // In principle the code below would handle this as well, but the resulting error message is fairly ugly. - // Don’t bother the user with MIME types if we have no choice. - return err - } - // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. 
- // So if we are here, we will definitely be trying to convert the manifest. - // With !canModifyManifest, that would just be a string of repeated failures for the same reason, - // so let’s bail out early and with a better error message. - if !canModifyManifest { - return errors.Wrap(err, "Writing manifest failed (and converting it is not possible)") - } - - // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. - errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)} - for _, manifestMIMEType := range otherManifestMIMETypeCandidates { - logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) - manifestUpdates.ManifestMIMEType = manifestMIMEType - attemptedManifest, err := ic.copyUpdatedConfigAndManifest() - if err != nil { - logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) - errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err)) - continue - } - - // We have successfully uploaded a manifest. - manifest = attemptedManifest - errs = nil // Mark this as a success so that we don't abort below. - break - } - if errs != nil { - return fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) - } - } - - if options.SignBy != "" { - newSig, err := createSignature(dest, manifest, options.SignBy, reportWriter) - if err != nil { - return err - } - sigs = append(sigs, newSig) - } - - writeReport("Storing signatures\n") - if err := dest.PutSignatures(sigs); err != nil { - return errors.Wrap(err, "Error writing signatures") - } - - if err := dest.Commit(); err != nil { - return errors.Wrap(err, "Error committing the finished image") - } - - return nil -} - -func checkImageDestinationForCurrentRuntimeOS(src types.Image, dest types.ImageDestination) error { - if dest.MustMatchRuntimeOS() { - c, err := src.OCIConfig() - if err != nil { - return errors.Wrapf(err, "Error parsing image configuration") - } - osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, runtime.GOOS) - if runtime.GOOS == "windows" && c.OS == "linux" { - return osErr - } else if runtime.GOOS != "windows" && c.OS == "windows" { - return osErr - } - } - return nil -} - -// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. -func updateEmbeddedDockerReference(manifestUpdates *types.ManifestUpdateOptions, dest types.ImageDestination, src types.Image, canModifyManifest bool) error { - destRef := dest.Reference().DockerReference() - if destRef == nil { - return nil // Destination does not care about Docker references - } - if !src.EmbeddedDockerReferenceConflicts(destRef) { - return nil // No reference embedded in the manifest, or it matches destRef already. - } - - if !canModifyManifest { - return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would invalidate existing signatures. Explicitly enable signature removal to proceed anyway", - transports.ImageName(dest.Reference()), destRef.String()) - } - manifestUpdates.EmbeddedDockerReference = destRef - return nil -} - -// copyLayers copies layers from src/rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. 
-func (ic *imageCopier) copyLayers() error { - srcInfos := ic.src.LayerInfos() - destInfos := []types.BlobInfo{} - diffIDs := []digest.Digest{} - for _, srcLayer := range srcInfos { - var ( - destInfo types.BlobInfo - diffID digest.Digest - err error - ) - if ic.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { - // DiffIDs are, currently, needed only when converting from schema1. - // In which case src.LayerInfos will not have URLs because schema1 - // does not support them. - if ic.diffIDsAreNeeded { - return errors.New("getting DiffID for foreign layers is unimplemented") - } - destInfo = srcLayer - fmt.Fprintf(ic.reportWriter, "Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.dest.Reference().Transport().Name()) - } else { - destInfo, diffID, err = ic.copyLayer(srcLayer) - if err != nil { - return err - } - } - destInfos = append(destInfos, destInfo) - diffIDs = append(diffIDs, diffID) - } - ic.manifestUpdates.InformationOnly.LayerInfos = destInfos - if ic.diffIDsAreNeeded { - ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs - } - if layerDigestsDiffer(srcInfos, destInfos) { - ic.manifestUpdates.LayerInfos = destInfos - } - return nil -} - -// layerDigestsDiffer return true iff the digests in a and b differ (ignoring sizes and possible other fields) -func layerDigestsDiffer(a, b []types.BlobInfo) bool { - if len(a) != len(b) { - return true - } - for i := range a { - if a[i].Digest != b[i].Digest { - return true - } - } - return false -} - -// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary, -// stores the resulting config and manifest to the destination, and returns the stored manifest. -func (ic *imageCopier) copyUpdatedConfigAndManifest() ([]byte, error) { - pendingImage := ic.src - if !reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) { - if !ic.canModifyManifest { - return nil, errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden") - } - if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { - // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. - // So, this can only happen if we are trying to upload using one of the other MIME type candidates. - // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise - // when ic.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. - // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. - // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates. 
- return nil, errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) - } - pi, err := ic.src.UpdatedImage(*ic.manifestUpdates) - if err != nil { - return nil, errors.Wrap(err, "Error creating an updated image manifest") - } - pendingImage = pi - } - manifest, _, err := pendingImage.Manifest() - if err != nil { - return nil, errors.Wrap(err, "Error reading manifest") - } - - if err := ic.copyConfig(pendingImage); err != nil { - return nil, err - } - - fmt.Fprintf(ic.reportWriter, "Writing manifest to image destination\n") - if err := ic.dest.PutManifest(manifest); err != nil { - return nil, errors.Wrap(err, "Error writing manifest") - } - return manifest, nil -} - -// copyConfig copies config.json, if any, from src to dest. -func (ic *imageCopier) copyConfig(src types.Image) error { - srcInfo := src.ConfigInfo() - if srcInfo.Digest != "" { - fmt.Fprintf(ic.reportWriter, "Copying config %s\n", srcInfo.Digest) - configBlob, err := src.ConfigBlob() - if err != nil { - return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest) - } - destInfo, err := ic.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false) - if err != nil { - return err - } - if destInfo.Digest != srcInfo.Digest { - return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest) - } - } - return nil -} - -// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine. -// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation. -type diffIDResult struct { - digest digest.Digest - err error -} - -// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress, -// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded -func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) { - // Check if we already have a blob with this digest - haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo) - if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest) - } - // If we already have a cached diffID for this blob, we don't need to compute it - diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.cachedDiffIDs[srcInfo.Digest] == "") - // If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again - if haveBlob && !diffIDIsNeeded { - // Check the blob sizes match, if we were given a size this time - if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize { - return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size) - } - srcInfo.Size = extantBlobSize - // Tell the image destination that this blob's delta is being applied again. 
For some image destinations, this can be faster than using GetBlob/PutBlob - blobinfo, err := ic.dest.ReapplyBlob(srcInfo) - if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest) - } - fmt.Fprintf(ic.reportWriter, "Skipping fetch of repeat blob %s\n", srcInfo.Digest) - return blobinfo, ic.cachedDiffIDs[srcInfo.Digest], err - } - - // Fallback: copy the layer, computing the diffID if we need to do so - fmt.Fprintf(ic.reportWriter, "Copying blob %s\n", srcInfo.Digest) - srcStream, srcBlobSize, err := ic.rawSource.GetBlob(srcInfo) - if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) - } - defer srcStream.Close() - - blobInfo, diffIDChan, err := ic.copyLayerFromStream(srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, - diffIDIsNeeded) - if err != nil { - return types.BlobInfo{}, "", err - } - var diffIDResult diffIDResult // = {digest:""} - if diffIDIsNeeded { - diffIDResult = <-diffIDChan - if diffIDResult.err != nil { - return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID") - } - logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) - ic.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest - } - return blobInfo, diffIDResult.digest, nil -} - -// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope. -// it copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest, -// perhaps compressing the stream if canCompress, -// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. -func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.BlobInfo, - diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) { - var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil - var diffIDChan chan diffIDResult - - err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below - if diffIDIsNeeded { - diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block. - pipeReader, pipeWriter := io.Pipe() - defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily. - pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close() - }() - - getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer { - // If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further - // reading from the pipe has failed, we don’t really care. - // We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it, - // the return value includes an error indication, which we do check. - // - // If this gets never called, pipeReader will not be used anywhere, but pipeWriter will only be - // closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC. 
- go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader - return pipeWriter - } - } - blobInfo, err := ic.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success - return blobInfo, diffIDChan, err - // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan -} - -// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest. -func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) { - result := diffIDResult{ - digest: "", - err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"), - } - defer func() { dest <- result }() - defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead. - - result.digest, result.err = computeDiffID(layerStream, decompressor) -} - -// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest. -func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) { - if decompressor != nil { - s, err := decompressor(stream) - if err != nil { - return "", err - } - stream = s - } - - return digest.Canonical.FromReader(stream) -} - -// copyBlobFromStream copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest, -// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, -// perhaps compressing it if canCompress, -// and returns a complete blobInfo of the copied blob. -func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo, - getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, - canCompress bool) (types.BlobInfo, error) { - // The copying happens through a pipeline of connected io.Readers. - // === Input: srcStream - - // === Process input through digestingReader to validate against the expected digest. - // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, - // use a separate validation failure indicator. - // Note that we don't use a stronger "validationSucceeded" indicator, because - // dest.PutBlob may detect that the layer already exists, in which case we don't - // read stream to the end, and validation does not happen. - digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest) - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest) - } - var destStream io.Reader = digestingReader - - // === Detect compression of the input stream. - // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. - decompressor, destStream, err := compression.DetectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) - } - isCompressed := decompressor != nil - - // === Report progress using a pb.Reader. 
-	bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
-	bar.Output = ic.reportWriter
-	bar.SetMaxWidth(80)
-	bar.ShowTimeLeft = false
-	bar.ShowPercent = false
-	bar.Start()
-	destStream = bar.NewProxyReader(destStream)
-	defer fmt.Fprint(ic.reportWriter, "\n")
-
-	// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
-	var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
-	if getOriginalLayerCopyWriter != nil {
-		destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor))
-		originalLayerReader = destStream
-	}
-
-	// === Compress the layer if it is uncompressed and compression is desired
-	var inputInfo types.BlobInfo
-	if !canCompress || isCompressed || !ic.dest.ShouldCompressLayers() {
-		logrus.Debugf("Using original blob without modification")
-		inputInfo = srcInfo
-	} else {
-		logrus.Debugf("Compressing blob on the fly")
-		pipeReader, pipeWriter := io.Pipe()
-		defer pipeReader.Close()
-
-		// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
-		// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
-		// we don’t care.
-		go compressGoroutine(pipeWriter, destStream) // Closes pipeWriter
-		destStream = pipeReader
-		inputInfo.Digest = ""
-		inputInfo.Size = -1
-	}
-
-	// === Report progress using the ic.progress channel, if required.
-	if ic.progress != nil && ic.progressInterval > 0 {
-		destStream = &progressReader{
-			source:   destStream,
-			channel:  ic.progress,
-			interval: ic.progressInterval,
-			artifact: srcInfo,
-			lastTime: time.Now(),
-		}
-	}
-
-	// === Finally, send the layer stream to dest.
-	uploadedInfo, err := ic.dest.PutBlob(destStream, inputInfo)
-	if err != nil {
-		return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
-	}
-
-	// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
-	// all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
-	// So, read everything from originalLayerReader, which will cause the rest to be
-	// sent there if we are not already at EOF.
-	if getOriginalLayerCopyWriter != nil {
-		logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
-		_, err := io.Copy(ioutil.Discard, originalLayerReader)
-		if err != nil {
-			return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest)
-		}
-	}
-
-	if digestingReader.validationFailed { // Coverage: This should never happen.
-		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
-	}
-	if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
-		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
-	}
-	return uploadedInfo, nil
-}
-
-// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
-func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
-	err := errors.New("Internal error: unexpected panic in compressGoroutine")
-	defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
- dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close() - }() - - zipper := gzip.NewWriter(dest) - defer zipper.Close() - - _, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close() -} diff --git a/vendor/github.com/containers/image/copy/copy_test.go b/vendor/github.com/containers/image/copy/copy_test.go deleted file mode 100644 index b98133a88f27..000000000000 --- a/vendor/github.com/containers/image/copy/copy_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package copy - -import ( - "bytes" - "io" - "os" - "testing" - "time" - - "github.com/pkg/errors" - - "github.com/containers/image/pkg/compression" - "github.com/opencontainers/go-digest" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewDigestingReader(t *testing.T) { - // Only the failure cases, success is tested in TestDigestingReaderRead below. - source := bytes.NewReader([]byte("abc")) - for _, input := range []digest.Digest{ - "abc", // Not algo:hexvalue - "crc32:", // Unknown algorithm, empty value - "crc32:012345678", // Unknown algorithm - "sha256:", // Empty value - "sha256:0", // Invalid hex value - "sha256:01", // Invalid length of hex value - } { - _, err := newDigestingReader(source, input) - assert.Error(t, err, input.String()) - } -} - -func TestDigestingReaderRead(t *testing.T) { - cases := []struct { - input []byte - digest digest.Digest - }{ - {[]byte(""), "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, - {[]byte("abc"), "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"}, - {make([]byte, 65537, 65537), "sha256:3266304f31be278d06c3bd3eb9aa3e00c59bedec0a890de466568b0b90b0e01f"}, - } - // Valid input - for _, c := range cases { - source := bytes.NewReader(c.input) - reader, err := newDigestingReader(source, c.digest) - require.NoError(t, err, c.digest.String()) - dest := bytes.Buffer{} - n, err := io.Copy(&dest, reader) - assert.NoError(t, err, c.digest.String()) - assert.Equal(t, int64(len(c.input)), n, c.digest.String()) - assert.Equal(t, c.input, dest.Bytes(), c.digest.String()) - assert.False(t, reader.validationFailed, c.digest.String()) - } - // Modified input - for _, c := range cases { - source := bytes.NewReader(bytes.Join([][]byte{c.input, []byte("x")}, nil)) - reader, err := newDigestingReader(source, c.digest) - require.NoError(t, err, c.digest.String()) - dest := bytes.Buffer{} - _, err = io.Copy(&dest, reader) - assert.Error(t, err, c.digest.String()) - assert.True(t, reader.validationFailed) - } -} - -func goDiffIDComputationGoroutineWithTimeout(layerStream io.ReadCloser, decompressor compression.DecompressorFunc) *diffIDResult { - ch := make(chan diffIDResult) - go diffIDComputationGoroutine(ch, layerStream, nil) - timeout := time.After(time.Second) - select { - case res := <-ch: - return &res - case <-timeout: - return nil - } -} - -func TestDiffIDComputationGoroutine(t *testing.T) { - stream, err := os.Open("fixtures/Hello.uncompressed") - require.NoError(t, err) - res := goDiffIDComputationGoroutineWithTimeout(stream, nil) - require.NotNil(t, res) - assert.NoError(t, res.err) - assert.Equal(t, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969", res.digest.String()) - - // Error reading input - reader, writer := io.Pipe() - writer.CloseWithError(errors.New("Expected error reading input in diffIDComputationGoroutine")) - res = goDiffIDComputationGoroutineWithTimeout(reader, nil) - require.NotNil(t, res) - assert.Error(t, res.err) -} - -func 
TestComputeDiffID(t *testing.T) { - for _, c := range []struct { - filename string - decompressor compression.DecompressorFunc - result digest.Digest - }{ - {"fixtures/Hello.uncompressed", nil, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"}, - {"fixtures/Hello.gz", nil, "sha256:0bd4409dcd76476a263b8f3221b4ce04eb4686dec40bfdcc2e86a7403de13609"}, - {"fixtures/Hello.gz", compression.GzipDecompressor, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"}, - } { - stream, err := os.Open(c.filename) - require.NoError(t, err, c.filename) - defer stream.Close() - - diffID, err := computeDiffID(stream, c.decompressor) - require.NoError(t, err, c.filename) - assert.Equal(t, c.result, diffID) - } - - // Error initializing decompression - _, err := computeDiffID(bytes.NewReader([]byte{}), compression.GzipDecompressor) - assert.Error(t, err) - - // Error reading input - reader, writer := io.Pipe() - defer reader.Close() - writer.CloseWithError(errors.New("Expected error reading input in computeDiffID")) - _, err = computeDiffID(reader, nil) - assert.Error(t, err) -} diff --git a/vendor/github.com/containers/image/copy/fixtures/Hello.bz2 b/vendor/github.com/containers/image/copy/fixtures/Hello.bz2 deleted file mode 120000 index fc28d6c9ac27..000000000000 --- a/vendor/github.com/containers/image/copy/fixtures/Hello.bz2 +++ /dev/null @@ -1 +0,0 @@ -../../pkg/compression/fixtures/Hello.bz2 \ No newline at end of file diff --git a/vendor/github.com/containers/image/copy/fixtures/Hello.gz b/vendor/github.com/containers/image/copy/fixtures/Hello.gz deleted file mode 120000 index 08aa805fcc19..000000000000 --- a/vendor/github.com/containers/image/copy/fixtures/Hello.gz +++ /dev/null @@ -1 +0,0 @@ -../../pkg/compression/fixtures/Hello.gz \ No newline at end of file diff --git a/vendor/github.com/containers/image/copy/fixtures/Hello.uncompressed b/vendor/github.com/containers/image/copy/fixtures/Hello.uncompressed deleted file mode 120000 index 49b46625d8c1..000000000000 --- a/vendor/github.com/containers/image/copy/fixtures/Hello.uncompressed +++ /dev/null @@ -1 +0,0 @@ -../../pkg/compression/fixtures/Hello.uncompressed \ No newline at end of file diff --git a/vendor/github.com/containers/image/copy/fixtures/Hello.xz b/vendor/github.com/containers/image/copy/fixtures/Hello.xz deleted file mode 120000 index 77bcd85587ab..000000000000 --- a/vendor/github.com/containers/image/copy/fixtures/Hello.xz +++ /dev/null @@ -1 +0,0 @@ -../../pkg/compression/fixtures/Hello.xz \ No newline at end of file diff --git a/vendor/github.com/containers/image/copy/manifest.go b/vendor/github.com/containers/image/copy/manifest.go deleted file mode 100644 index e3b294dd1389..000000000000 --- a/vendor/github.com/containers/image/copy/manifest.go +++ /dev/null @@ -1,102 +0,0 @@ -package copy - -import ( - "strings" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert. -// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location. -// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used. 
-var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} - -// orderedSet is a list of strings (MIME types in our case), with each string appearing at most once. -type orderedSet struct { - list []string - included map[string]struct{} -} - -// newOrderedSet creates a correctly initialized orderedSet. -// [Sometimes it would be really nice if Golang had constructors…] -func newOrderedSet() *orderedSet { - return &orderedSet{ - list: []string{}, - included: map[string]struct{}{}, - } -} - -// append adds s to the end of os, only if it is not included already. -func (os *orderedSet) append(s string) { - if _, ok := os.included[s]; !ok { - os.list = append(os.list, s) - os.included[s] = struct{}{} - } -} - -// determineManifestConversion updates manifestUpdates to convert manifest to a supported MIME type, if necessary and canModifyManifest. -// Note that the conversion will only happen later, through src.UpdatedImage -// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified), -// and a list of other possible alternatives, in order. -func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, src types.Image, destSupportedManifestMIMETypes []string, canModifyManifest bool) (string, []string, error) { - _, srcType, err := src.Manifest() - if err != nil { // This should have been cached?! - return "", nil, errors.Wrap(err, "Error reading manifest") - } - - if len(destSupportedManifestMIMETypes) == 0 { - return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions. - } - supportedByDest := map[string]struct{}{} - for _, t := range destSupportedManifestMIMETypes { - supportedByDest[t] = struct{}{} - } - - // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types. - // So, build a list of types to try in order of decreasing preference. - // FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct, - // although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other. - // In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types - // and never attempt the other one. - prioritizedTypes := newOrderedSet() - - // First of all, prefer to keep the original manifest unmodified. - if _, ok := supportedByDest[srcType]; ok { - prioritizedTypes.append(srcType) - } - if !canModifyManifest { - // We could also drop the !canModifyManifest parameter and have the caller - // make the choice; it is already doing that to an extent, to improve error - // messages. But it is nice to hide the “if !canModifyManifest, do no conversion” - // special case in here; the caller can then worry (or not) only about a good UI. - logrus.Debugf("We can't modify the manifest, hoping for the best...") - return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? - } - - // Then use our list of preferred types. - for _, t := range preferredManifestMIMETypes { - if _, ok := supportedByDest[t]; ok { - prioritizedTypes.append(t) - } - } - - // Finally, try anything else the destination supports. 
- for _, t := range destSupportedManifestMIMETypes { - prioritizedTypes.append(t) - } - - logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) - if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen. - return "", nil, errors.New("Internal error: no candidate MIME types") - } - preferredType := prioritizedTypes.list[0] - if preferredType != srcType { - manifestUpdates.ManifestMIMEType = preferredType - } else { - logrus.Debugf("... will first try using the original manifest unmodified") - } - return preferredType, prioritizedTypes.list[1:], nil -} diff --git a/vendor/github.com/containers/image/copy/manifest_test.go b/vendor/github.com/containers/image/copy/manifest_test.go deleted file mode 100644 index ed636c5e6daf..000000000000 --- a/vendor/github.com/containers/image/copy/manifest_test.go +++ /dev/null @@ -1,165 +0,0 @@ -package copy - -import ( - "context" - "errors" - "fmt" - "testing" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestOrderedSet(t *testing.T) { - for _, c := range []struct{ input, expected []string }{ - {[]string{}, []string{}}, - {[]string{"a", "b", "c"}, []string{"a", "b", "c"}}, - {[]string{"a", "b", "a", "c"}, []string{"a", "b", "c"}}, - } { - os := newOrderedSet() - for _, s := range c.input { - os.append(s) - } - assert.Equal(t, c.expected, os.list, fmt.Sprintf("%#v", c.input)) - } -} - -// fakeImageSource is an implementation of types.Image which only returns itself as a MIME type in Manifest -// except that "" means “reading the manifest should fail” -type fakeImageSource string - -func (f fakeImageSource) Reference() types.ImageReference { - panic("Unexpected call to a mock function") -} -func (f fakeImageSource) Close() error { - panic("Unexpected call to a mock function") -} -func (f fakeImageSource) Manifest() ([]byte, string, error) { - if string(f) == "" { - return nil, "", errors.New("Manifest() directed to fail") - } - return nil, string(f), nil -} -func (f fakeImageSource) Signatures(context.Context) ([][]byte, error) { - panic("Unexpected call to a mock function") -} -func (f fakeImageSource) ConfigInfo() types.BlobInfo { - panic("Unexpected call to a mock function") -} -func (f fakeImageSource) ConfigBlob() ([]byte, error) { - panic("Unexpected call to a mock function") -} -func (f fakeImageSource) OCIConfig() (*v1.Image, error) { - panic("Unexpected call to a mock function") -} -func (f fakeImageSource) LayerInfos() []types.BlobInfo { - panic("Unexpected call to a mock function") -} -func (f fakeImageSource) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - panic("Unexpected call to a mock function") -} -func (f fakeImageSource) Inspect() (*types.ImageInspectInfo, error) { - panic("Unexpected call to a mock function") -} -func (f fakeImageSource) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - panic("Unexpected call to a mock function") -} -func (f fakeImageSource) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - panic("Unexpected call to a mock function") -} -func (f fakeImageSource) IsMultiImage() bool { - panic("Unexpected call to a mock function") -} -func (f 
fakeImageSource) Size() (int64, error) { - panic("Unexpected call to a mock function") -} - -func TestDetermineManifestConversion(t *testing.T) { - supportS1S2OCI := []string{ - v1.MediaTypeImageManifest, - manifest.DockerV2Schema2MediaType, - manifest.DockerV2Schema1SignedMediaType, - manifest.DockerV2Schema1MediaType, - } - supportS1OCI := []string{ - v1.MediaTypeImageManifest, - manifest.DockerV2Schema1SignedMediaType, - manifest.DockerV2Schema1MediaType, - } - supportS1S2 := []string{ - manifest.DockerV2Schema2MediaType, - manifest.DockerV2Schema1SignedMediaType, - manifest.DockerV2Schema1MediaType, - } - supportOnlyS1 := []string{ - manifest.DockerV2Schema1SignedMediaType, - manifest.DockerV2Schema1MediaType, - } - - cases := []struct { - description string - sourceType string - destTypes []string - expectedUpdate string - expectedOtherCandidates []string - }{ - // Destination accepts anything — no conversion necessary - {"s1→anything", manifest.DockerV2Schema1SignedMediaType, nil, "", []string{}}, - {"s2→anything", manifest.DockerV2Schema2MediaType, nil, "", []string{}}, - // Destination accepts the unmodified original - {"s1→s1s2", manifest.DockerV2Schema1SignedMediaType, supportS1S2, "", []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1MediaType}}, - {"s2→s1s2", manifest.DockerV2Schema2MediaType, supportS1S2, "", supportOnlyS1}, - {"s1→s1", manifest.DockerV2Schema1SignedMediaType, supportOnlyS1, "", []string{manifest.DockerV2Schema1MediaType}}, - // Conversion necessary, a preferred format is acceptable - {"s2→s1", manifest.DockerV2Schema2MediaType, supportOnlyS1, manifest.DockerV2Schema1SignedMediaType, []string{manifest.DockerV2Schema1MediaType}}, - // Conversion necessary, a preferred format is not acceptable - {"s2→OCI", manifest.DockerV2Schema2MediaType, []string{v1.MediaTypeImageManifest}, v1.MediaTypeImageManifest, []string{}}, - // Conversion necessary, try the preferred formats in order. 
- { - "special→s2", "this needs conversion", supportS1S2OCI, manifest.DockerV2Schema2MediaType, - []string{manifest.DockerV2Schema1SignedMediaType, v1.MediaTypeImageManifest, manifest.DockerV2Schema1MediaType}, - }, - { - "special→s1", "this needs conversion", supportS1OCI, manifest.DockerV2Schema1SignedMediaType, - []string{v1.MediaTypeImageManifest, manifest.DockerV2Schema1MediaType}, - }, - { - "special→OCI", "this needs conversion", []string{v1.MediaTypeImageManifest, "other options", "with lower priority"}, v1.MediaTypeImageManifest, - []string{"other options", "with lower priority"}, - }, - } - - for _, c := range cases { - src := fakeImageSource(c.sourceType) - mu := types.ManifestUpdateOptions{} - preferredMIMEType, otherCandidates, err := determineManifestConversion(&mu, src, c.destTypes, true) - require.NoError(t, err, c.description) - assert.Equal(t, c.expectedUpdate, mu.ManifestMIMEType, c.description) - if c.expectedUpdate == "" { - assert.Equal(t, c.sourceType, preferredMIMEType, c.description) - } else { - assert.Equal(t, c.expectedUpdate, preferredMIMEType, c.description) - } - assert.Equal(t, c.expectedOtherCandidates, otherCandidates, c.description) - } - - // Whatever the input is, with !canModifyManifest we return "keep the original as is" - for _, c := range cases { - src := fakeImageSource(c.sourceType) - mu := types.ManifestUpdateOptions{} - preferredMIMEType, otherCandidates, err := determineManifestConversion(&mu, src, c.destTypes, false) - require.NoError(t, err, c.description) - assert.Equal(t, "", mu.ManifestMIMEType, c.description) - assert.Equal(t, c.sourceType, preferredMIMEType, c.description) - assert.Equal(t, []string{}, otherCandidates, c.description) - } - - // Error reading the manifest — smoke test only. - mu := types.ManifestUpdateOptions{} - _, _, err := determineManifestConversion(&mu, fakeImageSource(""), supportS1S2, true) - assert.Error(t, err) -} diff --git a/vendor/github.com/containers/image/copy/progress_reader.go b/vendor/github.com/containers/image/copy/progress_reader.go deleted file mode 100644 index b670ee59f1c5..000000000000 --- a/vendor/github.com/containers/image/copy/progress_reader.go +++ /dev/null @@ -1,28 +0,0 @@ -package copy - -import ( - "io" - "time" - - "github.com/containers/image/types" -) - -// progressReader is a reader that reports its progress on an interval. -type progressReader struct { - source io.Reader - channel chan types.ProgressProperties - interval time.Duration - artifact types.BlobInfo - lastTime time.Time - offset uint64 -} - -func (r *progressReader) Read(p []byte) (int, error) { - n, err := r.source.Read(p) - r.offset += uint64(n) - if time.Since(r.lastTime) > r.interval { - r.channel <- types.ProgressProperties{Artifact: r.artifact, Offset: r.offset} - r.lastTime = time.Now() - } - return n, err -} diff --git a/vendor/github.com/containers/image/copy/sign.go b/vendor/github.com/containers/image/copy/sign.go deleted file mode 100644 index 9187d70b33ed..000000000000 --- a/vendor/github.com/containers/image/copy/sign.go +++ /dev/null @@ -1,35 +0,0 @@ -package copy - -import ( - "fmt" - "io" - - "github.com/containers/image/signature" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -// createSignature creates a new signature of manifest at (identified by) dest using keyIdentity. 
-func createSignature(dest types.ImageDestination, manifest []byte, keyIdentity string, reportWriter io.Writer) ([]byte, error) {
-	mech, err := signature.NewGPGSigningMechanism()
-	if err != nil {
-		return nil, errors.Wrap(err, "Error initializing GPG")
-	}
-	defer mech.Close()
-	if err := mech.SupportsSigning(); err != nil {
-		return nil, errors.Wrap(err, "Signing not supported")
-	}
-
-	dockerReference := dest.Reference().DockerReference()
-	if dockerReference == nil {
-		return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference()))
-	}
-
-	fmt.Fprintf(reportWriter, "Signing manifest\n")
-	newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity)
-	if err != nil {
-		return nil, errors.Wrap(err, "Error creating signature")
-	}
-	return newSig, nil
-}
diff --git a/vendor/github.com/containers/image/copy/sign_test.go b/vendor/github.com/containers/image/copy/sign_test.go
deleted file mode 100644
index 4cc5e27c7155..000000000000
--- a/vendor/github.com/containers/image/copy/sign_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package copy
-
-import (
-	"io/ioutil"
-	"os"
-	"testing"
-
-	"github.com/containers/image/directory"
-	"github.com/containers/image/docker"
-	"github.com/containers/image/manifest"
-	"github.com/containers/image/signature"
-	"github.com/containers/image/types"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-const (
-	testGPGHomeDirectory = "../signature/fixtures"
-	// TestKeyFingerprint is the fingerprint of the private key in testGPGHomeDirectory.
-	// Keep this in sync with signature/fixtures_info_test.go
-	testKeyFingerprint = "1D8230F6CDB6A06716E414C1DB72F2188BB46CC8"
-)
-
-func TestCreateSignature(t *testing.T) {
-	manifestBlob := []byte("Something")
-	manifestDigest, err := manifest.Digest(manifestBlob)
-	require.NoError(t, err)
-
-	mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
-	require.NoError(t, err)
-	defer mech.Close()
-	if err := mech.SupportsSigning(); err != nil {
-		t.Skipf("Signing not supported: %v", err)
-	}
-
-	os.Setenv("GNUPGHOME", testGPGHomeDirectory)
-	defer os.Unsetenv("GNUPGHOME")
-
-	// Signing a directory: reference, which does not have a DockerReference(), fails.
- tempDir, err := ioutil.TempDir("", "signature-dir-dest") - require.NoError(t, err) - defer os.RemoveAll(tempDir) - dirRef, err := directory.NewReference(tempDir) - require.NoError(t, err) - dirDest, err := dirRef.NewImageDestination(nil) - require.NoError(t, err) - defer dirDest.Close() - _, err = createSignature(dirDest, manifestBlob, testKeyFingerprint, ioutil.Discard) - assert.Error(t, err) - - // Set up a docker: reference - dockerRef, err := docker.ParseReference("//busybox") - require.NoError(t, err) - dockerDest, err := dockerRef.NewImageDestination(&types.SystemContext{RegistriesDirPath: "/this/doesnt/exist", DockerPerHostCertDirPath: "/this/doesnt/exist"}) - require.NoError(t, err) - defer dockerDest.Close() - - // Signing with an unknown key fails - _, err = createSignature(dockerDest, manifestBlob, "this key does not exist", ioutil.Discard) - assert.Error(t, err) - - // Success - mech, err = signature.NewGPGSigningMechanism() - require.NoError(t, err) - defer mech.Close() - sig, err := createSignature(dockerDest, manifestBlob, testKeyFingerprint, ioutil.Discard) - require.NoError(t, err) - verified, err := signature.VerifyDockerManifestSignature(sig, manifestBlob, "docker.io/library/busybox:latest", mech, testKeyFingerprint) - require.NoError(t, err) - assert.Equal(t, "docker.io/library/busybox:latest", verified.DockerReference) - assert.Equal(t, manifestDigest, verified.DockerManifestDigest) -} diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go deleted file mode 100644 index ea46a27ed85f..000000000000 --- a/vendor/github.com/containers/image/directory/directory_dest.go +++ /dev/null @@ -1,149 +0,0 @@ -package directory - -import ( - "io" - "io/ioutil" - "os" - - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -type dirImageDestination struct { - ref dirReference -} - -// newImageDestination returns an ImageDestination for writing to an existing directory. -func newImageDestination(ref dirReference) types.ImageDestination { - return &dirImageDestination{ref} -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *dirImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *dirImageDestination) Close() error { - return nil -} - -func (d *dirImageDestination) SupportedManifestMIMETypes() []string { - return nil -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *dirImageDestination) SupportsSignatures() error { - return nil -} - -// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. -func (d *dirImageDestination) ShouldCompressLayers() bool { - return false -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *dirImageDestination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. 
False otherwise. -func (d *dirImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { - blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob") - if err != nil { - return types.BlobInfo{}, err - } - succeeded := false - defer func() { - blobFile.Close() - if !succeeded { - os.Remove(blobFile.Name()) - } - }() - - digester := digest.Canonical.Digester() - tee := io.TeeReader(stream, digester.Hash()) - - size, err := io.Copy(blobFile, tee) - if err != nil { - return types.BlobInfo{}, err - } - computedDigest := digester.Digest() - if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) - } - if err := blobFile.Sync(); err != nil { - return types.BlobInfo{}, err - } - if err := blobFile.Chmod(0644); err != nil { - return types.BlobInfo{}, err - } - blobPath := d.ref.layerPath(computedDigest) - if err := os.Rename(blobFile.Name(), blobPath); err != nil { - return types.BlobInfo{}, err - } - succeeded = true - return types.BlobInfo{Digest: computedDigest, Size: size}, nil -} - -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. -// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. -// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); -// it returns a non-nil error only on an unexpected failure. -func (d *dirImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { - if info.Digest == "" { - return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`) - } - blobPath := d.ref.layerPath(info.Digest) - finfo, err := os.Stat(blobPath) - if err != nil && os.IsNotExist(err) { - return false, -1, nil - } - if err != nil { - return false, -1, err - } - return true, finfo.Size(), nil -} - -func (d *dirImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) { - return info, nil -}
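For readers skimming this removal: PutBlob above uses the one-pass streaming-digest idiom from github.com/opencontainers/go-digest, hashing bytes while they are copied so the data never has to be read twice. A minimal, hedged standalone sketch of the same pattern (the input and destination here are illustrative stand-ins, not part of the vendored code):

    package main

    import (
    	"fmt"
    	"io"
    	"io/ioutil"
    	"strings"

    	"github.com/opencontainers/go-digest"
    )

    func main() {
    	src := strings.NewReader("example blob contents") // stand-in for the incoming stream
    	digester := digest.Canonical.Digester()
    	// Every byte read through tee is also written into the digester's hash.
    	tee := io.TeeReader(src, digester.Hash())
    	size, err := io.Copy(ioutil.Discard, tee) // stand-in for the destination blob file
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(size, digester.Digest()) // size and "sha256:..." computed in a single pass
    }

- -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.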
-func (d *dirImageDestination) PutManifest(manifest []byte) error { - return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644) -} - -func (d *dirImageDestination) PutSignatures(signatures [][]byte) error { - for i, sig := range signatures { - if err := ioutil.WriteFile(d.ref.signaturePath(i), sig, 0644); err != nil { - return err - } - } - return nil -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *dirImageDestination) Commit() error { - return nil -} diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go deleted file mode 100644 index fddc1c522cb8..000000000000 --- a/vendor/github.com/containers/image/directory/directory_src.go +++ /dev/null @@ -1,76 +0,0 @@ -package directory - -import ( - "context" - "io" - "io/ioutil" - "os" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -type dirImageSource struct { - ref dirReference -} - -// newImageSource returns an ImageSource reading from an existing directory. -// The caller must call .Close() on the returned ImageSource. -func newImageSource(ref dirReference) types.ImageSource { - return &dirImageSource{ref} -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (s *dirImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *dirImageSource) Close() error { - return nil -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -func (s *dirImageSource) GetManifest() ([]byte, string, error) { - m, err := ioutil.ReadFile(s.ref.manifestPath()) - if err != nil { - return nil, "", err - } - return m, manifest.GuessMIMEType(m), err -} - -func (s *dirImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`) -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). 
-func (s *dirImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { - r, err := os.Open(s.ref.layerPath(info.Digest)) - if err != nil { - return nil, 0, err - } - fi, err := r.Stat() - if err != nil { - return nil, 0, err - } - return r, fi.Size(), nil -} - -func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) { - signatures := [][]byte{} - for i := 0; ; i++ { - signature, err := ioutil.ReadFile(s.ref.signaturePath(i)) - if err != nil { - if os.IsNotExist(err) { - break - } - return nil, err - } - signatures = append(signatures, signature) - } - return signatures, nil -} diff --git a/vendor/github.com/containers/image/directory/directory_test.go b/vendor/github.com/containers/image/directory/directory_test.go deleted file mode 100644 index 86ff004d0eed..000000000000 --- a/vendor/github.com/containers/image/directory/directory_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package directory - -import ( - "bytes" - "context" - "io/ioutil" - "os" - "testing" - - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestDestinationReference(t *testing.T) { - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - - dest, err := ref.NewImageDestination(nil) - require.NoError(t, err) - defer dest.Close() - ref2 := dest.Reference() - assert.Equal(t, tmpDir, ref2.StringWithinTransport()) -} - -func TestGetPutManifest(t *testing.T) { - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - - man := []byte("test-manifest") - dest, err := ref.NewImageDestination(nil) - require.NoError(t, err) - defer dest.Close() - err = dest.PutManifest(man) - assert.NoError(t, err) - err = dest.Commit() - assert.NoError(t, err) - - src, err := ref.NewImageSource(nil, nil) - require.NoError(t, err) - defer src.Close() - m, mt, err := src.GetManifest() - assert.NoError(t, err) - assert.Equal(t, man, m) - assert.Equal(t, "", mt) -} - -func TestGetPutBlob(t *testing.T) { - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - - blob := []byte("test-blob") - dest, err := ref.NewImageDestination(nil) - require.NoError(t, err) - defer dest.Close() - compress := dest.ShouldCompressLayers() - assert.False(t, compress) - info, err := dest.PutBlob(bytes.NewReader(blob), types.BlobInfo{Digest: digest.Digest("sha256:digest-test"), Size: int64(9)}) - assert.NoError(t, err) - err = dest.Commit() - assert.NoError(t, err) - assert.Equal(t, int64(9), info.Size) - assert.Equal(t, digest.FromBytes(blob), info.Digest) - - src, err := ref.NewImageSource(nil, nil) - require.NoError(t, err) - defer src.Close() - rc, size, err := src.GetBlob(info) - assert.NoError(t, err) - defer rc.Close() - b, err := ioutil.ReadAll(rc) - assert.NoError(t, err) - assert.Equal(t, blob, b) - assert.Equal(t, int64(len(blob)), size) -} - -// readerFromFunc allows implementing Reader by any function, e.g. a closure. -type readerFromFunc func([]byte) (int, error) - -func (fn readerFromFunc) Read(p []byte) (int, error) { - return fn(p) -}
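A hedged aside, not part of the vendored file: readerFromFunc is the usual trick for building fault-injecting io.Readers out of closures when exercising error paths. For instance (the error text is invented):

    var reads int
    r := readerFromFunc(func(p []byte) (int, error) {
    	reads++
    	if reads > 1 {
    		return 0, errors.New("injected read failure") // fail on the second call
    	}
    	return copy(p, "partial data"), nil
    })
    _, err := ioutil.ReadAll(r) // err is the injected failure

The test below uses exactly this shape to assert that PutBlob deletes partially-written data when the stream fails.

- -// TestPutBlobDigestFailure simulates behavior on digest verification failure.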
-func TestPutBlobDigestFailure(t *testing.T) { - const digestErrorString = "Simulated digest error" - const blobDigest = digest.Digest("sha256:test-digest") - - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - dirRef, ok := ref.(dirReference) - require.True(t, ok) - blobPath := dirRef.layerPath(blobDigest) - - firstRead := true - reader := readerFromFunc(func(p []byte) (int, error) { - _, err := os.Lstat(blobPath) - require.Error(t, err) - require.True(t, os.IsNotExist(err)) - if firstRead { - if len(p) > 0 { - firstRead = false - } - for i := 0; i < len(p); i++ { - p[i] = 0xAA - } - return len(p), nil - } - return 0, errors.Errorf(digestErrorString) - }) - - dest, err := ref.NewImageDestination(nil) - require.NoError(t, err) - defer dest.Close() - _, err = dest.PutBlob(reader, types.BlobInfo{Digest: blobDigest, Size: -1}) - assert.Error(t, err) - assert.Contains(t, digestErrorString, err.Error()) - err = dest.Commit() - assert.NoError(t, err) - - _, err = os.Lstat(blobPath) - require.Error(t, err) - require.True(t, os.IsNotExist(err)) -} - -func TestGetPutSignatures(t *testing.T) { - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - - dest, err := ref.NewImageDestination(nil) - require.NoError(t, err) - defer dest.Close() - signatures := [][]byte{ - []byte("sig1"), - []byte("sig2"), - } - err = dest.SupportsSignatures() - assert.NoError(t, err) - err = dest.PutSignatures(signatures) - assert.NoError(t, err) - err = dest.Commit() - assert.NoError(t, err) - - src, err := ref.NewImageSource(nil, nil) - require.NoError(t, err) - defer src.Close() - sigs, err := src.GetSignatures(context.Background()) - assert.NoError(t, err) - assert.Equal(t, signatures, sigs) -} - -func TestSourceReference(t *testing.T) { - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - - src, err := ref.NewImageSource(nil, nil) - require.NoError(t, err) - defer src.Close() - ref2 := src.Reference() - assert.Equal(t, tmpDir, ref2.StringWithinTransport()) -} diff --git a/vendor/github.com/containers/image/directory/directory_transport.go b/vendor/github.com/containers/image/directory/directory_transport.go deleted file mode 100644 index 34f742893563..000000000000 --- a/vendor/github.com/containers/image/directory/directory_transport.go +++ /dev/null @@ -1,179 +0,0 @@ -package directory - -import ( - "fmt" - "path/filepath" - "strings" - - "github.com/pkg/errors" - - "github.com/containers/image/directory/explicitfilepath" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for directory paths. -var Transport = dirTransport{} - -type dirTransport struct{} - -func (t dirTransport) Name() string { - return "dir" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t dirTransport) ParseReference(reference string) (types.ImageReference, error) { - return NewReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. 
-// scope passed to this function will not be "", that value is always allowed. -func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error { - if !strings.HasPrefix(scope, "/") { - return errors.Errorf("Invalid scope %s: Must be an absolute path", scope) - } - // Refuse also "/", otherwise "/" and "" would have the same semantics, - // and "" could be unexpectedly shadowed by the "/" entry. - if scope == "/" { - return errors.New(`Invalid scope "/": Use the generic default scope ""`) - } - cleaned := filepath.Clean(scope) - if cleaned != scope { - return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) - } - return nil -} - -// dirReference is an ImageReference for directory paths. -type dirReference struct { - // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! - // Either of the paths may point to a different, or no, inode over time. resolvedPath may contain symbolic links, and so on. - - // Generally we follow the intent of the user, and use the "path" member for filesystem operations (e.g. the user can use a relative path to avoid - // being exposed to symlinks and renames in the parent directories to the working directory). - // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) - path string // As specified by the user. May be relative, contain symlinks, etc. - resolvedPath string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. -} - -// There is no directory.ParseReference because it is rather pointless. -// Callers who need a transport-independent interface will go through -// dirTransport.ParseReference; callers who intentionally deal with directories -// can use directory.NewReference. - -// NewReference returns a directory reference for a specified path. -// -// We do not expose an API supplying the resolvedPath; we could, but recomputing it -// is generally cheap enough that we prefer being confident about the properties of resolvedPath. -func NewReference(path string) (types.ImageReference, error) { - resolved, err := explicitfilepath.ResolvePathToFullyExplicit(path) - if err != nil { - return nil, err - } - return dirReference{path: path, resolvedPath: resolved}, nil -} - -func (ref dirReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref dirReference) StringWithinTransport() string { - return ref.path -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref dirReference) DockerReference() reference.Named { - return nil -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. 
-// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. -func (ref dirReference) PolicyConfigurationIdentity() string { - return ref.resolvedPath -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref dirReference) PolicyConfigurationNamespaces() []string { - res := []string{} - path := ref.resolvedPath - for { - lastSlash := strings.LastIndex(path, "/") - if lastSlash == -1 || lastSlash == 0 { - break - } - path = path[:lastSlash] - res = append(res, path) - } - // Note that we do not include "/"; it is redundant with the default "" global default, - // and rejected by dirTransport.ValidatePolicyConfigurationScope above. - return res -} - -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref dirReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - src := newImageSource(ref) - return image.FromSource(src) -} - -// NewImageSource returns a types.ImageSource for this reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. -// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. -// The caller must call .Close() on the returned ImageSource. -func (ref dirReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - return newImageSource(ref), nil -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref dirReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ref), nil -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref dirReference) DeleteImage(ctx *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for dir: images") -} - -// manifestPath returns a path for the manifest within a directory using our conventions. -func (ref dirReference) manifestPath() string { - return filepath.Join(ref.path, "manifest.json") -} - -// layerPath returns a path for a layer tarball within a directory using our conventions. -func (ref dirReference) layerPath(digest digest.Digest) string { - // FIXME: Should we keep the digest identification? 
- return filepath.Join(ref.path, digest.Hex()+".tar") -} - -// signaturePath returns a path for a signature within a directory using our conventions. -func (ref dirReference) signaturePath(index int) string { - return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1)) -} diff --git a/vendor/github.com/containers/image/directory/directory_transport_test.go b/vendor/github.com/containers/image/directory/directory_transport_test.go deleted file mode 100644 index 1384d6f5b122..000000000000 --- a/vendor/github.com/containers/image/directory/directory_transport_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package directory - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/containers/image/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestTransportName(t *testing.T) { - assert.Equal(t, "dir", Transport.Name()) -} - -func TestTransportParseReference(t *testing.T) { - testNewReference(t, Transport.ParseReference) -} - -func TestTransportValidatePolicyConfigurationScope(t *testing.T) { - for _, scope := range []string{ - "/etc", - "/this/does/not/exist", - } { - err := Transport.ValidatePolicyConfigurationScope(scope) - assert.NoError(t, err, scope) - } - - for _, scope := range []string{ - "relative/path", - "/double//slashes", - "/has/./dot", - "/has/dot/../dot", - "/trailing/slash/", - "/", - } { - err := Transport.ValidatePolicyConfigurationScope(scope) - assert.Error(t, err, scope) - } -} - -func TestNewReference(t *testing.T) { - testNewReference(t, NewReference) -} - -// testNewReference is a test shared for Transport.ParseReference and NewReference. -func testNewReference(t *testing.T, fn func(string) (types.ImageReference, error)) { - tmpDir, err := ioutil.TempDir("", "dir-transport-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - for _, path := range []string{ - "/", - "/etc", - tmpDir, - "relativepath", - tmpDir + "/thisdoesnotexist", - } { - ref, err := fn(path) - require.NoError(t, err, path) - dirRef, ok := ref.(dirReference) - require.True(t, ok) - assert.Equal(t, path, dirRef.path, path) - } - - _, err = fn(tmpDir + "/thisparentdoesnotexist/something") - assert.Error(t, err) -} - -// refToTempDir creates a temporary directory and returns a reference to it. -// The caller should -// defer os.RemoveAll(tmpDir) -func refToTempDir(t *testing.T) (ref types.ImageReference, tmpDir string) { - tmpDir, err := ioutil.TempDir("", "dir-transport-test") - require.NoError(t, err) - ref, err = NewReference(tmpDir) - require.NoError(t, err) - return ref, tmpDir -} - -func TestReferenceTransport(t *testing.T) { - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - assert.Equal(t, Transport, ref.Transport()) -} - -func TestReferenceStringWithinTransport(t *testing.T) { - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - assert.Equal(t, tmpDir, ref.StringWithinTransport()) -} - -func TestReferenceDockerReference(t *testing.T) { - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - assert.Nil(t, ref.DockerReference()) -} - -func TestReferencePolicyConfigurationIdentity(t *testing.T) { - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - - assert.Equal(t, tmpDir, ref.PolicyConfigurationIdentity()) - // A non-canonical path. Test just one, the various other cases are - // tested in explicitfilepath.ResolvePathToFullyExplicit. 
- ref, err := NewReference(tmpDir + "/.") - require.NoError(t, err) - assert.Equal(t, tmpDir, ref.PolicyConfigurationIdentity()) - - // "/" as a corner case. - ref, err = NewReference("/") - require.NoError(t, err) - assert.Equal(t, "/", ref.PolicyConfigurationIdentity()) -} - -func TestReferencePolicyConfigurationNamespaces(t *testing.T) { - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - // We don't really know enough to make a full equality test here. - ns := ref.PolicyConfigurationNamespaces() - require.NotNil(t, ns) - assert.NotEmpty(t, ns) - assert.Equal(t, filepath.Dir(tmpDir), ns[0]) - - // Test with a known path which should exist. Test just one non-canonical - // path, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit. - // - // It would be nice to test a deeper hierarchy, but it is not obvious what - // deeper path is always available in the various distros, AND is not likely - // to contains a symbolic link. - for _, path := range []string{"/etc/skel", "/etc/skel/./."} { - _, err := os.Lstat(path) - require.NoError(t, err) - ref, err := NewReference(path) - require.NoError(t, err) - ns := ref.PolicyConfigurationNamespaces() - require.NotNil(t, ns) - assert.Equal(t, []string{"/etc"}, ns) - } - - // "/" as a corner case. - ref, err := NewReference("/") - require.NoError(t, err) - assert.Equal(t, []string{}, ref.PolicyConfigurationNamespaces()) -} - -func TestReferenceNewImage(t *testing.T) { - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - - dest, err := ref.NewImageDestination(nil) - require.NoError(t, err) - defer dest.Close() - mFixture, err := ioutil.ReadFile("../manifest/fixtures/v2s1.manifest.json") - require.NoError(t, err) - err = dest.PutManifest(mFixture) - assert.NoError(t, err) - err = dest.Commit() - assert.NoError(t, err) - - img, err := ref.NewImage(nil) - assert.NoError(t, err) - defer img.Close() -} - -func TestReferenceNewImageNoValidManifest(t *testing.T) { - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - - dest, err := ref.NewImageDestination(nil) - require.NoError(t, err) - defer dest.Close() - err = dest.PutManifest([]byte(`{"schemaVersion":1}`)) - assert.NoError(t, err) - err = dest.Commit() - assert.NoError(t, err) - - _, err = ref.NewImage(nil) - assert.Error(t, err) -} - -func TestReferenceNewImageSource(t *testing.T) { - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - src, err := ref.NewImageSource(nil, nil) - assert.NoError(t, err) - defer src.Close() -} - -func TestReferenceNewImageDestination(t *testing.T) { - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - dest, err := ref.NewImageDestination(nil) - assert.NoError(t, err) - defer dest.Close() -} - -func TestReferenceDeleteImage(t *testing.T) { - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - err := ref.DeleteImage(nil) - assert.Error(t, err) -} - -func TestReferenceManifestPath(t *testing.T) { - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - dirRef, ok := ref.(dirReference) - require.True(t, ok) - assert.Equal(t, tmpDir+"/manifest.json", dirRef.manifestPath()) -} - -func TestReferenceLayerPath(t *testing.T) { - const hex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - - ref, tmpDir := refToTempDir(t) - defer os.RemoveAll(tmpDir) - dirRef, ok := ref.(dirReference) - require.True(t, ok) - assert.Equal(t, tmpDir+"/"+hex+".tar", dirRef.layerPath("sha256:"+hex)) -} - -func TestReferenceSignaturePath(t *testing.T) { - ref, tmpDir := refToTempDir(t) - 
defer os.RemoveAll(tmpDir) - dirRef, ok := ref.(dirReference) - require.True(t, ok) - assert.Equal(t, tmpDir+"/signature-1", dirRef.signaturePath(0)) - assert.Equal(t, tmpDir+"/signature-10", dirRef.signaturePath(9)) -} diff --git a/vendor/github.com/containers/image/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/directory/explicitfilepath/path.go deleted file mode 100644 index 71136b880897..000000000000 --- a/vendor/github.com/containers/image/directory/explicitfilepath/path.go +++ /dev/null @@ -1,56 +0,0 @@ -package explicitfilepath - -import ( - "os" - "path/filepath" - - "github.com/pkg/errors" -) - -// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path. -// To do so, all elements of the input path must exist; as a special case, the final component may be -// a non-existent name (but not a symlink pointing to a non-existent name) -// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc. -func ResolvePathToFullyExplicit(path string) (string, error) { - switch _, err := os.Lstat(path); { - case err == nil: - return resolveExistingPathToFullyExplicit(path) - case os.IsNotExist(err): - parent, file := filepath.Split(path) - resolvedParent, err := resolveExistingPathToFullyExplicit(parent) - if err != nil { - return "", err - } - if file == "." || file == ".." { - // Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well. - // This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed. - // We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components - // in the resulting path, and especially not at the end. - return "", errors.Errorf("Unexpectedly missing special filename component in %s", path) - } - resolvedPath := filepath.Join(resolvedParent, file) - // As a sanity check, ensure that there are no "." or ".." components. - cleanedResolvedPath := filepath.Clean(resolvedPath) - if cleanedResolvedPath != resolvedPath { - // Coverage: This should never happen. - return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath) - } - return resolvedPath, nil - default: // err != nil, unrecognized - return "", err - } -}
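To make the contract above concrete, a hedged illustration (all paths hypothetical): suppose /tmp/x exists and contains a symlink link pointing to an existing subdirectory target. Then:

    p, err := ResolvePathToFullyExplicit("/tmp/x/link/newfile")
    // p == "/tmp/x/target/newfile", err == nil: the missing final component is
    // allowed, while every parent has been resolved to an absolute,
    // symlink-free, cleaned form.

- -// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit, -// but without the special case for missing final component. -func resolveExistingPathToFullyExplicit(path string) (string, error) { - resolved, err := filepath.Abs(path) - if err != nil { - return "", err // Coverage: This can fail only if os.Getwd() fails.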
- } - resolved, err = filepath.EvalSymlinks(resolved) - if err != nil { - return "", err - } - return filepath.Clean(resolved), nil -} diff --git a/vendor/github.com/containers/image/directory/explicitfilepath/path_test.go b/vendor/github.com/containers/image/directory/explicitfilepath/path_test.go deleted file mode 100644 index e45baf058184..000000000000 --- a/vendor/github.com/containers/image/directory/explicitfilepath/path_test.go +++ /dev/null @@ -1,173 +0,0 @@ -package explicitfilepath - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type pathResolvingTestCase struct { - setup func(*testing.T, string) string - expected string -} - -var testCases = []pathResolvingTestCase{ - { // A straightforward subdirectory hierarchy - func(t *testing.T, top string) string { - err := os.MkdirAll(filepath.Join(top, "dir1/dir2/dir3"), 0755) - require.NoError(t, err) - return "dir1/dir2/dir3" - }, - "dir1/dir2/dir3", - }, - { // Missing component - func(t *testing.T, top string) string { - return "thisismissing/dir2" - }, - "", - }, - { // Symlink on the path - func(t *testing.T, top string) string { - err := os.MkdirAll(filepath.Join(top, "dir1/dir2"), 0755) - require.NoError(t, err) - err = os.Symlink("dir1", filepath.Join(top, "link1")) - require.NoError(t, err) - return "link1/dir2" - }, - "dir1/dir2", - }, - { // Trailing symlink - func(t *testing.T, top string) string { - err := os.MkdirAll(filepath.Join(top, "dir1/dir2"), 0755) - require.NoError(t, err) - err = os.Symlink("dir2", filepath.Join(top, "dir1/link2")) - require.NoError(t, err) - return "dir1/link2" - }, - "dir1/dir2", - }, - { // Symlink pointing nowhere, as a non-final component - func(t *testing.T, top string) string { - err := os.Symlink("thisismissing", filepath.Join(top, "link1")) - require.NoError(t, err) - return "link1/dir2" - }, - "", - }, - { // Trailing symlink pointing nowhere (but note that a missing non-symlink would be accepted) - func(t *testing.T, top string) string { - err := os.Symlink("thisismissing", filepath.Join(top, "link1")) - require.NoError(t, err) - return "link1" - }, - "", - }, - { // Relative components in a path - func(t *testing.T, top string) string { - err := os.MkdirAll(filepath.Join(top, "dir1/dir2/dir3"), 0755) - require.NoError(t, err) - return "dir1/./dir2/../dir2/dir3" - }, - "dir1/dir2/dir3", - }, - { // Trailing relative components - func(t *testing.T, top string) string { - err := os.MkdirAll(filepath.Join(top, "dir1/dir2"), 0755) - require.NoError(t, err) - return "dir1/dir2/.." - }, - "dir1", - }, - { // Relative components in symlink - func(t *testing.T, top string) string { - err := os.MkdirAll(filepath.Join(top, "dir1/dir2"), 0755) - require.NoError(t, err) - err = os.Symlink("../dir1/dir2", filepath.Join(top, "dir1/link2")) - require.NoError(t, err) - return "dir1/link2" - }, - "dir1/dir2", - }, - { // Relative component pointing "into" a symlink - func(t *testing.T, top string) string { - err := os.MkdirAll(filepath.Join(top, "dir1/dir2/dir3"), 0755) - require.NoError(t, err) - err = os.Symlink("dir3", filepath.Join(top, "dir1/dir2/link3")) - require.NoError(t, err) - return "dir1/dir2/link3/../.." 
- }, - "dir1", - }, - { // Unreadable directory - func(t *testing.T, top string) string { - err := os.MkdirAll(filepath.Join(top, "unreadable/dir2"), 0755) - require.NoError(t, err) - err = os.Chmod(filepath.Join(top, "unreadable"), 000) - require.NoError(t, err) - return "unreadable/dir2" - }, - "", - }, -} - -func testPathsAreSameFile(t *testing.T, path1, path2, description string) { - fi1, err := os.Stat(path1) - require.NoError(t, err) - fi2, err := os.Stat(path2) - require.NoError(t, err) - assert.True(t, os.SameFile(fi1, fi2), description) -} - -func runPathResolvingTestCase(t *testing.T, f func(string) (string, error), c pathResolvingTestCase, suffix string) { - topDir, err := ioutil.TempDir("", "pathResolving") - defer func() { - // Clean up after the "Unreadable directory" case; os.RemoveAll just fails. - _ = os.Chmod(filepath.Join(topDir, "unreadable"), 0755) // Ignore errors, especially if this does not exist. - os.RemoveAll(topDir) - }() - - input := c.setup(t, topDir) + suffix // Do not call filepath.Join() on input, it calls filepath.Clean() internally! - description := fmt.Sprintf("%s vs. %s%s", input, c.expected, suffix) - - fullOutput, err := ResolvePathToFullyExplicit(topDir + "/" + input) - if c.expected == "" { - assert.Error(t, err, description) - } else { - require.NoError(t, err, input) - fullExpected := topDir + "/" + c.expected + suffix - assert.Equal(t, fullExpected, fullOutput) - - // Either the two paths resolve to the same existing file, or to the same name in the same existing parent. - if _, err := os.Lstat(fullExpected); err == nil { - testPathsAreSameFile(t, fullOutput, fullExpected, description) - } else { - require.True(t, os.IsNotExist(err)) - _, err := os.Stat(fullOutput) - require.Error(t, err) - require.True(t, os.IsNotExist(err)) - - parentExpected, fileExpected := filepath.Split(fullExpected) - parentOutput, fileOutput := filepath.Split(fullOutput) - assert.Equal(t, fileExpected, fileOutput) - testPathsAreSameFile(t, parentOutput, parentExpected, description) - } - } -} - -func TestResolvePathToFullyExplicit(t *testing.T) { - for _, c := range testCases { - runPathResolvingTestCase(t, ResolvePathToFullyExplicit, c, "") - runPathResolvingTestCase(t, ResolvePathToFullyExplicit, c, "/trailing") - } -} - -func TestResolveExistingPathToFullyExplicit(t *testing.T) { - for _, c := range testCases { - runPathResolvingTestCase(t, resolveExistingPathToFullyExplicit, c, "") - } -} diff --git a/vendor/github.com/containers/image/doc.go b/vendor/github.com/containers/image/doc.go deleted file mode 100644 index 253a0835718d..000000000000 --- a/vendor/github.com/containers/image/doc.go +++ /dev/null @@ -1,29 +0,0 @@ -// Package image provides libraries and commands to interact with containers images. 
-// -// package main -// -// import ( -// "fmt" -// -// "github.com/containers/image/docker" -// ) -// -// func main() { -// ref, err := docker.ParseReference("//fedora") -// if err != nil { -// panic(err) -// } -// img, err := ref.NewImage(nil) -// if err != nil { -// panic(err) -// } -// defer img.Close() -// b, _, err := img.Manifest() -// if err != nil { -// panic(err) -// } -// fmt.Printf("%s", string(b)) -// } -// -// TODO(runcom) -package image diff --git a/vendor/github.com/containers/image/docker/archive/dest.go b/vendor/github.com/containers/image/docker/archive/dest.go deleted file mode 100644 index 9fc85bd85b1b..000000000000 --- a/vendor/github.com/containers/image/docker/archive/dest.go +++ /dev/null @@ -1,66 +0,0 @@ -package archive - -import ( - "io" - "os" - - "github.com/containers/image/docker/tarfile" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -type archiveImageDestination struct { - *tarfile.Destination // Implements most of types.ImageDestination - ref archiveReference - writer io.Closer -} - -func newImageDestination(ctx *types.SystemContext, ref archiveReference) (types.ImageDestination, error) { - if ref.destinationRef == nil { - return nil, errors.Errorf("docker-archive: destination reference not supplied (must be of form <path>:<reference:tag>)") - } - - // ref.path can be either a pipe or a regular file - // in the case of a pipe, we require that we can open it for write - // in the case of a regular file, we don't want to overwrite any pre-existing file - // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy, - // only in a different way. Either way, it’s up to the user to not have two writers to the same path.) - fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return nil, errors.Wrapf(err, "error opening file %q", ref.path) - } - - fhStat, err := fh.Stat() - if err != nil { - return nil, errors.Wrapf(err, "error statting file %q", ref.path) - } - - if fhStat.Mode().IsRegular() && fhStat.Size() != 0 { - return nil, errors.New("docker-archive doesn't support modifying existing images") - } - - return &archiveImageDestination{ - Destination: tarfile.NewDestination(fh, ref.destinationRef), - ref: ref, - writer: fh, - }, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *archiveImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *archiveImageDestination) Close() error { - return d.writer.Close() -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e.
rollback is allowed but not guaranteed) -func (d *archiveImageDestination) Commit() error { - return d.Destination.Commit() -} diff --git a/vendor/github.com/containers/image/docker/archive/fixtures/almostempty.tar b/vendor/github.com/containers/image/docker/archive/fixtures/almostempty.tar deleted file mode 100644 index ac37c1e4d649..000000000000 Binary files a/vendor/github.com/containers/image/docker/archive/fixtures/almostempty.tar and /dev/null differ diff --git a/vendor/github.com/containers/image/docker/archive/src.go b/vendor/github.com/containers/image/docker/archive/src.go deleted file mode 100644 index aebcaa82abc3..000000000000 --- a/vendor/github.com/containers/image/docker/archive/src.go +++ /dev/null @@ -1,36 +0,0 @@ -package archive - -import ( - "github.com/containers/image/docker/tarfile" - "github.com/containers/image/types" - "github.com/sirupsen/logrus" -) - -type archiveImageSource struct { - *tarfile.Source // Implements most of types.ImageSource - ref archiveReference -} - -// newImageSource returns a types.ImageSource for the specified image reference. -// The caller must call .Close() on the returned ImageSource. -func newImageSource(ctx *types.SystemContext, ref archiveReference) types.ImageSource { - if ref.destinationRef != nil { - logrus.Warnf("docker-archive: references are not supported for sources (ignoring)") - } - src := tarfile.NewSource(ref.path) - return &archiveImageSource{ - Source: src, - ref: ref, - } -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (s *archiveImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *archiveImageSource) Close() error { - return nil -} diff --git a/vendor/github.com/containers/image/docker/archive/transport.go b/vendor/github.com/containers/image/docker/archive/transport.go deleted file mode 100644 index 59c68c3beb2a..000000000000 --- a/vendor/github.com/containers/image/docker/archive/transport.go +++ /dev/null @@ -1,155 +0,0 @@ -package archive - -import ( - "fmt" - "strings" - - "github.com/containers/image/docker/reference" - ctrImage "github.com/containers/image/image" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for local Docker archives. -var Transport = archiveTransport{} - -type archiveTransport struct{} - -func (t archiveTransport) Name() string { - return "docker-archive" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. 
-func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error { - // See the explanation in archiveReference.PolicyConfigurationIdentity. - return errors.New(`docker-archive: does not support any scopes except the default "" one`) -} - -// archiveReference is an ImageReference for Docker images. -type archiveReference struct { - destinationRef reference.NamedTagged // only used for destinations - path string -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference. -func ParseReference(refString string) (types.ImageReference, error) { - if refString == "" { - return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString) - } - - parts := strings.SplitN(refString, ":", 2) - path := parts[0] - var destinationRef reference.NamedTagged - - // A :tag was specified, which is only necessary for destinations. - if len(parts) == 2 { - ref, err := reference.ParseNormalizedNamed(parts[1]) - if err != nil { - return nil, errors.Wrapf(err, "docker-archive parsing reference") - } - ref = reference.TagNameOnly(ref) - - if _, isDigest := ref.(reference.Canonical); isDigest { - return nil, errors.Errorf("docker-archive doesn't support digest references: %s", refString) - } - - refTagged, isTagged := ref.(reference.NamedTagged) - if !isTagged { - // Really shouldn't be hit... - return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString) - } - destinationRef = refTagged - } - - return archiveReference{ - destinationRef: destinationRef, - path: path, - }, nil -} - -func (ref archiveReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref archiveReference) StringWithinTransport() string { - if ref.destinationRef == nil { - return ref.path - } - return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String()) -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref archiveReference) DockerReference() reference.Named { - return ref.destinationRef -}
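A hedged sketch of how ParseReference above behaves in practice (paths and image names invented; the tag defaulting comes from reference.TagNameOnly):

    // Destination form: a path plus a named reference; the tag defaults to :latest.
    dst, _ := ParseReference("/tmp/img.tar:busybox")
    fmt.Println(dst.StringWithinTransport()) // "/tmp/img.tar:docker.io/library/busybox:latest"

    // Source form: a bare path; no destination reference is attached.
    src, _ := ParseReference("/tmp/img.tar")
    fmt.Println(src.StringWithinTransport()) // "/tmp/img.tar"

(Errors are ignored here only for brevity.)

- -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported.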
-func (ref archiveReference) PolicyConfigurationIdentity() string { - // Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity. - return "" -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref archiveReference) PolicyConfigurationNamespaces() []string { - // TODO - return []string{} -} - -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - src := newImageSource(ctx, ref) - return ctrImage.FromSource(src) -} - -// NewImageSource returns a types.ImageSource for this reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. -// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. -// The caller must call .Close() on the returned ImageSource. -func (ref archiveReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - return newImageSource(ctx, ref), nil -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref archiveReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ctx, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref archiveReference) DeleteImage(ctx *types.SystemContext) error { - // Not really supported, for safety reasons. - return errors.New("Deleting images not implemented for docker-archive: images") -} diff --git a/vendor/github.com/containers/image/docker/archive/transport_test.go b/vendor/github.com/containers/image/docker/archive/transport_test.go deleted file mode 100644 index 689a512cff22..000000000000 --- a/vendor/github.com/containers/image/docker/archive/transport_test.go +++ /dev/null @@ -1,198 +0,0 @@ -package archive - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - sha256digest = "@sha256:" + sha256digestHex - tarFixture = "fixtures/almostempty.tar" -) - -func TestTransportName(t *testing.T) { - assert.Equal(t, "docker-archive", Transport.Name()) -} - -func TestTransportParseReference(t *testing.T) { - testParseReference(t, Transport.ParseReference) -} - -func TestTransportValidatePolicyConfigurationScope(t *testing.T) { - for _, scope := range []string{ // A semi-representative assortment of values; everything is rejected. 
- "docker.io/library/busybox:notlatest", - "docker.io/library/busybox", - "docker.io/library", - "docker.io", - "", - } { - err := Transport.ValidatePolicyConfigurationScope(scope) - assert.Error(t, err, scope) - } -} - -func TestParseReference(t *testing.T) { - testParseReference(t, ParseReference) -} - -// testParseReference is a test shared for Transport.ParseReference and ParseReference. -func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) { - for _, c := range []struct{ input, expectedPath, expectedRef string }{ - {"", "", ""}, // Empty input is explicitly rejected - {"/path", "/path", ""}, - {"/path:busybox:notlatest", "/path", "docker.io/library/busybox:notlatest"}, // Explicit tag - {"/path:busybox" + sha256digest, "", ""}, // Digest references are forbidden - {"/path:busybox", "/path", "docker.io/library/busybox:latest"}, // Default tag - // A github.com/distribution/reference value can have a tag and a digest at the same time! - {"/path:busybox:latest" + sha256digest, "", ""}, // Both tag and digest is rejected - {"/path:docker.io/library/busybox:latest", "/path", "docker.io/library/busybox:latest"}, // All implied values explicitly specified - {"/path:UPPERCASEISINVALID", "", ""}, // Invalid input - } { - ref, err := fn(c.input) - if c.expectedPath == "" { - assert.Error(t, err, c.input) - } else { - require.NoError(t, err, c.input) - archiveRef, ok := ref.(archiveReference) - require.True(t, ok, c.input) - assert.Equal(t, c.expectedPath, archiveRef.path, c.input) - if c.expectedRef == "" { - assert.Nil(t, archiveRef.destinationRef, c.input) - } else { - require.NotNil(t, archiveRef.destinationRef, c.input) - assert.Equal(t, c.expectedRef, archiveRef.destinationRef.String(), c.input) - } - } - } -} - -// refWithTagAndDigest is a reference.NamedTagged and reference.Canonical at the same time. -type refWithTagAndDigest struct{ reference.Canonical } - -func (ref refWithTagAndDigest) Tag() string { - return "notLatest" -} - -// A common list of reference formats to test for the various ImageReference methods. -var validReferenceTestCases = []struct{ input, dockerRef, stringWithinTransport string }{ - {"/pathonly", "", "/pathonly"}, - {"/path:busybox:notlatest", "docker.io/library/busybox:notlatest", "/path:docker.io/library/busybox:notlatest"}, // Explicit tag - {"/path:docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "/path:docker.io/library/busybox:latest"}, // All implied values explicitly specified - {"/path:example.com/ns/foo:bar", "example.com/ns/foo:bar", "/path:example.com/ns/foo:bar"}, // All values explicitly specified -} - -func TestReferenceTransport(t *testing.T) { - ref, err := ParseReference("/tmp/archive.tar") - require.NoError(t, err) - assert.Equal(t, Transport, ref.Transport()) -} - -func TestReferenceStringWithinTransport(t *testing.T) { - for _, c := range validReferenceTestCases { - ref, err := ParseReference(c.input) - require.NoError(t, err, c.input) - stringRef := ref.StringWithinTransport() - assert.Equal(t, c.stringWithinTransport, stringRef, c.input) - // Do one more round to verify that the output can be parsed, to an equal value. 
- ref2, err := Transport.ParseReference(stringRef) - require.NoError(t, err, c.input) - stringRef2 := ref2.StringWithinTransport() - assert.Equal(t, stringRef, stringRef2, c.input) - } -} - -func TestReferenceDockerReference(t *testing.T) { - for _, c := range validReferenceTestCases { - ref, err := ParseReference(c.input) - require.NoError(t, err, c.input) - dockerRef := ref.DockerReference() - if c.dockerRef != "" { - require.NotNil(t, dockerRef, c.input) - assert.Equal(t, c.dockerRef, dockerRef.String(), c.input) - } else { - require.Nil(t, dockerRef, c.input) - } - } -} - -func TestReferencePolicyConfigurationIdentity(t *testing.T) { - for _, c := range validReferenceTestCases { - ref, err := ParseReference(c.input) - require.NoError(t, err, c.input) - assert.Equal(t, "", ref.PolicyConfigurationIdentity(), c.input) - } -} - -func TestReferencePolicyConfigurationNamespaces(t *testing.T) { - for _, c := range validReferenceTestCases { - ref, err := ParseReference(c.input) - require.NoError(t, err, c.input) - assert.Empty(t, ref.PolicyConfigurationNamespaces(), c.input) - } -} - -func TestReferenceNewImage(t *testing.T) { - for _, suffix := range []string{"", ":thisisignoredbutaccepted"} { - ref, err := ParseReference(tarFixture + suffix) - require.NoError(t, err, suffix) - img, err := ref.NewImage(nil) - assert.NoError(t, err, suffix) - defer img.Close() - } -} - -func TestReferenceNewImageSource(t *testing.T) { - for _, suffix := range []string{"", ":thisisignoredbutaccepted"} { - ref, err := ParseReference(tarFixture + suffix) - require.NoError(t, err, suffix) - src, err := ref.NewImageSource(nil, nil) - assert.NoError(t, err, suffix) - defer src.Close() - } -} - -func TestReferenceNewImageDestination(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "docker-archive-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - ref, err := ParseReference(filepath.Join(tmpDir, "no-reference")) - require.NoError(t, err) - dest, err := ref.NewImageDestination(nil) - assert.Error(t, err) - - ref, err = ParseReference(filepath.Join(tmpDir, "with-reference") + ":busybox:latest") - require.NoError(t, err) - dest, err = ref.NewImageDestination(nil) - assert.NoError(t, err) - defer dest.Close() -} - -func TestReferenceDeleteImage(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "docker-archive-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - for i, suffix := range []string{"", ":thisisignoredbutaccepted"} { - testFile := filepath.Join(tmpDir, fmt.Sprintf("file%d.tar", i)) - err := ioutil.WriteFile(testFile, []byte("nonempty"), 0644) - require.NoError(t, err, suffix) - - ref, err := ParseReference(testFile + suffix) - require.NoError(t, err, suffix) - err = ref.DeleteImage(nil) - assert.Error(t, err, suffix) - - _, err = os.Lstat(testFile) - assert.NoError(t, err, suffix) - } -} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go deleted file mode 100644 index 559e5c71df28..000000000000 --- a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go +++ /dev/null @@ -1,128 +0,0 @@ -package daemon - -import ( - "io" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/docker/tarfile" - "github.com/containers/image/types" - "github.com/docker/docker/client" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/net/context" -) - -type daemonImageDestination struct { - ref daemonReference - *tarfile.Destination // Implements most
of types.ImageDestination - // For talking to imageLoadGoroutine - goroutineCancel context.CancelFunc - statusChannel <-chan error - writer *io.PipeWriter - // Other state - committed bool // writer has been closed -} - -// newImageDestination returns a types.ImageDestination for the specified image reference. -func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { - if ref.ref == nil { - return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) - } - namedTaggedRef, ok := ref.ref.(reference.NamedTagged) - if !ok { - return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) - } - - c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host - if err != nil { - return nil, errors.Wrap(err, "Error initializing docker engine client") - } - - reader, writer := io.Pipe() - // Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it. - statusChannel := make(chan error, 1) - - ctx, goroutineCancel := context.WithCancel(context.Background()) - go imageLoadGoroutine(ctx, c, reader, statusChannel) - - return &daemonImageDestination{ - ref: ref, - Destination: tarfile.NewDestination(writer, namedTaggedRef), - goroutineCancel: goroutineCancel, - statusChannel: statusChannel, - writer: writer, - committed: false, - }, nil -} - -// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel -func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) { - err := errors.New("Internal error: unexpected panic in imageLoadGoroutine") - defer func() { - logrus.Debugf("docker-daemon: sending done, status %v", err) - statusChannel <- err - }() - defer func() { - if err == nil { - reader.Close() - } else { - reader.CloseWithError(err) - } - }() - - resp, err := c.ImageLoad(ctx, reader, true) - if err != nil { - err = errors.Wrap(err, "Error saving image to docker engine") - return - } - defer resp.Body.Close() -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *daemonImageDestination) MustMatchRuntimeOS() bool { - return true -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *daemonImageDestination) Close() error { - if !d.committed { - logrus.Debugf("docker-daemon: Closing tar stream to abort loading") - // In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing. - // In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including - // https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the - // net/http version with native Context support in Go 1.7) do not always actually immediately cancel - // the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and - // return early if the context is canceled without terminating the goroutine at all. 
- // So we need this CloseWithError to terminate sending the HTTP request Body
- // immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked" without sending
- // the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
- // Whether that works or not, closing the PipeWriter seems desirable in any case.
- d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()"))
- }
- d.goroutineCancel()
-
- return nil
-}
-
-func (d *daemonImageDestination) Reference() types.ImageReference {
- return d.ref
-}
-
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *daemonImageDestination) Commit() error {
- logrus.Debugf("docker-daemon: Closing tar stream")
- if err := d.Destination.Commit(); err != nil {
- return err
- }
- if err := d.writer.Close(); err != nil {
- return err
- }
- d.committed = true // We may still fail, but we are done sending to imageLoadGoroutine.
-
- logrus.Debugf("docker-daemon: Waiting for status")
- err := <-d.statusChannel
- return err
-}
diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/docker/daemon/daemon_src.go
deleted file mode 100644
index 644dbeecde20..000000000000
--- a/vendor/github.com/containers/image/docker/daemon/daemon_src.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package daemon
-
-import (
- "io"
- "io/ioutil"
- "os"
-
- "github.com/containers/image/docker/tarfile"
- "github.com/containers/image/types"
- "github.com/docker/docker/client"
- "github.com/pkg/errors"
- "golang.org/x/net/context"
-)
-
-const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
-
-type daemonImageSource struct {
- ref daemonReference
- *tarfile.Source // Implements most of types.ImageSource
- tarCopyPath string
-}
-
-type layerInfo struct {
- path string
- size int64
-}
-
-// newImageSource returns a types.ImageSource for the specified image reference.
-// The caller must call .Close() on the returned ImageSource.
-//
-// It would be great if we were able to stream the input tar as it is being
-// sent; but Docker sends the top-level manifest, which determines which paths
-// to look for, at the end, so we will need to seek back and re-read, several times.
-// (We could, perhaps, expect an exact sequence, assume that the first plaintext file
-// is the config, and that the following len(RootFS) files are the layers, but that feels
-// way too brittle.)
-func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
- c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host
- if err != nil {
- return nil, errors.Wrap(err, "Error initializing docker engine client")
- }
- // Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
- // Either way ImageSave should create a tarball with exactly one image.
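The daemon_dest.go code removed above coordinates Commit() with imageLoadGoroutine through an io.Pipe and a one-slot status channel. Here is a minimal, self-contained sketch of that handshake (not the vendored code itself); io.Copy to ioutil.Discard stands in for client.ImageLoad, and all names are illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
)

// consume drains r and reports its verdict on status exactly once.
// status has capacity 1, so the send never blocks even if the producer
// abandons the upload and nobody ever receives.
func consume(r *io.PipeReader, status chan<- error) {
	err := errors.New("internal error: unexpected panic in consume")
	defer func() { status <- err }()
	defer func() {
		if err == nil {
			r.Close()
		} else {
			r.CloseWithError(err) // propagate failure to the writer side
		}
	}()
	_, err = io.Copy(ioutil.Discard, r) // stand-in for client.ImageLoad
}

func main() {
	reader, writer := io.Pipe()
	status := make(chan error, 1) // buffered: "Commit" may never happen
	go consume(reader, status)

	writer.Write([]byte("tar stream goes here"))
	writer.Close()        // the Commit path: finish the stream...
	fmt.Println(<-status) // ...then wait for the consumer's verdict
}
```

The capacity-1 channel is the load-bearing detail: the goroutine can always deliver its result and exit, whether or not Commit() is ever called.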
- inputStream, err := c.ImageSave(context.TODO(), []string{ref.StringWithinTransport()}) - if err != nil { - return nil, errors.Wrap(err, "Error loading image from docker engine") - } - defer inputStream.Close() - - // FIXME: use SystemContext here. - tarCopyFile, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-daemon-tar") - if err != nil { - return nil, err - } - defer tarCopyFile.Close() - - succeeded := false - defer func() { - if !succeeded { - os.Remove(tarCopyFile.Name()) - } - }() - - if _, err := io.Copy(tarCopyFile, inputStream); err != nil { - return nil, err - } - - succeeded = true - return &daemonImageSource{ - ref: ref, - Source: tarfile.NewSource(tarCopyFile.Name()), - tarCopyPath: tarCopyFile.Name(), - }, nil -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (s *daemonImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *daemonImageSource) Close() error { - return os.Remove(s.tarCopyPath) -} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/docker/daemon/daemon_transport.go deleted file mode 100644 index 41ccd1f19701..000000000000 --- a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go +++ /dev/null @@ -1,184 +0,0 @@ -package daemon - -import ( - "github.com/pkg/errors" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for images managed by a local Docker daemon. -var Transport = daemonTransport{} - -type daemonTransport struct{} - -// Name returns the name of the transport, which must be unique among other transports. -func (t daemonTransport) Name() string { - return "docker-daemon" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t daemonTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error { - // See the explanation in daemonReference.PolicyConfigurationIdentity. - return errors.New(`docker-daemon: does not support any scopes except the default "" one`) -} - -// daemonReference is an ImageReference for images managed by a local Docker daemon -// Exactly one of id and ref can be set. -// For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon) -// For daemonImageDestination, it must be a ref, which is NamedTagged. 
-// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest. -// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.) -type daemonReference struct { - id digest.Digest - ref reference.Named // !reference.IsNameOnly -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func ParseReference(refString string) (types.ImageReference, error) { - // This is intended to be compatible with reference.ParseAnyReference, but more strict about refusing some of the ambiguous cases. - // In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars). - - // digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag). - // reference.ParseAnyReference interprets such strings as digests. - if dgst, err := digest.Parse(refString); err == nil { - // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name. - // Other digest references are ambiguous, so refuse them. - if dgst.Algorithm() != digest.Canonical { - return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical) - } - return NewReference(dgst, nil) - } - - ref, err := reference.ParseNormalizedNamed(refString) // This also rejects unprefixed digest values - if err != nil { - return nil, err - } - if reference.FamiliarName(ref) == digest.Canonical.String() { - return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical) - } - return NewReference("", ref) -} - -// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly) -func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) { - if id != "" && ref != nil { - return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time") - } - if ref != nil { - if reference.IsNameOnly(ref) { - return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) - } - // A github.com/distribution/reference value can have a tag and a digest at the same time! - // Most versions of docker/reference do not handle that (ignoring the tag), so reject such input. - // This MAY be accepted in the future. - _, isTagged := ref.(reference.NamedTagged) - _, isDigested := ref.(reference.Canonical) - if isTagged && isDigested { - return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported") - } - } - return daemonReference{ - id: id, - ref: ref, - }, nil -} - -func (ref daemonReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. 
-// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; -// instead, see transports.ImageName(). -func (ref daemonReference) StringWithinTransport() string { - switch { - case ref.id != "": - return ref.id.String() - case ref.ref != nil: - return reference.FamiliarString(ref.ref) - default: // Coverage: Should never happen, NewReference above should refuse such values. - panic("Internal inconsistency: daemonReference has empty id and nil ref") - } -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref daemonReference) DockerReference() reference.Named { - return ref.ref // May be nil -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. -func (ref daemonReference) PolicyConfigurationIdentity() string { - // We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible. - // But the existence of image IDs means that we can’t truly well namespace the input; the untagged images would have to fall into the default policy, - // which can be unexpected. So, punt. - return "" // This still allows using the default "" scope to define a policy for this transport. -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref daemonReference) PolicyConfigurationNamespaces() []string { - // See the explanation in daemonReference.PolicyConfigurationIdentity. - return []string{} -} - -// NewImage returns a types.Image for this reference. -// The caller must call .Close() on the returned Image. -func (ref daemonReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - src, err := newImageSource(ctx, ref) - if err != nil { - return nil, err - } - return image.FromSource(src) -} - -// NewImageSource returns a types.ImageSource for this reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. -// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. -// The caller must call .Close() on the returned ImageSource. -func (ref daemonReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - return newImageSource(ctx, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. 
-// The caller must call .Close() on the returned ImageDestination. -func (ref daemonReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ctx, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref daemonReference) DeleteImage(ctx *types.SystemContext) error { - // Should this just untag the image? Should this stop running containers? - // The semantics is not quite as clear as for remote repositories. - // The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant. - return errors.Errorf("Deleting images not implemented for docker-daemon: images") -} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_transport_test.go b/vendor/github.com/containers/image/docker/daemon/daemon_transport_test.go deleted file mode 100644 index 2a60c6b29c07..000000000000 --- a/vendor/github.com/containers/image/docker/daemon/daemon_transport_test.go +++ /dev/null @@ -1,228 +0,0 @@ -package daemon - -import ( - "testing" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - sha256digest = "sha256:" + sha256digestHex -) - -func TestTransportName(t *testing.T) { - assert.Equal(t, "docker-daemon", Transport.Name()) -} - -func TestTransportParseReference(t *testing.T) { - testParseReference(t, Transport.ParseReference) -} - -func TestTransportValidatePolicyConfigurationScope(t *testing.T) { - for _, scope := range []string{ // A semi-representative assortment of values; everything is rejected. - sha256digestHex, - sha256digest, - "docker.io/library/busybox:latest", - "docker.io", - "", - } { - err := Transport.ValidatePolicyConfigurationScope(scope) - assert.Error(t, err, scope) - } -} - -func TestParseReference(t *testing.T) { - testParseReference(t, ParseReference) -} - -// testParseReference is a test shared for Transport.ParseReference and ParseReference. -func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) { - for _, c := range []struct{ input, expectedID, expectedRef string }{ - {sha256digest, sha256digest, ""}, // Valid digest format - {"sha512:" + sha256digestHex + sha256digestHex, "", ""}, // Non-digest.Canonical digest - {"sha256:ab", "", ""}, // Invalid digest value (too short) - {sha256digest + "ab", "", ""}, // Invalid digest value (too long) - {"sha256:XX23456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", "", ""}, // Invalid digest value - {"UPPERCASEISINVALID", "", ""}, // Invalid reference input - {"busybox", "", ""}, // Missing tag or digest - {"busybox:latest", "", "docker.io/library/busybox:latest"}, // Explicit tag - {"busybox@" + sha256digest, "", "docker.io/library/busybox@" + sha256digest}, // Explicit digest - // A github.com/distribution/reference value can have a tag and a digest at the same time! - // Most versions of docker/reference do not handle that (ignoring the tag), so we reject such input. 
- {"busybox:latest@" + sha256digest, "", ""}, // Both tag and digest - {"docker.io/library/busybox:latest", "", "docker.io/library/busybox:latest"}, // All implied values explicitly specified - } { - ref, err := fn(c.input) - if c.expectedID == "" && c.expectedRef == "" { - assert.Error(t, err, c.input) - } else { - require.NoError(t, err, c.input) - daemonRef, ok := ref.(daemonReference) - require.True(t, ok, c.input) - // If we don't reject the input, the interpretation must be consistent with reference.ParseAnyReference - dockerRef, err := reference.ParseAnyReference(c.input) - require.NoError(t, err, c.input) - - if c.expectedRef == "" { - assert.Equal(t, c.expectedID, daemonRef.id.String(), c.input) - assert.Nil(t, daemonRef.ref, c.input) - - _, ok := dockerRef.(reference.Digested) - require.True(t, ok, c.input) - assert.Equal(t, c.expectedID, dockerRef.String(), c.input) - } else { - assert.Equal(t, "", daemonRef.id.String(), c.input) - require.NotNil(t, daemonRef.ref, c.input) - assert.Equal(t, c.expectedRef, daemonRef.ref.String(), c.input) - - _, ok := dockerRef.(reference.Named) - require.True(t, ok, c.input) - assert.Equal(t, c.expectedRef, dockerRef.String(), c.input) - } - } - } -} - -// A common list of reference formats to test for the various ImageReference methods. -// (For IDs it is much simpler, we simply use them unmodified) -var validNamedReferenceTestCases = []struct{ input, dockerRef, stringWithinTransport string }{ - {"busybox:notlatest", "docker.io/library/busybox:notlatest", "busybox:notlatest"}, // Explicit tag - {"busybox" + sha256digest, "docker.io/library/busybox" + sha256digest, "busybox" + sha256digest}, // Explicit digest - {"docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "busybox:latest"}, // All implied values explicitly specified - {"example.com/ns/foo:bar", "example.com/ns/foo:bar", "example.com/ns/foo:bar"}, // All values explicitly specified -} - -func TestNewReference(t *testing.T) { - // An ID reference. - id, err := digest.Parse(sha256digest) - require.NoError(t, err) - ref, err := NewReference(id, nil) - require.NoError(t, err) - daemonRef, ok := ref.(daemonReference) - require.True(t, ok) - assert.Equal(t, id, daemonRef.id) - assert.Nil(t, daemonRef.ref) - - // Named references - for _, c := range validNamedReferenceTestCases { - parsed, err := reference.ParseNormalizedNamed(c.input) - require.NoError(t, err) - ref, err := NewReference("", parsed) - require.NoError(t, err, c.input) - daemonRef, ok := ref.(daemonReference) - require.True(t, ok, c.input) - assert.Equal(t, "", daemonRef.id.String()) - require.NotNil(t, daemonRef.ref) - assert.Equal(t, c.dockerRef, daemonRef.ref.String(), c.input) - } - - // Both an ID and a named reference provided - parsed, err := reference.ParseNormalizedNamed("busybox:latest") - require.NoError(t, err) - _, err = NewReference(id, parsed) - assert.Error(t, err) - - // A reference with neither a tag nor digest - parsed, err = reference.ParseNormalizedNamed("busybox") - require.NoError(t, err) - _, err = NewReference("", parsed) - assert.Error(t, err) - - // A github.com/distribution/reference value can have a tag and a digest at the same time! 
- parsed, err = reference.ParseNormalizedNamed("busybox:notlatest@" + sha256digest)
- require.NoError(t, err)
- _, ok = parsed.(reference.Canonical)
- require.True(t, ok)
- _, ok = parsed.(reference.NamedTagged)
- require.True(t, ok)
- _, err = NewReference("", parsed)
- assert.Error(t, err)
-}
-
-func TestReferenceTransport(t *testing.T) {
- ref, err := ParseReference(sha256digest)
- require.NoError(t, err)
- assert.Equal(t, Transport, ref.Transport())
-
- ref, err = ParseReference("busybox:latest")
- require.NoError(t, err)
- assert.Equal(t, Transport, ref.Transport())
-}
-
-func TestReferenceStringWithinTransport(t *testing.T) {
- ref, err := ParseReference(sha256digest)
- require.NoError(t, err)
- assert.Equal(t, sha256digest, ref.StringWithinTransport())
-
- for _, c := range validNamedReferenceTestCases {
- ref, err := ParseReference(c.input)
- require.NoError(t, err, c.input)
- stringRef := ref.StringWithinTransport()
- assert.Equal(t, c.stringWithinTransport, stringRef, c.input)
- // Do one more round to verify that the output can be parsed, to an equal value.
- ref2, err := Transport.ParseReference(stringRef)
- require.NoError(t, err, c.input)
- stringRef2 := ref2.StringWithinTransport()
- assert.Equal(t, stringRef, stringRef2, c.input)
- }
-}
-
-func TestReferenceDockerReference(t *testing.T) {
- ref, err := ParseReference(sha256digest)
- require.NoError(t, err)
- assert.Nil(t, ref.DockerReference())
-
- for _, c := range validNamedReferenceTestCases {
- ref, err := ParseReference(c.input)
- require.NoError(t, err, c.input)
- dockerRef := ref.DockerReference()
- require.NotNil(t, dockerRef, c.input)
- assert.Equal(t, c.dockerRef, dockerRef.String(), c.input)
- }
-}
-
-func TestReferencePolicyConfigurationIdentity(t *testing.T) {
- ref, err := ParseReference(sha256digest)
- require.NoError(t, err)
- assert.Equal(t, "", ref.PolicyConfigurationIdentity())
-
- for _, c := range validNamedReferenceTestCases {
- ref, err := ParseReference(c.input)
- require.NoError(t, err, c.input)
- assert.Equal(t, "", ref.PolicyConfigurationIdentity(), c.input)
- }
-}
-
-func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
- ref, err := ParseReference(sha256digest)
- require.NoError(t, err)
- assert.Empty(t, ref.PolicyConfigurationNamespaces())
-
- for _, c := range validNamedReferenceTestCases {
- ref, err := ParseReference(c.input)
- require.NoError(t, err, c.input)
- assert.Empty(t, ref.PolicyConfigurationNamespaces(), c.input)
- }
-}
-
-// daemonReference.NewImage, daemonReference.NewImageSource, daemonReference.NewImageDestination
-// untested because just creating the objects immediately connects to the daemon.
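ParseReference in the deleted transport (and the tests above) hinges on one disambiguation rule: a string that parses as a digest is treated as an image ID, and anything else must parse as a normalized name. A rough sketch of that rule, using the upstream github.com/opencontainers/go-digest and github.com/docker/distribution/reference packages that the vendored copies mirror; it deliberately omits the reserved-"sha256"-repo check and the tag-plus-digest rejection:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	"github.com/opencontainers/go-digest"
)

// classify mirrors the ParseReference disambiguation: digest first,
// normalized name second, otherwise reject.
func classify(s string) string {
	if dgst, err := digest.Parse(s); err == nil {
		return "image ID " + dgst.String()
	}
	if named, err := reference.ParseNormalizedNamed(s); err == nil {
		return "name " + named.String() // e.g. docker.io/library/... is filled in
	}
	return "rejected"
}

func main() {
	fmt.Println(classify("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
	fmt.Println(classify("busybox:latest"))
	fmt.Println(classify("UPPERCASE")) // uppercase repo names are invalid
}
```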
- -func TestReferenceDeleteImage(t *testing.T) { - ref, err := ParseReference(sha256digest) - require.NoError(t, err) - err = ref.DeleteImage(nil) - assert.Error(t, err) - - for _, c := range validNamedReferenceTestCases { - ref, err := ParseReference(c.input) - require.NoError(t, err, c.input) - err = ref.DeleteImage(nil) - assert.Error(t, err, c.input) - } -} diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go deleted file mode 100644 index 99102a6c244e..000000000000 --- a/vendor/github.com/containers/image/docker/docker_client.go +++ /dev/null @@ -1,584 +0,0 @@ -package docker - -import ( - "context" - "crypto/tls" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "os" - "path/filepath" - "strings" - "time" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" - "github.com/containers/storage/pkg/homedir" - "github.com/docker/distribution/registry/client" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - dockerHostname = "docker.io" - dockerRegistry = "registry-1.docker.io" - dockerAuthRegistry = "https://index.docker.io/v1/" - - dockerCfg = ".docker" - dockerCfgFileName = "config.json" - dockerCfgObsolete = ".dockercfg" - - systemPerHostCertDirPath = "/etc/docker/certs.d" - - resolvedPingV2URL = "%s://%s/v2/" - resolvedPingV1URL = "%s://%s/v1/_ping" - tagsPath = "/v2/%s/tags/list" - manifestPath = "/v2/%s/manifests/%s" - blobsPath = "/v2/%s/blobs/%s" - blobUploadPath = "/v2/%s/blobs/uploads/" - extensionsSignaturePath = "/extensions/v2/%s/signatures/%s" - - minimumTokenLifetimeSeconds = 60 - - extensionSignatureSchemaVersion = 2 // extensionSignature.Version - extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type -) - -// ErrV1NotSupported is returned when we're trying to talk to a -// docker V1 registry. -var ErrV1NotSupported = errors.New("can't talk to a V1 docker registry") - -// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: -// signature represents a Docker image signature. -type extensionSignature struct { - Version int `json:"schemaVersion"` // Version specifies the schema version - Name string `json:"name"` // Name must be in "sha256:@signatureName" format - Type string `json:"type"` // Type is optional, of not set it will be defaulted to "AtomicImageV1" - Content []byte `json:"content"` // Content contains the signature -} - -// signatureList represents list of Docker image signatures. -type extensionSignatureList struct { - Signatures []extensionSignature `json:"signatures"` -} - -type bearerToken struct { - Token string `json:"token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` -} - -// dockerClient is configuration for dealing with a single Docker registry. -type dockerClient struct { - // The following members are set by newDockerClient and do not change afterwards. - ctx *types.SystemContext - registry string - username string - password string - client *http.Client - signatureBase signatureStorageBase - scope authScope - // The following members are detected registry properties: - // They are set after a successful detectProperties(), and never change afterwards. 
- scheme string // Empty value also used to indicate detectProperties() has not yet succeeded. - challenges []challenge - supportsSignatures bool - // The following members are private state for setupRequestAuth, both are valid if token != nil. - token *bearerToken - tokenExpiration time.Time -} - -type authScope struct { - remoteName string - actions string -} - -// this is cloned from docker/go-connections because upstream docker has changed -// it and make deps here fails otherwise. -// We'll drop this once we upgrade to docker 1.13.x deps. -func serverDefault() *tls.Config { - return &tls.Config{ - // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, - PreferServerCipherSuites: true, - CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, - } -} - -func newTransport() *http.Transport { - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - tr := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: direct.Dial, - TLSHandshakeTimeout: 10 * time.Second, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } - proxyDialer, err := sockets.DialerFromEnvironment(direct) - if err == nil { - tr.Dial = proxyDialer.Dial - } - return tr -} - -// dockerCertDir returns a path to a directory to be consumed by setupCertificates() depending on ctx and hostPort. -func dockerCertDir(ctx *types.SystemContext, hostPort string) string { - if ctx != nil && ctx.DockerCertPath != "" { - return ctx.DockerCertPath - } - var hostCertDir string - if ctx != nil && ctx.DockerPerHostCertDirPath != "" { - hostCertDir = ctx.DockerPerHostCertDirPath - } else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" { - hostCertDir = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemPerHostCertDirPath) - } else { - hostCertDir = systemPerHostCertDirPath - } - return filepath.Join(hostCertDir, hostPort) -} - -func setupCertificates(dir string, tlsc *tls.Config) error { - logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) - fs, err := ioutil.ReadDir(dir) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - for _, f := range fs { - fullPath := filepath.Join(dir, f.Name()) - if strings.HasSuffix(f.Name(), ".crt") { - systemPool, err := tlsconfig.SystemCertPool() - if err != nil { - return errors.Wrap(err, "unable to get system cert pool") - } - tlsc.RootCAs = systemPool - logrus.Debugf(" crt: %s", fullPath) - data, err := ioutil.ReadFile(fullPath) - if err != nil { - return err - } - tlsc.RootCAs.AppendCertsFromPEM(data) - } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf(" cert: %s", fullPath) - if !hasFile(fs, keyName) { - return errors.Errorf("missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt", keyName, certName)
- }
- cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
- if err != nil {
- return err
- }
- tlsc.Certificates = append(tlsc.Certificates, cert)
- }
- if strings.HasSuffix(f.Name(), ".key") {
- keyName := f.Name()
- certName := keyName[:len(keyName)-4] + ".cert"
- logrus.Debugf(" key: %s", fullPath)
- if !hasFile(fs, certName) {
- return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
- }
- }
- }
- return nil
-}
-
-func hasFile(files []os.FileInfo, name string) bool {
- for _, f := range files {
- if f.Name() == name {
- return true
- }
- }
- return false
-}
-
-// newDockerClient returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
-// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
-func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
- registry := reference.Domain(ref.ref)
- if registry == dockerHostname {
- registry = dockerRegistry
- }
- username, password, err := getAuth(ctx, reference.Domain(ref.ref))
- if err != nil {
- return nil, err
- }
- tr := newTransport()
- tr.TLSClientConfig = serverDefault()
- // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
- // because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
- // dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
- // generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
- // undocumented and may change if docker/docker changes.
- certDir := dockerCertDir(ctx, reference.Domain(ref.ref))
- if err := setupCertificates(certDir, tr.TLSClientConfig); err != nil {
- return nil, err
- }
- if ctx != nil && ctx.DockerInsecureSkipTLSVerify {
- tr.TLSClientConfig.InsecureSkipVerify = true
- }
- client := &http.Client{Transport: tr}
-
- sigBase, err := configuredSignatureStorageBase(ctx, ref, write)
- if err != nil {
- return nil, err
- }
-
- return &dockerClient{
- ctx: ctx,
- registry: registry,
- username: username,
- password: password,
- client: client,
- signatureBase: sigBase,
- scope: authScope{
- actions: actions,
- remoteName: reference.Path(ref.ref),
- },
- }, nil
-}
-
-// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
-// The host name and scheme are taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
-func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
- if err := c.detectProperties(ctx); err != nil {
- return nil, err
- }
-
- url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
- return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, true)
-}
-
-// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
-// streamLen, if not -1, specifies the length of the data expected on stream.
-// makeRequest should generally be preferred.
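setupCertificates above implements docker's certs.d convention: *.crt files become extra trusted CAs, and *.cert/*.key pairs become client certificates. A trimmed-down, stdlib-only sketch of that loop; unlike the original it starts from an empty pool rather than the system cert pool, and it fails hard instead of returning errors:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
	"path/filepath"
	"strings"
)

// loadCertsDir builds a tls.Config from a certs.d-style directory.
func loadCertsDir(dir string) *tls.Config {
	tlsc := &tls.Config{RootCAs: x509.NewCertPool()}
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range files {
		full := filepath.Join(dir, f.Name())
		switch {
		case strings.HasSuffix(f.Name(), ".crt"):
			pem, err := ioutil.ReadFile(full)
			if err != nil {
				log.Fatal(err)
			}
			tlsc.RootCAs.AppendCertsFromPEM(pem) // extra CA certificate
		case strings.HasSuffix(f.Name(), ".cert"):
			key := strings.TrimSuffix(full, ".cert") + ".key"
			cert, err := tls.LoadX509KeyPair(full, key) // client cert + key pair
			if err != nil {
				log.Fatal(err)
			}
			tlsc.Certificates = append(tlsc.Certificates, cert)
		}
	}
	return tlsc
}

func main() {
	_ = loadCertsDir("/etc/docker/certs.d/localhost:5000") // hypothetical registry dir
}
```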
-// TODO(runcom): too many arguments here, use a struct -func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, sendAuth bool) (*http.Response, error) { - req, err := http.NewRequest(method, url, stream) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it. - req.ContentLength = streamLen - } - req.Header.Set("Docker-Distribution-API-Version", "registry/2.0") - for n, h := range headers { - for _, hh := range h { - req.Header.Add(n, hh) - } - } - if c.ctx != nil && c.ctx.DockerRegistryUserAgent != "" { - req.Header.Add("User-Agent", c.ctx.DockerRegistryUserAgent) - } - if sendAuth { - if err := c.setupRequestAuth(req); err != nil { - return nil, err - } - } - logrus.Debugf("%s %s", method, url) - res, err := c.client.Do(req) - if err != nil { - return nil, err - } - return res, nil -} - -// we're using the challenges from the /v2/ ping response and not the one from the destination -// URL in this request because: -// -// 1) docker does that as well -// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request -// -// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up -func (c *dockerClient) setupRequestAuth(req *http.Request) error { - if len(c.challenges) == 0 { - return nil - } - schemeNames := make([]string, 0, len(c.challenges)) - for _, challenge := range c.challenges { - schemeNames = append(schemeNames, challenge.Scheme) - switch challenge.Scheme { - case "basic": - req.SetBasicAuth(c.username, c.password) - return nil - case "bearer": - if c.token == nil || time.Now().After(c.tokenExpiration) { - realm, ok := challenge.Parameters["realm"] - if !ok { - return errors.Errorf("missing realm in bearer auth challenge") - } - service, _ := challenge.Parameters["service"] // Will be "" if not present - scope := fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions) - token, err := c.getBearerToken(req.Context(), realm, service, scope) - if err != nil { - return err - } - c.token = token - c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) - } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token)) - return nil - default: - logrus.Debugf("no handler for %s authentication", challenge.Scheme) - } - } - logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", ")) - return nil -} - -func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) { - authReq, err := http.NewRequest("GET", realm, nil) - if err != nil { - return nil, err - } - authReq = authReq.WithContext(ctx) - getParams := authReq.URL.Query() - if service != "" { - getParams.Add("service", service) - } - if scope != "" { - getParams.Add("scope", scope) - } - authReq.URL.RawQuery = getParams.Encode() - if c.username != "" && c.password != "" { - authReq.SetBasicAuth(c.username, c.password) - } - tr := newTransport() - // TODO(runcom): insecure for now to contact the external token service - tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - client := &http.Client{Transport: tr} - res, err := client.Do(authReq) - if err != nil { - return nil, err - } - defer 
res.Body.Close() - switch res.StatusCode { - case http.StatusUnauthorized: - return nil, errors.Errorf("unable to retrieve auth token: 401 unauthorized") - case http.StatusOK: - break - default: - return nil, errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL) - } - tokenBlob, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - var token bearerToken - if err := json.Unmarshal(tokenBlob, &token); err != nil { - return nil, err - } - if token.ExpiresIn < minimumTokenLifetimeSeconds { - token.ExpiresIn = minimumTokenLifetimeSeconds - logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn) - } - if token.IssuedAt.IsZero() { - token.IssuedAt = time.Now().UTC() - } - return &token, nil -} - -func getAuth(ctx *types.SystemContext, registry string) (string, string, error) { - if ctx != nil && ctx.DockerAuthConfig != nil { - return ctx.DockerAuthConfig.Username, ctx.DockerAuthConfig.Password, nil - } - var dockerAuth dockerConfigFile - dockerCfgPath := filepath.Join(getDefaultConfigDir(".docker"), dockerCfgFileName) - if _, err := os.Stat(dockerCfgPath); err == nil { - j, err := ioutil.ReadFile(dockerCfgPath) - if err != nil { - return "", "", err - } - if err := json.Unmarshal(j, &dockerAuth); err != nil { - return "", "", err - } - - } else if os.IsNotExist(err) { - // try old config path - oldDockerCfgPath := filepath.Join(getDefaultConfigDir(dockerCfgObsolete)) - if _, err := os.Stat(oldDockerCfgPath); err != nil { - if os.IsNotExist(err) { - return "", "", nil - } - return "", "", errors.Wrap(err, oldDockerCfgPath) - } - - j, err := ioutil.ReadFile(oldDockerCfgPath) - if err != nil { - return "", "", err - } - if err := json.Unmarshal(j, &dockerAuth.AuthConfigs); err != nil { - return "", "", err - } - - } else if err != nil { - return "", "", errors.Wrap(err, dockerCfgPath) - } - - // I'm feeling lucky - if c, exists := dockerAuth.AuthConfigs[registry]; exists { - return decodeDockerAuth(c.Auth) - } - - // bad luck; let's normalize the entries first - registry = normalizeRegistry(registry) - normalizedAuths := map[string]dockerAuthConfig{} - for k, v := range dockerAuth.AuthConfigs { - normalizedAuths[normalizeRegistry(k)] = v - } - if c, exists := normalizedAuths[registry]; exists { - return decodeDockerAuth(c.Auth) - } - return "", "", nil -} - -// detectProperties detects various properties of the registry. -// See the dockerClient documentation for members which are affected by this. 
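getAuth above resolves credentials from ~/.docker/config.json with a fallback to the legacy ~/.dockercfg. The core of that flow, reduced to a sketch under simplifying assumptions (a single config file, no registry normalization, no legacy fallback; the path in main is illustrative):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"strings"
)

type authEntry struct {
	Auth string `json:"auth"`
}

type configFile struct {
	Auths map[string]authEntry `json:"auths"`
}

// lookupAuth reads a config.json, finds the registry entry, and
// base64-decodes the "user:password" pair stored in the auth field.
func lookupAuth(path, registry string) (string, string, error) {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return "", "", err
	}
	var cfg configFile
	if err := json.Unmarshal(raw, &cfg); err != nil {
		return "", "", err
	}
	entry, ok := cfg.Auths[registry]
	if !ok {
		return "", "", nil // no credentials is not an error
	}
	decoded, err := base64.StdEncoding.DecodeString(entry.Auth)
	if err != nil {
		return "", "", err
	}
	parts := strings.SplitN(string(decoded), ":", 2)
	if len(parts) != 2 {
		return "", "", nil // malformed entries are skipped, as docker does
	}
	return parts[0], parts[1], nil
}

func main() {
	user, pass, err := lookupAuth("/tmp/config.json", "example.org") // hypothetical path
	fmt.Println(user, pass, err)
}
```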
-func (c *dockerClient) detectProperties(ctx context.Context) error { - if c.scheme != "" { - return nil - } - - ping := func(scheme string) error { - url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry) - resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true) - logrus.Debugf("Ping %s err %#v", url, err) - if err != nil { - return err - } - defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", url, resp.StatusCode) - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { - return errors.Errorf("error pinging repository, response code %d", resp.StatusCode) - } - c.challenges = parseAuthHeader(resp.Header) - c.scheme = scheme - c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1" - return nil - } - err := ping("https") - if err != nil && c.ctx != nil && c.ctx.DockerInsecureSkipTLSVerify { - err = ping("http") - } - if err != nil { - err = errors.Wrap(err, "pinging docker registry returned") - if c.ctx != nil && c.ctx.DockerDisableV1Ping { - return err - } - // best effort to understand if we're talking to a V1 registry - pingV1 := func(scheme string) bool { - url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry) - resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true) - logrus.Debugf("Ping %s err %#v", url, err) - if err != nil { - return false - } - defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", url, resp.StatusCode) - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { - return false - } - return true - } - isV1 := pingV1("https") - if !isV1 && c.ctx != nil && c.ctx.DockerInsecureSkipTLSVerify { - isV1 = pingV1("http") - } - if isV1 { - err = ErrV1NotSupported - } - } - return err -} - -// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, -// using the original data structures. -func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { - path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest) - res, err := c.makeRequest(ctx, "GET", path, nil, nil) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, client.HandleErrorResponse(res) - } - body, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - - var parsedBody extensionSignatureList - if err := json.Unmarshal(body, &parsedBody); err != nil { - return nil, errors.Wrapf(err, "Error decoding signature list") - } - return &parsedBody, nil -} - -func getDefaultConfigDir(confPath string) string { - return filepath.Join(homedir.Get(), confPath) -} - -type dockerAuthConfig struct { - Auth string `json:"auth,omitempty"` -} - -type dockerConfigFile struct { - AuthConfigs map[string]dockerAuthConfig `json:"auths"` -} - -func decodeDockerAuth(s string) (string, string, error) { - decoded, err := base64.StdEncoding.DecodeString(s) - if err != nil { - return "", "", err - } - parts := strings.SplitN(string(decoded), ":", 2) - if len(parts) != 2 { - // if it's invalid just skip, as docker does - return "", "", nil - } - user := parts[0] - password := strings.Trim(parts[1], "\x00") - return user, password, nil -} - -// convertToHostname converts a registry url which has http|https prepended -// to just an hostname. 
-// Copied from github.com/docker/docker/registry/auth.go -func convertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.TrimPrefix(url, "http://") - } else if strings.HasPrefix(url, "https://") { - stripped = strings.TrimPrefix(url, "https://") - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] -} - -func normalizeRegistry(registry string) string { - normalized := convertToHostname(registry) - switch normalized { - case "registry-1.docker.io", "docker.io": - return "index.docker.io" - } - return normalized -} diff --git a/vendor/github.com/containers/image/docker/docker_client_test.go b/vendor/github.com/containers/image/docker/docker_client_test.go deleted file mode 100644 index a063a540f7d3..000000000000 --- a/vendor/github.com/containers/image/docker/docker_client_test.go +++ /dev/null @@ -1,505 +0,0 @@ -package docker - -import ( - "encoding/base64" - "encoding/json" - //"fmt" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "testing" - - "github.com/containers/image/types" - "github.com/containers/storage/pkg/homedir" - "github.com/stretchr/testify/assert" -) - -func TestDockerCertDir(t *testing.T) { - const nondefaultFullPath = "/this/is/not/the/default/full/path" - const nondefaultPerHostDir = "/this/is/not/the/default/certs.d" - const variableReference = "$HOME" - const rootPrefix = "/root/prefix" - const registryHostPort = "localhost:5000" - - systemPerHostResult := filepath.Join(systemPerHostCertDirPath, registryHostPort) - for _, c := range []struct { - ctx *types.SystemContext - expected string - }{ - // The common case - {nil, systemPerHostResult}, - // There is a context, but it does not override the path. - {&types.SystemContext{}, systemPerHostResult}, - // Full path overridden - {&types.SystemContext{DockerCertPath: nondefaultFullPath}, nondefaultFullPath}, - // Per-host path overridden - { - &types.SystemContext{DockerPerHostCertDirPath: nondefaultPerHostDir}, - filepath.Join(nondefaultPerHostDir, registryHostPort), - }, - // Both overridden - { - &types.SystemContext{ - DockerCertPath: nondefaultFullPath, - DockerPerHostCertDirPath: nondefaultPerHostDir, - }, - nondefaultFullPath, - }, - // Root overridden - { - &types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix}, - filepath.Join(rootPrefix, systemPerHostResult), - }, - // Root and path overrides present simultaneously, - { - &types.SystemContext{ - DockerCertPath: nondefaultFullPath, - RootForImplicitAbsolutePaths: rootPrefix, - }, - nondefaultFullPath, - }, - { - &types.SystemContext{ - DockerPerHostCertDirPath: nondefaultPerHostDir, - RootForImplicitAbsolutePaths: rootPrefix, - }, - filepath.Join(nondefaultPerHostDir, registryHostPort), - }, - // … and everything at once - { - &types.SystemContext{ - DockerCertPath: nondefaultFullPath, - DockerPerHostCertDirPath: nondefaultPerHostDir, - RootForImplicitAbsolutePaths: rootPrefix, - }, - nondefaultFullPath, - }, - // No environment expansion happens in the overridden paths - {&types.SystemContext{DockerCertPath: variableReference}, variableReference}, - { - &types.SystemContext{DockerPerHostCertDirPath: variableReference}, - filepath.Join(variableReference, registryHostPort), - }, - } { - path := dockerCertDir(c.ctx, registryHostPort) - assert.Equal(t, c.expected, path) - } -} - -func TestGetAuth(t *testing.T) { - origHomeDir := homedir.Get() - tmpDir, err := ioutil.TempDir("", "test_docker_client_get_auth") - if err != nil { - t.Fatal(err) - } - t.Logf("using 
temporary home directory: %q", tmpDir) - // override homedir - os.Setenv(homedir.Key(), tmpDir) - defer func() { - err := os.RemoveAll(tmpDir) - if err != nil { - t.Logf("failed to cleanup temporary home directory %q: %v", tmpDir, err) - } - os.Setenv(homedir.Key(), origHomeDir) - }() - - configDir := filepath.Join(tmpDir, ".docker") - if err := os.Mkdir(configDir, 0750); err != nil { - t.Fatal(err) - } - configPath := filepath.Join(configDir, "config.json") - - for _, tc := range []struct { - name string - hostname string - authConfig testAuthConfig - expectedUsername string - expectedPassword string - expectedError error - ctx *types.SystemContext - }{ - { - name: "empty hostname", - authConfig: makeTestAuthConfig(testAuthConfigDataMap{"localhost:5000": testAuthConfigData{"bob", "password"}}), - }, - { - name: "no auth config", - hostname: "index.docker.io", - }, - { - name: "match one", - hostname: "example.org", - authConfig: makeTestAuthConfig(testAuthConfigDataMap{"example.org": testAuthConfigData{"joe", "mypass"}}), - expectedUsername: "joe", - expectedPassword: "mypass", - }, - { - name: "match none", - hostname: "registry.example.org", - authConfig: makeTestAuthConfig(testAuthConfigDataMap{"example.org": testAuthConfigData{"joe", "mypass"}}), - }, - { - name: "match docker.io", - hostname: "docker.io", - authConfig: makeTestAuthConfig(testAuthConfigDataMap{ - "example.org": testAuthConfigData{"example", "org"}, - "index.docker.io": testAuthConfigData{"index", "docker.io"}, - "docker.io": testAuthConfigData{"docker", "io"}, - }), - expectedUsername: "docker", - expectedPassword: "io", - }, - { - name: "match docker.io normalized", - hostname: "docker.io", - authConfig: makeTestAuthConfig(testAuthConfigDataMap{ - "example.org": testAuthConfigData{"bob", "pw"}, - "https://index.docker.io/v1": testAuthConfigData{"alice", "wp"}, - }), - expectedUsername: "alice", - expectedPassword: "wp", - }, - { - name: "normalize registry", - hostname: "https://docker.io/v1", - authConfig: makeTestAuthConfig(testAuthConfigDataMap{ - "docker.io": testAuthConfigData{"user", "pw"}, - "localhost:5000": testAuthConfigData{"joe", "pass"}, - }), - expectedUsername: "user", - expectedPassword: "pw", - }, - { - name: "match localhost", - hostname: "http://localhost", - authConfig: makeTestAuthConfig(testAuthConfigDataMap{ - "docker.io": testAuthConfigData{"user", "pw"}, - "localhost": testAuthConfigData{"joe", "pass"}, - "example.com": testAuthConfigData{"alice", "pwd"}, - }), - expectedUsername: "joe", - expectedPassword: "pass", - }, - { - name: "match ip", - hostname: "10.10.3.56:5000", - authConfig: makeTestAuthConfig(testAuthConfigDataMap{ - "10.10.30.45": testAuthConfigData{"user", "pw"}, - "localhost": testAuthConfigData{"joe", "pass"}, - "10.10.3.56": testAuthConfigData{"alice", "pwd"}, - "10.10.3.56:5000": testAuthConfigData{"me", "mine"}, - }), - expectedUsername: "me", - expectedPassword: "mine", - }, - { - name: "match port", - hostname: "https://localhost:5000", - authConfig: makeTestAuthConfig(testAuthConfigDataMap{ - "https://127.0.0.1:5000": testAuthConfigData{"user", "pw"}, - "http://localhost": testAuthConfigData{"joe", "pass"}, - "https://localhost:5001": testAuthConfigData{"alice", "pwd"}, - "localhost:5000": testAuthConfigData{"me", "mine"}, - }), - expectedUsername: "me", - expectedPassword: "mine", - }, - { - name: "use system context", - hostname: "example.org", - authConfig: makeTestAuthConfig(testAuthConfigDataMap{ - "example.org": testAuthConfigData{"user", "pw"}, - }), - 
expectedUsername: "foo", - expectedPassword: "bar", - ctx: &types.SystemContext{ - DockerAuthConfig: &types.DockerAuthConfig{ - Username: "foo", - Password: "bar", - }, - }, - }, - } { - contents, err := json.MarshalIndent(&tc.authConfig, "", " ") - if err != nil { - t.Errorf("[%s] failed to marshal authConfig: %v", tc.name, err) - continue - } - if err := ioutil.WriteFile(configPath, contents, 0640); err != nil { - t.Errorf("[%s] failed to write file %q: %v", tc.name, configPath, err) - continue - } - - var ctx *types.SystemContext - if tc.ctx != nil { - ctx = tc.ctx - } - username, password, err := getAuth(ctx, tc.hostname) - if err == nil && tc.expectedError != nil { - t.Errorf("[%s] got unexpected non error and username=%q, password=%q", tc.name, username, password) - continue - } - if err != nil && tc.expectedError == nil { - t.Errorf("[%s] got unexpected error: %#+v", tc.name, err) - continue - } - if !reflect.DeepEqual(err, tc.expectedError) { - t.Errorf("[%s] got unexpected error: %#+v != %#+v", tc.name, err, tc.expectedError) - continue - } - - if username != tc.expectedUsername { - t.Errorf("[%s] got unexpected user name: %q != %q", tc.name, username, tc.expectedUsername) - } - if password != tc.expectedPassword { - t.Errorf("[%s] got unexpected user name: %q != %q", tc.name, password, tc.expectedPassword) - } - } -} - -func TestGetAuthFromLegacyFile(t *testing.T) { - origHomeDir := homedir.Get() - tmpDir, err := ioutil.TempDir("", "test_docker_client_get_auth") - if err != nil { - t.Fatal(err) - } - t.Logf("using temporary home directory: %q", tmpDir) - // override homedir - os.Setenv(homedir.Key(), tmpDir) - defer func() { - err := os.RemoveAll(tmpDir) - if err != nil { - t.Logf("failed to cleanup temporary home directory %q: %v", tmpDir, err) - } - os.Setenv(homedir.Key(), origHomeDir) - }() - - configPath := filepath.Join(tmpDir, ".dockercfg") - - for _, tc := range []struct { - name string - hostname string - authConfig testAuthConfig - expectedUsername string - expectedPassword string - expectedError error - }{ - { - name: "normalize registry", - hostname: "https://docker.io/v1", - authConfig: makeTestAuthConfig(testAuthConfigDataMap{ - "docker.io": testAuthConfigData{"user", "pw"}, - "localhost:5000": testAuthConfigData{"joe", "pass"}, - }), - expectedUsername: "user", - expectedPassword: "pw", - }, - { - name: "ignore schema and path", - hostname: "http://index.docker.io/v1", - authConfig: makeTestAuthConfig(testAuthConfigDataMap{ - "docker.io/v2": testAuthConfigData{"user", "pw"}, - "https://localhost/v1": testAuthConfigData{"joe", "pwd"}, - }), - expectedUsername: "user", - expectedPassword: "pw", - }, - } { - contents, err := json.MarshalIndent(&tc.authConfig.Auths, "", " ") - if err != nil { - t.Errorf("[%s] failed to marshal authConfig: %v", tc.name, err) - continue - } - if err := ioutil.WriteFile(configPath, contents, 0640); err != nil { - t.Errorf("[%s] failed to write file %q: %v", tc.name, configPath, err) - continue - } - - username, password, err := getAuth(nil, tc.hostname) - if err == nil && tc.expectedError != nil { - t.Errorf("[%s] got unexpected non error and username=%q, password=%q", tc.name, username, password) - continue - } - if err != nil && tc.expectedError == nil { - t.Errorf("[%s] got unexpected error: %#+v", tc.name, err) - continue - } - if !reflect.DeepEqual(err, tc.expectedError) { - t.Errorf("[%s] got unexpected error: %#+v != %#+v", tc.name, err, tc.expectedError) - continue - } - - if username != tc.expectedUsername { - t.Errorf("[%s] got 
unexpected user name: %q != %q", tc.name, username, tc.expectedUsername) - } - if password != tc.expectedPassword { - t.Errorf("[%s] got unexpected user name: %q != %q", tc.name, password, tc.expectedPassword) - } - } -} - -func TestGetAuthPreferNewConfig(t *testing.T) { - origHomeDir := homedir.Get() - tmpDir, err := ioutil.TempDir("", "test_docker_client_get_auth") - if err != nil { - t.Fatal(err) - } - t.Logf("using temporary home directory: %q", tmpDir) - // override homedir - os.Setenv(homedir.Key(), tmpDir) - defer func() { - err := os.RemoveAll(tmpDir) - if err != nil { - t.Logf("failed to cleanup temporary home directory %q: %v", tmpDir, err) - } - os.Setenv(homedir.Key(), origHomeDir) - }() - - configDir := filepath.Join(tmpDir, ".docker") - if err := os.Mkdir(configDir, 0750); err != nil { - t.Fatal(err) - } - - for _, data := range []struct { - path string - ac interface{} - }{ - { - filepath.Join(configDir, "config.json"), - makeTestAuthConfig(testAuthConfigDataMap{ - "https://index.docker.io/v1/": testAuthConfigData{"alice", "pass"}, - }), - }, - { - filepath.Join(tmpDir, ".dockercfg"), - makeTestAuthConfig(testAuthConfigDataMap{ - "https://index.docker.io/v1/": testAuthConfigData{"bob", "pw"}, - }).Auths, - }, - } { - contents, err := json.MarshalIndent(&data.ac, "", " ") - if err != nil { - t.Fatalf("failed to marshal authConfig: %v", err) - } - if err := ioutil.WriteFile(data.path, contents, 0640); err != nil { - t.Fatalf("failed to write file %q: %v", data.path, err) - } - } - - username, password, err := getAuth(nil, "index.docker.io") - if err != nil { - t.Fatalf("got unexpected error: %#+v", err) - } - - if username != "alice" { - t.Fatalf("got unexpected user name: %q != %q", username, "alice") - } - if password != "pass" { - t.Fatalf("got unexpected user name: %q != %q", password, "pass") - } -} - -func TestGetAuthFailsOnBadInput(t *testing.T) { - origHomeDir := homedir.Get() - tmpDir, err := ioutil.TempDir("", "test_docker_client_get_auth") - if err != nil { - t.Fatal(err) - } - t.Logf("using temporary home directory: %q", tmpDir) - // override homedir - os.Setenv(homedir.Key(), tmpDir) - defer func() { - err := os.RemoveAll(tmpDir) - if err != nil { - t.Logf("failed to cleanup temporary home directory %q: %v", tmpDir, err) - } - os.Setenv(homedir.Key(), origHomeDir) - }() - - configDir := filepath.Join(tmpDir, ".docker") - if err := os.Mkdir(configDir, 0750); err != nil { - t.Fatal(err) - } - configPath := filepath.Join(configDir, "config.json") - - // no config file present - username, password, err := getAuth(nil, "index.docker.io") - if err != nil { - t.Fatalf("got unexpected error: %#+v", err) - } - if len(username) > 0 || len(password) > 0 { - t.Fatalf("got unexpected not empty username/password: %q/%q", username, password) - } - - if err := ioutil.WriteFile(configPath, []byte("Json rocks! 
Unless it doesn't."), 0640); err != nil {
- t.Fatalf("failed to write file %q: %v", configPath, err)
- }
- username, password, err = getAuth(nil, "index.docker.io")
- if err == nil {
- t.Fatalf("got unexpected non-error: username=%q, password=%q", username, password)
- }
- if _, ok := err.(*json.SyntaxError); !ok {
- t.Fatalf("expected json.SyntaxError, not: %#+v", err)
- }
-
- // remove the invalid config file
- os.RemoveAll(configPath)
- // no config file present
- username, password, err = getAuth(nil, "index.docker.io")
- if err != nil {
- t.Fatalf("got unexpected error: %#+v", err)
- }
- if len(username) > 0 || len(password) > 0 {
- t.Fatalf("got unexpected not empty username/password: %q/%q", username, password)
- }
-
- configPath = filepath.Join(tmpDir, ".dockercfg")
- if err := ioutil.WriteFile(configPath, []byte("I'm certainly not a json string."), 0640); err != nil {
- t.Fatalf("failed to write file %q: %v", configPath, err)
- }
- username, password, err = getAuth(nil, "index.docker.io")
- if err == nil {
- t.Fatalf("got unexpected non-error: username=%q, password=%q", username, password)
- }
- if _, ok := err.(*json.SyntaxError); !ok {
- t.Fatalf("expected json.SyntaxError, not: %#+v", err)
- }
-}
-
-type testAuthConfigData struct {
- username string
- password string
-}
-
-type testAuthConfigDataMap map[string]testAuthConfigData
-
-type testAuthConfigEntry struct {
- Auth string `json:"auth,omitempty"`
-}
-
-type testAuthConfig struct {
- Auths map[string]testAuthConfigEntry `json:"auths"`
-}
-
-// encodeAuth creates an auth value from given authConfig data to be stored in auth config file.
-// Inspired by github.com/docker/docker/cliconfig/config.go v1.10.3.
-func encodeAuth(authConfig *testAuthConfigData) string {
- authStr := authConfig.username + ":" + authConfig.password
- msg := []byte(authStr)
- encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
- base64.StdEncoding.Encode(encoded, msg)
- return string(encoded)
-}
-
-func makeTestAuthConfig(authConfigData map[string]testAuthConfigData) testAuthConfig {
- ac := testAuthConfig{
- Auths: make(map[string]testAuthConfigEntry),
- }
- for host, data := range authConfigData {
- ac.Auths[host] = testAuthConfigEntry{
- Auth: encodeAuth(&data),
- }
- }
- return ac
-}
diff --git a/vendor/github.com/containers/image/docker/docker_image.go b/vendor/github.com/containers/image/docker/docker_image.go
deleted file mode 100644
index 992d92035465..000000000000
--- a/vendor/github.com/containers/image/docker/docker_image.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package docker
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/http"
-
- "github.com/containers/image/docker/reference"
- "github.com/containers/image/image"
- "github.com/containers/image/types"
- "github.com/pkg/errors"
-)
-
-// Image is a Docker-specific implementation of types.Image with a few extra methods
-// which are specific to Docker.
-type Image struct {
- types.Image
- src *dockerImageSource
-}
-
-// newImage returns a new Image interface type after setting up
-// a client to the registry hosting the given image.
-// The caller must call .Close() on the returned Image.
-func newImage(ctx *types.SystemContext, ref dockerReference) (types.Image, error) {
- s, err := newImageSource(ctx, ref, nil)
- if err != nil {
- return nil, err
- }
- img, err := image.FromSource(s)
- if err != nil {
- return nil, err
- }
- return &Image{Image: img, src: s}, nil
-}
-
-// SourceRefFullName returns a fully expanded name for the repository this image is in.
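encodeAuth above and decodeDockerAuth in docker_client.go are mirror images: the config file stores base64("user:password") in the auth field. A quick round trip using only the standard library:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// encodeAuth side: what the tests write into the fixture files.
	encoded := base64.StdEncoding.EncodeToString([]byte("joe" + ":" + "mypass"))
	fmt.Println(encoded) // am9lOm15cGFzcw==

	// decodeDockerAuth side: what the client reads back.
	decoded, _ := base64.StdEncoding.DecodeString(encoded)
	parts := strings.SplitN(string(decoded), ":", 2) // split on the first colon only
	fmt.Println(parts[0], parts[1])                  // joe mypass
}
```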
-func (i *Image) SourceRefFullName() string { - return i.src.ref.ref.Name() -} - -// GetRepositoryTags list all tags available in the repository. Note that this has no connection with the tag(s) used for this specific image, if any. -func (i *Image) GetRepositoryTags() ([]string, error) { - path := fmt.Sprintf(tagsPath, reference.Path(i.src.ref.ref)) - // FIXME: Pass the context.Context - res, err := i.src.c.makeRequest(context.TODO(), "GET", path, nil, nil) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - // print url also - return nil, errors.Errorf("Invalid status code returned when fetching tags list %d", res.StatusCode) - } - type tagsRes struct { - Tags []string - } - tags := &tagsRes{} - if err := json.NewDecoder(res.Body).Decode(tags); err != nil { - return nil, err - } - return tags.Tags, nil -} diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go deleted file mode 100644 index 68404bda54d6..000000000000 --- a/vendor/github.com/containers/image/docker/docker_image_dest.go +++ /dev/null @@ -1,466 +0,0 @@ -package docker - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var manifestMIMETypes = []string{ - // TODO(runcom): we'll add OCI as part of another PR here - manifest.DockerV2Schema2MediaType, - manifest.DockerV2Schema1SignedMediaType, - manifest.DockerV2Schema1MediaType, -} - -func supportedManifestMIMETypesMap() map[string]bool { - m := make(map[string]bool, len(manifestMIMETypes)) - for _, mt := range manifestMIMETypes { - m[mt] = true - } - return m -} - -type dockerImageDestination struct { - ref dockerReference - c *dockerClient - // State - manifestDigest digest.Digest // or "" if not yet known. -} - -// newImageDestination creates a new ImageDestination for the specified image reference. -func newImageDestination(ctx *types.SystemContext, ref dockerReference) (types.ImageDestination, error) { - c, err := newDockerClient(ctx, ref, true, "pull,push") - if err != nil { - return nil, err - } - return &dockerImageDestination{ - ref: ref, - c: c, - }, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *dockerImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *dockerImageDestination) Close() error { - return nil -} - -func (d *dockerImageDestination) SupportedManifestMIMETypes() []string { - return manifestMIMETypes -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. 
-func (d *dockerImageDestination) SupportsSignatures() error { - if err := d.c.detectProperties(context.TODO()); err != nil { - return err - } - switch { - case d.c.signatureBase != nil: - return nil - case d.c.supportsSignatures: - return nil - default: - return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured") - } -} - -// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. -func (d *dockerImageDestination) ShouldCompressLayers() bool { - return true -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool { - return true -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *dockerImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// sizeCounter is an io.Writer which only counts the total size of its input. -type sizeCounter struct{ size int64 } - -func (c *sizeCounter) Write(p []byte) (n int, err error) { - c.size += int64(len(p)) - return len(p), nil -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { - if inputInfo.Digest.String() != "" { - haveBlob, size, err := d.HasBlob(inputInfo) - if err != nil { - return types.BlobInfo{}, err - } - if haveBlob { - return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil - } - } - - // FIXME? Chunked upload, progress reporting, etc. 
-	uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
-	logrus.Debugf("Uploading %s", uploadPath)
-	res, err := d.c.makeRequest(context.TODO(), "POST", uploadPath, nil, nil)
-	if err != nil {
-		return types.BlobInfo{}, err
-	}
-	defer res.Body.Close()
-	if res.StatusCode != http.StatusAccepted {
-		logrus.Debugf("Error initiating layer upload, response %#v", *res)
-		return types.BlobInfo{}, errors.Errorf("Error initiating layer upload to %s, status %d", uploadPath, res.StatusCode)
-	}
-	uploadLocation, err := res.Location()
-	if err != nil {
-		return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
-	}
-
-	digester := digest.Canonical.Digester()
-	sizeCounter := &sizeCounter{}
-	tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
-	res, err = d.c.makeRequestToResolvedURL(context.TODO(), "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true)
-	if err != nil {
-		logrus.Debugf("Error uploading layer chunked, response %#v", res)
-		return types.BlobInfo{}, err
-	}
-	defer res.Body.Close()
-	computedDigest := digester.Digest()
-
-	uploadLocation, err = res.Location()
-	if err != nil {
-		return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
-	}
-
-	// FIXME: DELETE uploadLocation on failure
-
-	locationQuery := uploadLocation.Query()
-	// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
-	locationQuery.Set("digest", computedDigest.String())
-	uploadLocation.RawQuery = locationQuery.Encode()
-	res, err = d.c.makeRequestToResolvedURL(context.TODO(), "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, true)
-	if err != nil {
-		return types.BlobInfo{}, err
-	}
-	defer res.Body.Close()
-	if res.StatusCode != http.StatusCreated {
-		logrus.Debugf("Error uploading layer, response %#v", *res)
-		return types.BlobInfo{}, errors.Errorf("Error uploading layer to %s, status %d", uploadLocation, res.StatusCode)
-	}
-
-	logrus.Debugf("Upload of layer %s complete", computedDigest)
-	return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
-}
-
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest cannot be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
-	if info.Digest == "" {
-		return false, -1, errors.Errorf("Cannot check for a blob with unknown digest")
-	}
-	checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())
-
-	logrus.Debugf("Checking %s", checkPath)
-	res, err := d.c.makeRequest(context.TODO(), "HEAD", checkPath, nil, nil)
-	if err != nil {
-		return false, -1, err
-	}
-	defer res.Body.Close()
-	switch res.StatusCode {
-	case http.StatusOK:
-		logrus.Debugf("... already exists")
-		return true, getBlobSize(res), nil
-	case http.StatusUnauthorized:
-		logrus.Debugf("... not authorized")
-		return false, -1, errors.Errorf("not authorized to read from destination repository %s", reference.Path(d.ref.ref))
-	case http.StatusNotFound:
-		logrus.Debugf("... not present")
-		return false, -1, nil
-	default:
-		return false, -1, errors.Errorf("failed to read from destination repository %s: %v", reference.Path(d.ref.ref), http.StatusText(res.StatusCode))
-	}
-}
-
-func (d *dockerImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
-	return info, nil
-}
-
-// PutManifest writes manifest to the destination.
-// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
-// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
-func (d *dockerImageDestination) PutManifest(m []byte) error {
-	digest, err := manifest.Digest(m)
-	if err != nil {
-		return err
-	}
-	d.manifestDigest = digest
-
-	refTail, err := d.ref.tagOrDigest()
-	if err != nil {
-		return err
-	}
-	path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
-
-	headers := map[string][]string{}
-	mimeType := manifest.GuessMIMEType(m)
-	if mimeType != "" {
-		headers["Content-Type"] = []string{mimeType}
-	}
-	res, err := d.c.makeRequest(context.TODO(), "PUT", path, headers, bytes.NewReader(m))
-	if err != nil {
-		return err
-	}
-	defer res.Body.Close()
-	if res.StatusCode != http.StatusCreated {
-		err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest to %s", path)
-		if isManifestInvalidError(errors.Cause(err)) {
-			err = types.ManifestTypeRejectedError{Err: err}
-		}
-		return err
-	}
-	return nil
-}
-
-// isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error.
-func isManifestInvalidError(err error) bool {
-	errors, ok := err.(errcode.Errors)
-	if !ok || len(errors) == 0 {
-		return false
-	}
-	ec, ok := errors[0].(errcode.ErrorCoder)
-	if !ok {
-		return false
-	}
-	// ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
-	// ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
-	// when uploading to a tag (because it can’t find a matching tag inside the manifest)
-	return ec.ErrorCode() == v2.ErrorCodeManifestInvalid || ec.ErrorCode() == v2.ErrorCodeTagInvalid
-}
-
-func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
-	// Do not fail if we don’t really need to support signatures.
-	if len(signatures) == 0 {
-		return nil
-	}
-	if err := d.c.detectProperties(context.TODO()); err != nil {
-		return err
-	}
-	switch {
-	case d.c.signatureBase != nil:
-		return d.putSignaturesToLookaside(signatures)
-	case d.c.supportsSignatures:
-		return d.putSignaturesToAPIExtension(signatures)
-	default:
-		return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
-	}
-}
-
-// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in d.c.signatureBase,
-// which is not nil.
-func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error {
-	// FIXME? This overwrites files one at a time, definitely not atomic.
-	// A failure when updating signatures with a reordered copy could lose some of them.
-
-	// Skip dealing with the manifest digest if not necessary.
- if len(signatures) == 0 { - return nil - } - - if d.manifestDigest.String() == "" { - // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures - return errors.Errorf("Unknown manifest digest, can't add signatures") - } - - // NOTE: Keep this in sync with docs/signature-protocols.md! - for i, signature := range signatures { - url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i) - if url == nil { - return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") - } - err := d.putOneSignature(url, signature) - if err != nil { - return err - } - } - // Remove any other signatures, if present. - // We stop at the first missing signature; if a previous deleting loop aborted - // prematurely, this may not clean up all of them, but one missing signature - // is enough for dockerImageSource to stop looking for other signatures, so that - // is sufficient. - for i := len(signatures); ; i++ { - url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i) - if url == nil { - return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") - } - missing, err := d.c.deleteOneSignature(url) - if err != nil { - return err - } - if missing { - break - } - } - - return nil -} - -// putOneSignature stores one signature to url. -// NOTE: Keep this in sync with docs/signature-protocols.md! -func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error { - switch url.Scheme { - case "file": - logrus.Debugf("Writing to %s", url.Path) - err := os.MkdirAll(filepath.Dir(url.Path), 0755) - if err != nil { - return err - } - err = ioutil.WriteFile(url.Path, signature, 0644) - if err != nil { - return err - } - return nil - - case "http", "https": - return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) - default: - return errors.Errorf("Unsupported scheme when writing signature to %s", url.String()) - } -} - -// deleteOneSignature deletes a signature from url, if it exists. -// If it successfully determines that the signature does not exist, returns (true, nil) -// NOTE: Keep this in sync with docs/signature-protocols.md! -func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) { - switch url.Scheme { - case "file": - logrus.Debugf("Deleting %s", url.Path) - err := os.Remove(url.Path) - if err != nil && os.IsNotExist(err) { - return true, nil - } - return false, err - - case "http", "https": - return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) - default: - return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String()) - } -} - -// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension. -func (d *dockerImageDestination) putSignaturesToAPIExtension(signatures [][]byte) error { - // Skip dealing with the manifest digest, or reading the old state, if not necessary. - if len(signatures) == 0 { - return nil - } - - if d.manifestDigest.String() == "" { - // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures - return errors.Errorf("Unknown manifest digest, can't add signatures") - } - - // Because image signatures are a shared resource in Atomic Registry, the default upload - // always adds signatures. 
Eventually we should also allow removing signatures, - // but the X-Registry-Supports-Signatures API extension does not support that yet. - - existingSignatures, err := d.c.getExtensionsSignatures(context.TODO(), d.ref, d.manifestDigest) - if err != nil { - return err - } - existingSigNames := map[string]struct{}{} - for _, sig := range existingSignatures.Signatures { - existingSigNames[sig.Name] = struct{}{} - } - -sigExists: - for _, newSig := range signatures { - for _, existingSig := range existingSignatures.Signatures { - if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) { - continue sigExists - } - } - - // The API expect us to invent a new unique name. This is racy, but hopefully good enough. - var signatureName string - for { - randBytes := make([]byte, 16) - n, err := rand.Read(randBytes) - if err != nil || n != 16 { - return errors.Wrapf(err, "Error generating random signature len %d", n) - } - signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes) - if _, ok := existingSigNames[signatureName]; !ok { - break - } - } - sig := extensionSignature{ - Version: extensionSignatureSchemaVersion, - Name: signatureName, - Type: extensionSignatureTypeAtomic, - Content: newSig, - } - body, err := json.Marshal(sig) - if err != nil { - return err - } - - path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String()) - res, err := d.c.makeRequest(context.TODO(), "PUT", path, nil, bytes.NewReader(body)) - if err != nil { - return err - } - defer res.Body.Close() - if res.StatusCode != http.StatusCreated { - body, err := ioutil.ReadAll(res.Body) - if err == nil { - logrus.Debugf("Error body %s", string(body)) - } - logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res) - return errors.Errorf("Error uploading signature to %s, status %d", path, res.StatusCode) - } - } - - return nil -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed) -func (d *dockerImageDestination) Commit() error { - return nil -} diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go deleted file mode 100644 index 88607210e769..000000000000 --- a/vendor/github.com/containers/image/docker/docker_image_src.go +++ /dev/null @@ -1,391 +0,0 @@ -package docker - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "mime" - "net/http" - "net/url" - "os" - "strconv" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/docker/distribution/registry/client" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type dockerImageSource struct { - ref dockerReference - requestedManifestMIMETypes []string - c *dockerClient - // State - cachedManifest []byte // nil if not loaded yet - cachedManifestMIMEType string // Only valid if cachedManifest != nil -} - -// newImageSource creates a new ImageSource for the specified image reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. -// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. -// The caller must call .Close() on the returned ImageSource. -func newImageSource(ctx *types.SystemContext, ref dockerReference, requestedManifestMIMETypes []string) (*dockerImageSource, error) { - c, err := newDockerClient(ctx, ref, false, "pull") - if err != nil { - return nil, err - } - if requestedManifestMIMETypes == nil { - requestedManifestMIMETypes = manifest.DefaultRequestedManifestMIMETypes - } - supportedMIMEs := supportedManifestMIMETypesMap() - acceptableRequestedMIMEs := false - for _, mtrequested := range requestedManifestMIMETypes { - if supportedMIMEs[mtrequested] { - acceptableRequestedMIMEs = true - break - } - } - if !acceptableRequestedMIMEs { - requestedManifestMIMETypes = manifest.DefaultRequestedManifestMIMETypes - } - return &dockerImageSource{ - ref: ref, - requestedManifestMIMETypes: requestedManifestMIMETypes, - c: c, - }, nil -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (s *dockerImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *dockerImageSource) Close() error { - return nil -} - -// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1) -// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string. -func simplifyContentType(contentType string) string { - if contentType == "" { - return contentType - } - mimeType, _, err := mime.ParseMediaType(contentType) - if err != nil { - return "" - } - return mimeType -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. 
-func (s *dockerImageSource) GetManifest() ([]byte, string, error) { - err := s.ensureManifestIsLoaded(context.TODO()) - if err != nil { - return nil, "", err - } - return s.cachedManifest, s.cachedManifestMIMEType, nil -} - -func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) { - path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest) - headers := make(map[string][]string) - headers["Accept"] = s.requestedManifestMIMETypes - res, err := s.c.makeRequest(ctx, "GET", path, headers, nil) - if err != nil { - return nil, "", err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, "", client.HandleErrorResponse(res) - } - manblob, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, "", err - } - return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil -} - -// GetTargetManifest returns an image's manifest given a digest. -// This is mainly used to retrieve a single image's manifest out of a manifest list. -func (s *dockerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - return s.fetchManifest(context.TODO(), digest.String()) -} - -// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType -// -// ImageSource implementations are not required or expected to do any caching, -// but because our signatures are “attached” to the manifest digest, -// we need to ensure that the digest of the manifest returned by GetManifest -// and used by GetSignatures are consistent, otherwise we would get spurious -// signature verification failures when pulling while a tag is being updated. -func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error { - if s.cachedManifest != nil { - return nil - } - - reference, err := s.ref.tagOrDigest() - if err != nil { - return err - } - - manblob, mt, err := s.fetchManifest(ctx, reference) - if err != nil { - return err - } - // We might validate manblob against the Docker-Content-Digest header here to protect against transport errors. - s.cachedManifest = manblob - s.cachedManifestMIMEType = mt - return nil -} - -func (s *dockerImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, error) { - var ( - resp *http.Response - err error - ) - for _, url := range urls { - resp, err = s.c.makeRequestToResolvedURL(context.TODO(), "GET", url, nil, nil, -1, false) - if err == nil { - if resp.StatusCode != http.StatusOK { - err = errors.Errorf("error fetching external blob from %q: %d", url, resp.StatusCode) - logrus.Debug(err) - continue - } - } - } - if resp.Body != nil && err == nil { - return resp.Body, getBlobSize(resp), nil - } - return nil, 0, err -} - -func getBlobSize(resp *http.Response) int64 { - size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - if err != nil { - size = -1 - } - return size -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). 
-func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { - if len(info.URLs) != 0 { - return s.getExternalBlob(info.URLs) - } - - path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String()) - logrus.Debugf("Downloading %s", path) - res, err := s.c.makeRequest(context.TODO(), "GET", path, nil, nil) - if err != nil { - return nil, 0, err - } - if res.StatusCode != http.StatusOK { - // print url also - return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d", res.StatusCode) - } - return res.Body, getBlobSize(res), nil -} - -func (s *dockerImageSource) GetSignatures(ctx context.Context) ([][]byte, error) { - if err := s.c.detectProperties(ctx); err != nil { - return nil, err - } - switch { - case s.c.signatureBase != nil: - return s.getSignaturesFromLookaside(ctx) - case s.c.supportsSignatures: - return s.getSignaturesFromAPIExtension(ctx) - default: - return [][]byte{}, nil - } -} - -// manifestDigest returns a digest of the manifest, either from the supplied reference or from a fetched manifest. -func (s *dockerImageSource) manifestDigest(ctx context.Context) (digest.Digest, error) { - if digested, ok := s.ref.ref.(reference.Digested); ok { - d := digested.Digest() - if d.Algorithm() == digest.Canonical { - return d, nil - } - } - if err := s.ensureManifestIsLoaded(ctx); err != nil { - return "", err - } - return manifest.Digest(s.cachedManifest) -} - -// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase, -// which is not nil. -func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context) ([][]byte, error) { - manifestDigest, err := s.manifestDigest(ctx) - if err != nil { - return nil, err - } - - // NOTE: Keep this in sync with docs/signature-protocols.md! - signatures := [][]byte{} - for i := 0; ; i++ { - url := signatureStorageURL(s.c.signatureBase, manifestDigest, i) - if url == nil { - return nil, errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") - } - signature, missing, err := s.getOneSignature(ctx, url) - if err != nil { - return nil, err - } - if missing { - break - } - signatures = append(signatures, signature) - } - return signatures, nil -} - -// getOneSignature downloads one signature from url. -// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil. -// NOTE: Keep this in sync with docs/signature-protocols.md! 
-func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) { - switch url.Scheme { - case "file": - logrus.Debugf("Reading %s", url.Path) - sig, err := ioutil.ReadFile(url.Path) - if err != nil { - if os.IsNotExist(err) { - return nil, true, nil - } - return nil, false, err - } - return sig, false, nil - - case "http", "https": - logrus.Debugf("GET %s", url) - req, err := http.NewRequest("GET", url.String(), nil) - if err != nil { - return nil, false, err - } - req = req.WithContext(ctx) - res, err := s.c.client.Do(req) - if err != nil { - return nil, false, err - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return nil, true, nil - } else if res.StatusCode != http.StatusOK { - return nil, false, errors.Errorf("Error reading signature from %s: status %d", url.String(), res.StatusCode) - } - sig, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, false, err - } - return sig, false, nil - - default: - return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String()) - } -} - -// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension. -func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context) ([][]byte, error) { - manifestDigest, err := s.manifestDigest(ctx) - if err != nil { - return nil, err - } - - parsedBody, err := s.c.getExtensionsSignatures(ctx, s.ref, manifestDigest) - if err != nil { - return nil, err - } - - var sigs [][]byte - for _, sig := range parsedBody.Signatures { - if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic { - sigs = append(sigs, sig.Content) - } - } - return sigs, nil -} - -// deleteImage deletes the named image from the registry, if supported. -func deleteImage(ctx *types.SystemContext, ref dockerReference) error { - c, err := newDockerClient(ctx, ref, true, "push") - if err != nil { - return err - } - - // When retrieving the digest from a registry >= 2.3 use the following header: - // "Accept": "application/vnd.docker.distribution.manifest.v2+json" - headers := make(map[string][]string) - headers["Accept"] = []string{manifest.DockerV2Schema2MediaType} - - refTail, err := ref.tagOrDigest() - if err != nil { - return err - } - getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail) - get, err := c.makeRequest(context.TODO(), "GET", getPath, headers, nil) - if err != nil { - return err - } - defer get.Body.Close() - manifestBody, err := ioutil.ReadAll(get.Body) - if err != nil { - return err - } - switch get.StatusCode { - case http.StatusOK: - case http.StatusNotFound: - return errors.Errorf("Unable to delete %v. 
Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref) - default: - return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status) - } - - digest := get.Header.Get("Docker-Content-Digest") - deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest) - - // When retrieving the digest from a registry >= 2.3 use the following header: - // "Accept": "application/vnd.docker.distribution.manifest.v2+json" - delete, err := c.makeRequest(context.TODO(), "DELETE", deletePath, headers, nil) - if err != nil { - return err - } - defer delete.Body.Close() - - body, err := ioutil.ReadAll(delete.Body) - if err != nil { - return err - } - if delete.StatusCode != http.StatusAccepted { - return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status) - } - - if c.signatureBase != nil { - manifestDigest, err := manifest.Digest(manifestBody) - if err != nil { - return err - } - - for i := 0; ; i++ { - url := signatureStorageURL(c.signatureBase, manifestDigest, i) - if url == nil { - return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") - } - missing, err := c.deleteOneSignature(url) - if err != nil { - return err - } - if missing { - break - } - } - } - - return nil -} diff --git a/vendor/github.com/containers/image/docker/docker_image_src_test.go b/vendor/github.com/containers/image/docker/docker_image_src_test.go deleted file mode 100644 index 43e262a24629..000000000000 --- a/vendor/github.com/containers/image/docker/docker_image_src_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package docker - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestSimplifyContentType(t *testing.T) { - for _, c := range []struct{ input, expected string }{ - {"", ""}, - {"application/json", "application/json"}, - {"application/json;charset=utf-8", "application/json"}, - {"application/json; charset=utf-8", "application/json"}, - {"application/json ; charset=utf-8", "application/json"}, - {"application/json\t;\tcharset=utf-8", "application/json"}, - {"application/json ;charset=utf-8", "application/json"}, - {`application/json; charset="utf-8"`, "application/json"}, - {"completely invalid", ""}, - } { - out := simplifyContentType(c.input) - assert.Equal(t, c.expected, out, c.input) - } -} diff --git a/vendor/github.com/containers/image/docker/docker_transport.go b/vendor/github.com/containers/image/docker/docker_transport.go deleted file mode 100644 index 15d68e993c1d..000000000000 --- a/vendor/github.com/containers/image/docker/docker_transport.go +++ /dev/null @@ -1,162 +0,0 @@ -package docker - -import ( - "fmt" - "strings" - - "github.com/containers/image/docker/policyconfiguration" - "github.com/containers/image/docker/reference" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for Docker registry-hosted images. -var Transport = dockerTransport{} - -type dockerTransport struct{} - -func (t dockerTransport) Name() string { - return "docker" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t dockerTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. 
a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error { - // FIXME? We could be verifying the various character set and length restrictions - // from docker/distribution/reference.regexp.go, but other than that there - // are few semantically invalid strings. - return nil -} - -// dockerReference is an ImageReference for Docker images. -type dockerReference struct { - ref reference.Named // By construction we know that !reference.IsNameOnly(ref) -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference. -func ParseReference(refString string) (types.ImageReference, error) { - if !strings.HasPrefix(refString, "//") { - return nil, errors.Errorf("docker: image reference %s does not start with //", refString) - } - ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//")) - if err != nil { - return nil, err - } - ref = reference.TagNameOnly(ref) - return NewReference(ref) -} - -// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly(). -func NewReference(ref reference.Named) (types.ImageReference, error) { - if reference.IsNameOnly(ref) { - return nil, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) - } - // A github.com/distribution/reference value can have a tag and a digest at the same time! - // The docker/distribution API does not really support that (we can’t ask for an image with a specific - // tag and digest), so fail. This MAY be accepted in the future. - // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop - // the tag or the digest first?) - _, isTagged := ref.(reference.NamedTagged) - _, isDigested := ref.(reference.Canonical) - if isTagged && isDigested { - return nil, errors.Errorf("Docker references with both a tag and digest are currently not supported") - } - return dockerReference{ - ref: ref, - }, nil -} - -func (ref dockerReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref dockerReference) StringWithinTransport() string { - return "//" + reference.FamiliarString(ref.ref) -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref dockerReference) DockerReference() reference.Named { - return ref.ref -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. 
after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. -func (ref dockerReference) PolicyConfigurationIdentity() string { - res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) - if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. - panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) - } - return res -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref dockerReference) PolicyConfigurationNamespaces() []string { - return policyconfiguration.DockerReferenceNamespaces(ref.ref) -} - -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref dockerReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - return newImage(ctx, ref) -} - -// NewImageSource returns a types.ImageSource for this reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. -// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. -// The caller must call .Close() on the returned ImageSource. -func (ref dockerReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - return newImageSource(ctx, ref, requestedManifestMIMETypes) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref dockerReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ctx, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref dockerReference) DeleteImage(ctx *types.SystemContext) error { - return deleteImage(ctx, ref) -} - -// tagOrDigest returns a tag or digest from the reference. -func (ref dockerReference) tagOrDigest() (string, error) { - if ref, ok := ref.ref.(reference.Canonical); ok { - return ref.Digest().String(), nil - } - if ref, ok := ref.ref.(reference.NamedTagged); ok { - return ref.Tag(), nil - } - // This should not happen, NewReference above refuses reference.IsNameOnly values. 
- return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref)) -} diff --git a/vendor/github.com/containers/image/docker/docker_transport_test.go b/vendor/github.com/containers/image/docker/docker_transport_test.go deleted file mode 100644 index a542b3c77f1a..000000000000 --- a/vendor/github.com/containers/image/docker/docker_transport_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package docker - -import ( - "testing" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - sha256digest = "@sha256:" + sha256digestHex -) - -func TestTransportName(t *testing.T) { - assert.Equal(t, "docker", Transport.Name()) -} - -func TestTransportParseReference(t *testing.T) { - testParseReference(t, Transport.ParseReference) -} - -func TestTransportValidatePolicyConfigurationScope(t *testing.T) { - for _, scope := range []string{ - "docker.io/library/busybox" + sha256digest, - "docker.io/library/busybox:notlatest", - "docker.io/library/busybox", - "docker.io/library", - "docker.io", - } { - err := Transport.ValidatePolicyConfigurationScope(scope) - assert.NoError(t, err, scope) - } -} - -func TestParseReference(t *testing.T) { - testParseReference(t, ParseReference) -} - -// testParseReference is a test shared for Transport.ParseReference and ParseReference. -func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) { - for _, c := range []struct{ input, expected string }{ - {"busybox", ""}, // Missing // prefix - {"//busybox:notlatest", "docker.io/library/busybox:notlatest"}, // Explicit tag - {"//busybox" + sha256digest, "docker.io/library/busybox" + sha256digest}, // Explicit digest - {"//busybox", "docker.io/library/busybox:latest"}, // Default tag - // A github.com/distribution/reference value can have a tag and a digest at the same time! - // The docker/distribution API does not really support that (we can’t ask for an image with a specific - // tag and digest), so fail. This MAY be accepted in the future. - {"//busybox:latest" + sha256digest, ""}, // Both tag and digest - {"//docker.io/library/busybox:latest", "docker.io/library/busybox:latest"}, // All implied values explicitly specified - {"//UPPERCASEISINVALID", ""}, // Invalid input - } { - ref, err := fn(c.input) - if c.expected == "" { - assert.Error(t, err, c.input) - } else { - require.NoError(t, err, c.input) - dockerRef, ok := ref.(dockerReference) - require.True(t, ok, c.input) - assert.Equal(t, c.expected, dockerRef.ref.String(), c.input) - } - } -} - -// A common list of reference formats to test for the various ImageReference methods. 
-var validReferenceTestCases = []struct{ input, dockerRef, stringWithinTransport string }{ - {"busybox:notlatest", "docker.io/library/busybox:notlatest", "//busybox:notlatest"}, // Explicit tag - {"busybox" + sha256digest, "docker.io/library/busybox" + sha256digest, "//busybox" + sha256digest}, // Explicit digest - {"docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "//busybox:latest"}, // All implied values explicitly specified - {"example.com/ns/foo:bar", "example.com/ns/foo:bar", "//example.com/ns/foo:bar"}, // All values explicitly specified -} - -func TestNewReference(t *testing.T) { - for _, c := range validReferenceTestCases { - parsed, err := reference.ParseNormalizedNamed(c.input) - require.NoError(t, err) - ref, err := NewReference(parsed) - require.NoError(t, err, c.input) - dockerRef, ok := ref.(dockerReference) - require.True(t, ok, c.input) - assert.Equal(t, c.dockerRef, dockerRef.ref.String(), c.input) - } - - // Neither a tag nor digest - parsed, err := reference.ParseNormalizedNamed("busybox") - require.NoError(t, err) - _, err = NewReference(parsed) - assert.Error(t, err) - - // A github.com/distribution/reference value can have a tag and a digest at the same time! - parsed, err = reference.ParseNormalizedNamed("busybox:notlatest" + sha256digest) - require.NoError(t, err) - _, ok := parsed.(reference.Canonical) - require.True(t, ok) - _, ok = parsed.(reference.NamedTagged) - require.True(t, ok) - _, err = NewReference(parsed) - assert.Error(t, err) -} - -func TestReferenceTransport(t *testing.T) { - ref, err := ParseReference("//busybox") - require.NoError(t, err) - assert.Equal(t, Transport, ref.Transport()) -} - -func TestReferenceStringWithinTransport(t *testing.T) { - for _, c := range validReferenceTestCases { - ref, err := ParseReference("//" + c.input) - require.NoError(t, err, c.input) - stringRef := ref.StringWithinTransport() - assert.Equal(t, c.stringWithinTransport, stringRef, c.input) - // Do one more round to verify that the output can be parsed, to an equal value. - ref2, err := Transport.ParseReference(stringRef) - require.NoError(t, err, c.input) - stringRef2 := ref2.StringWithinTransport() - assert.Equal(t, stringRef, stringRef2, c.input) - } -} - -func TestReferenceDockerReference(t *testing.T) { - for _, c := range validReferenceTestCases { - ref, err := ParseReference("//" + c.input) - require.NoError(t, err, c.input) - dockerRef := ref.DockerReference() - require.NotNil(t, dockerRef, c.input) - assert.Equal(t, c.dockerRef, dockerRef.String(), c.input) - } -} - -func TestReferencePolicyConfigurationIdentity(t *testing.T) { - // Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference. - ref, err := ParseReference("//busybox") - require.NoError(t, err) - assert.Equal(t, "docker.io/library/busybox:latest", ref.PolicyConfigurationIdentity()) -} - -func TestReferencePolicyConfigurationNamespaces(t *testing.T) { - // Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference. 
- ref, err := ParseReference("//busybox") - require.NoError(t, err) - assert.Equal(t, []string{ - "docker.io/library/busybox", - "docker.io/library", - "docker.io", - }, ref.PolicyConfigurationNamespaces()) -} - -func TestReferenceNewImage(t *testing.T) { - ref, err := ParseReference("//busybox") - require.NoError(t, err) - img, err := ref.NewImage(&types.SystemContext{RegistriesDirPath: "/this/doesnt/exist", DockerPerHostCertDirPath: "/this/doesnt/exist"}) - require.NoError(t, err) - defer img.Close() -} - -func TestReferenceNewImageSource(t *testing.T) { - ref, err := ParseReference("//busybox") - require.NoError(t, err) - src, err := ref.NewImageSource(&types.SystemContext{RegistriesDirPath: "/this/doesnt/exist", DockerPerHostCertDirPath: "/this/doesnt/exist"}, nil) - assert.NoError(t, err) - defer src.Close() -} - -func TestReferenceNewImageDestination(t *testing.T) { - ref, err := ParseReference("//busybox") - require.NoError(t, err) - dest, err := ref.NewImageDestination(&types.SystemContext{RegistriesDirPath: "/this/doesnt/exist", DockerPerHostCertDirPath: "/this/doesnt/exist"}) - require.NoError(t, err) - defer dest.Close() -} - -func TestReferenceTagOrDigest(t *testing.T) { - for input, expected := range map[string]string{ - "//busybox:notlatest": "notlatest", - "//busybox" + sha256digest: "sha256:" + sha256digestHex, - } { - ref, err := ParseReference(input) - require.NoError(t, err, input) - dockerRef, ok := ref.(dockerReference) - require.True(t, ok, input) - tod, err := dockerRef.tagOrDigest() - require.NoError(t, err, input) - assert.Equal(t, expected, tod, input) - } - - // Invalid input - ref, err := reference.ParseNormalizedNamed("busybox") - require.NoError(t, err) - dockerRef := dockerReference{ref: ref} - _, err = dockerRef.tagOrDigest() - assert.Error(t, err) -} diff --git a/vendor/github.com/containers/image/docker/fixtures/registries.d/emptyConfig.yaml b/vendor/github.com/containers/image/docker/fixtures/registries.d/emptyConfig.yaml deleted file mode 100644 index 9e26dfeeb6e6..000000000000 --- a/vendor/github.com/containers/image/docker/fixtures/registries.d/emptyConfig.yaml +++ /dev/null @@ -1 +0,0 @@ -{} \ No newline at end of file diff --git a/vendor/github.com/containers/image/docker/fixtures/registries.d/internal-example.com.yaml b/vendor/github.com/containers/image/docker/fixtures/registries.d/internal-example.com.yaml deleted file mode 100644 index 526402b10787..000000000000 --- a/vendor/github.com/containers/image/docker/fixtures/registries.d/internal-example.com.yaml +++ /dev/null @@ -1,14 +0,0 @@ -docker: - example.com: - sigstore: https://sigstore.example.com - registry.test.example.com: - sigstore: http://registry.test.example.com/sigstore - registry.test.example.com:8888: - sigstore: http://registry.test.example.com:8889/sigstore - sigstore-staging: https://registry.test.example.com:8889/sigstore/specialAPIserverWhichDoesntExist - localhost: - sigstore: file:///home/mitr/mydevelopment1 - localhost:8080: - sigstore: file:///home/mitr/mydevelopment2 - localhost/invalid/url/test: - sigstore: ":emptyscheme" diff --git a/vendor/github.com/containers/image/docker/fixtures/registries.d/internet-user.yaml b/vendor/github.com/containers/image/docker/fixtures/registries.d/internet-user.yaml deleted file mode 100644 index 53969d24650c..000000000000 --- a/vendor/github.com/containers/image/docker/fixtures/registries.d/internet-user.yaml +++ /dev/null @@ -1,12 +0,0 @@ -default-docker: - sigstore: file:///mnt/companywide/signatures/for/other/repositories -docker: - 
docker.io/contoso: - sigstore: https://sigstore.contoso.com/fordocker - docker.io/centos: - sigstore: https://sigstore.centos.org/ - docker.io/centos/mybetaprooduct: - sigstore: http://localhost:9999/mybetaWIP/sigstore - sigstore-staging: file:///srv/mybetaWIP/sigstore - docker.io/centos/mybetaproduct:latest: - sigstore: https://sigstore.centos.org/ diff --git a/vendor/github.com/containers/image/docker/fixtures/registries.d/invalid-but.notyaml b/vendor/github.com/containers/image/docker/fixtures/registries.d/invalid-but.notyaml deleted file mode 100644 index 5c34318c2147..000000000000 --- a/vendor/github.com/containers/image/docker/fixtures/registries.d/invalid-but.notyaml +++ /dev/null @@ -1 +0,0 @@ -} diff --git a/vendor/github.com/containers/image/docker/lookaside.go b/vendor/github.com/containers/image/docker/lookaside.go deleted file mode 100644 index 18e7733b9356..000000000000 --- a/vendor/github.com/containers/image/docker/lookaside.go +++ /dev/null @@ -1,202 +0,0 @@ -package docker - -import ( - "fmt" - "io/ioutil" - "net/url" - "os" - "path" - "path/filepath" - "strings" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" - "github.com/ghodss/yaml" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage. -// You can override this at build time with -// -ldflags '-X github.com/containers/image/docker.systemRegistriesDirPath=$your_path' -var systemRegistriesDirPath = builtinRegistriesDirPath - -// builtinRegistriesDirPath is the path to registries.d. -// DO NOT change this, instead see systemRegistriesDirPath above. -const builtinRegistriesDirPath = "/etc/containers/registries.d" - -// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all. -// NOTE: Keep this in sync with docs/registries.d.md! -type registryConfiguration struct { - DefaultDocker *registryNamespace `json:"default-docker"` - // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*), - Docker map[string]registryNamespace `json:"docker"` -} - -// registryNamespace defines lookaside locations for a single namespace. -type registryNamespace struct { - SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing. - SigStoreStaging string `json:"sigstore-staging"` // For writing only. -} - -// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage. -// Users outside of this file should use configuredSignatureStorageBase and signatureStorageURL below. -type signatureStorageBase *url.URL // The only documented value is nil, meaning storage is not supported. - -// configuredSignatureStorageBase reads configuration to find an appropriate signature storage URL for ref, for write access if “write”. -func configuredSignatureStorageBase(ctx *types.SystemContext, ref dockerReference, write bool) (signatureStorageBase, error) { - // FIXME? Loading and parsing the config could be cached across calls. 
- dirPath := registriesDirPath(ctx) - logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath) - config, err := loadAndMergeConfig(dirPath) - if err != nil { - return nil, err - } - - topLevel := config.signatureTopLevel(ref, write) - if topLevel == "" { - return nil, nil - } - - url, err := url.Parse(topLevel) - if err != nil { - return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel) - } - // NOTE: Keep this in sync with docs/signature-protocols.md! - // FIXME? Restrict to explicitly supported schemes? - repo := reference.Path(ref.ref) // Note that this is without a tag or digest. - if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references - return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String()) - } - url.Path = url.Path + "/" + repo - return url, nil -} - -// registriesDirPath returns a path to registries.d -func registriesDirPath(ctx *types.SystemContext) string { - if ctx != nil { - if ctx.RegistriesDirPath != "" { - return ctx.RegistriesDirPath - } - if ctx.RootForImplicitAbsolutePaths != "" { - return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesDirPath) - } - } - return systemRegistriesDirPath -} - -// loadAndMergeConfig loads configuration files in dirPath -func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { - mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}} - dockerDefaultMergedFrom := "" - nsMergedFrom := map[string]string{} - - dir, err := os.Open(dirPath) - if err != nil { - if os.IsNotExist(err) { - return &mergedConfig, nil - } - return nil, err - } - configNames, err := dir.Readdirnames(0) - if err != nil { - return nil, err - } - for _, configName := range configNames { - if !strings.HasSuffix(configName, ".yaml") { - continue - } - configPath := filepath.Join(dirPath, configName) - configBytes, err := ioutil.ReadFile(configPath) - if err != nil { - return nil, err - } - - var config registryConfiguration - err = yaml.Unmarshal(configBytes, &config) - if err != nil { - return nil, errors.Wrapf(err, "Error parsing %s", configPath) - } - - if config.DefaultDocker != nil { - if mergedConfig.DefaultDocker != nil { - return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`, - dockerDefaultMergedFrom, configPath) - } - mergedConfig.DefaultDocker = config.DefaultDocker - dockerDefaultMergedFrom = configPath - } - - for nsName, nsConfig := range config.Docker { // includes config.Docker == nil - if _, ok := mergedConfig.Docker[nsName]; ok { - return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`, - nsName, nsMergedFrom[nsName], configPath) - } - mergedConfig.Docker[nsName] = nsConfig - nsMergedFrom[nsName] = configPath - } - } - - return &mergedConfig, nil -} - -// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”. -// (the top level of the storage, namespaced by repo.FullName etc.), or "" if no signature storage should be used. -func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string { - if config.Docker != nil { - // Look for a full match. 
- identity := ref.PolicyConfigurationIdentity() - if ns, ok := config.Docker[identity]; ok { - logrus.Debugf(` Using "docker" namespace %s`, identity) - if url := ns.signatureTopLevel(write); url != "" { - return url - } - } - - // Look for a match of the possible parent namespaces. - for _, name := range ref.PolicyConfigurationNamespaces() { - if ns, ok := config.Docker[name]; ok { - logrus.Debugf(` Using "docker" namespace %s`, name) - if url := ns.signatureTopLevel(write); url != "" { - return url - } - } - } - } - // Look for a default location - if config.DefaultDocker != nil { - logrus.Debugf(` Using "default-docker" configuration`) - if url := config.DefaultDocker.signatureTopLevel(write); url != "" { - return url - } - } - logrus.Debugf(" No signature storage configuration found for %s", ref.PolicyConfigurationIdentity()) - return "" -} - -// ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”. -// or "" if nothing has been configured. -func (ns registryNamespace) signatureTopLevel(write bool) string { - if write && ns.SigStoreStaging != "" { - logrus.Debugf(` Using %s`, ns.SigStoreStaging) - return ns.SigStoreStaging - } - if ns.SigStore != "" { - logrus.Debugf(` Using %s`, ns.SigStore) - return ns.SigStore - } - return "" -} - -// signatureStorageURL returns an URL usable for acessing signature index in base with known manifestDigest, or nil if not applicable. -// Returns nil iff base == nil. -// NOTE: Keep this in sync with docs/signature-protocols.md! -func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL { - if base == nil { - return nil - } - url := *base - url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1) - return &url -} diff --git a/vendor/github.com/containers/image/docker/lookaside_test.go b/vendor/github.com/containers/image/docker/lookaside_test.go deleted file mode 100644 index 43eed7822b15..000000000000 --- a/vendor/github.com/containers/image/docker/lookaside_test.go +++ /dev/null @@ -1,278 +0,0 @@ -package docker - -import ( - "fmt" - "io/ioutil" - "net/url" - "os" - "path/filepath" - "testing" - - "github.com/containers/image/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func dockerRefFromString(t *testing.T, s string) dockerReference { - ref, err := ParseReference(s) - require.NoError(t, err, s) - dockerRef, ok := ref.(dockerReference) - require.True(t, ok, s) - return dockerRef -} - -func TestConfiguredSignatureStorageBase(t *testing.T) { - // Error reading configuration directory (/dev/null is not a directory) - _, err := configuredSignatureStorageBase(&types.SystemContext{RegistriesDirPath: "/dev/null"}, - dockerRefFromString(t, "//busybox"), false) - assert.Error(t, err) - - // No match found - emptyDir, err := ioutil.TempDir("", "empty-dir") - require.NoError(t, err) - defer os.RemoveAll(emptyDir) - base, err := configuredSignatureStorageBase(&types.SystemContext{RegistriesDirPath: emptyDir}, - dockerRefFromString(t, "//this/is/not/in/the:configuration"), false) - assert.NoError(t, err) - assert.Nil(t, base) - - // Invalid URL - _, err = configuredSignatureStorageBase(&types.SystemContext{RegistriesDirPath: "fixtures/registries.d"}, - dockerRefFromString(t, "//localhost/invalid/url/test"), false) - assert.Error(t, err) - - // Success - base, err = configuredSignatureStorageBase(&types.SystemContext{RegistriesDirPath: "fixtures/registries.d"}, - 
dockerRefFromString(t, "//example.com/my/project"), false) - assert.NoError(t, err) - require.NotNil(t, base) - assert.Equal(t, "https://sigstore.example.com/my/project", (*url.URL)(base).String()) -} - -func TestRegistriesDirPath(t *testing.T) { - const nondefaultPath = "/this/is/not/the/default/registries.d" - const variableReference = "$HOME" - const rootPrefix = "/root/prefix" - - for _, c := range []struct { - ctx *types.SystemContext - expected string - }{ - // The common case - {nil, systemRegistriesDirPath}, - // There is a context, but it does not override the path. - {&types.SystemContext{}, systemRegistriesDirPath}, - // Path overridden - {&types.SystemContext{RegistriesDirPath: nondefaultPath}, nondefaultPath}, - // Root overridden - { - &types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix}, - filepath.Join(rootPrefix, systemRegistriesDirPath), - }, - // Root and path overrides present simultaneously, - { - &types.SystemContext{ - RootForImplicitAbsolutePaths: rootPrefix, - RegistriesDirPath: nondefaultPath, - }, - nondefaultPath, - }, - // No environment expansion happens in the overridden paths - {&types.SystemContext{RegistriesDirPath: variableReference}, variableReference}, - } { - path := registriesDirPath(c.ctx) - assert.Equal(t, c.expected, path) - } -} - -func TestLoadAndMergeConfig(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "merge-config") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - // No registries.d exists - config, err := loadAndMergeConfig(filepath.Join(tmpDir, "thisdoesnotexist")) - require.NoError(t, err) - assert.Equal(t, &registryConfiguration{Docker: map[string]registryNamespace{}}, config) - - // Empty registries.d directory - emptyDir := filepath.Join(tmpDir, "empty") - err = os.Mkdir(emptyDir, 0755) - require.NoError(t, err) - config, err = loadAndMergeConfig(emptyDir) - require.NoError(t, err) - assert.Equal(t, &registryConfiguration{Docker: map[string]registryNamespace{}}, config) - - // Unreadable registries.d directory - unreadableDir := filepath.Join(tmpDir, "unreadable") - err = os.Mkdir(unreadableDir, 0000) - require.NoError(t, err) - config, err = loadAndMergeConfig(unreadableDir) - assert.Error(t, err) - - // An unreadable file in a registries.d directory - unreadableFileDir := filepath.Join(tmpDir, "unreadableFile") - err = os.Mkdir(unreadableFileDir, 0755) - require.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(unreadableFileDir, "0.yaml"), []byte("{}"), 0644) - require.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(unreadableFileDir, "1.yaml"), nil, 0000) - require.NoError(t, err) - config, err = loadAndMergeConfig(unreadableFileDir) - assert.Error(t, err) - - // Invalid YAML - invalidYAMLDir := filepath.Join(tmpDir, "invalidYAML") - err = os.Mkdir(invalidYAMLDir, 0755) - require.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(invalidYAMLDir, "0.yaml"), []byte("}"), 0644) - require.NoError(t, err) - config, err = loadAndMergeConfig(invalidYAMLDir) - assert.Error(t, err) - - // Duplicate DefaultDocker - duplicateDefault := filepath.Join(tmpDir, "duplicateDefault") - err = os.Mkdir(duplicateDefault, 0755) - require.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(duplicateDefault, "0.yaml"), - []byte("default-docker:\n sigstore: file:////tmp/something"), 0644) - require.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(duplicateDefault, "1.yaml"), - []byte("default-docker:\n sigstore: file:////tmp/different"), 0644) - require.NoError(t, err) - config, err =
loadAndMergeConfig(duplicateDefault) - require.Error(t, err) - assert.Contains(t, err.Error(), "0.yaml") - assert.Contains(t, err.Error(), "1.yaml") - - // Duplicate Docker namespace - duplicateNS := filepath.Join(tmpDir, "duplicateNS") - err = os.Mkdir(duplicateNS, 0755) - require.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(duplicateNS, "0.yaml"), - []byte("docker:\n example.com:\n sigstore: file:////tmp/something"), 0644) - require.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(duplicateNS, "1.yaml"), - []byte("docker:\n example.com:\n sigstore: file:////tmp/different"), 0644) - require.NoError(t, err) - config, err = loadAndMergeConfig(duplicateNS) - assert.Error(t, err) - assert.Contains(t, err.Error(), "0.yaml") - assert.Contains(t, err.Error(), "1.yaml") - - // A fully worked example, including an empty-dictionary file and a non-.yaml file - config, err = loadAndMergeConfig("fixtures/registries.d") - require.NoError(t, err) - assert.Equal(t, &registryConfiguration{ - DefaultDocker: &registryNamespace{SigStore: "file:///mnt/companywide/signatures/for/other/repositories"}, - Docker: map[string]registryNamespace{ - "example.com": {SigStore: "https://sigstore.example.com"}, - "registry.test.example.com": {SigStore: "http://registry.test.example.com/sigstore"}, - "registry.test.example.com:8888": {SigStore: "http://registry.test.example.com:8889/sigstore", SigStoreStaging: "https://registry.test.example.com:8889/sigstore/specialAPIserverWhichDoesntExist"}, - "localhost": {SigStore: "file:///home/mitr/mydevelopment1"}, - "localhost:8080": {SigStore: "file:///home/mitr/mydevelopment2"}, - "localhost/invalid/url/test": {SigStore: ":emptyscheme"}, - "docker.io/contoso": {SigStore: "https://sigstore.contoso.com/fordocker"}, - "docker.io/centos": {SigStore: "https://sigstore.centos.org/"}, - "docker.io/centos/mybetaproduct": { - SigStore: "http://localhost:9999/mybetaWIP/sigstore", - SigStoreStaging: "file:///srv/mybetaWIP/sigstore", - }, - "docker.io/centos/mybetaproduct:latest": {SigStore: "https://sigstore.centos.org/"}, - }, - }, config) -} - -func TestRegistryConfigurationSignatureTopLevel(t *testing.T) { - config := registryConfiguration{ - DefaultDocker: &registryNamespace{SigStore: "=default", SigStoreStaging: "=default+w"}, - Docker: map[string]registryNamespace{}, - } - for _, ns := range []string{ - "localhost", - "localhost:5000", - "example.com", - "example.com/ns1", - "example.com/ns1/ns2", - "example.com/ns1/ns2/repo", - "example.com/ns1/ns2/repo:notlatest", - } { - config.Docker[ns] = registryNamespace{SigStore: ns, SigStoreStaging: ns + "+w"} - } - - for _, c := range []struct{ input, expected string }{ - {"example.com/ns1/ns2/repo:notlatest", "example.com/ns1/ns2/repo:notlatest"}, - {"example.com/ns1/ns2/repo:unmatched", "example.com/ns1/ns2/repo"}, - {"example.com/ns1/ns2/notrepo:notlatest", "example.com/ns1/ns2"}, - {"example.com/ns1/notns2/repo:notlatest", "example.com/ns1"}, - {"example.com/notns1/ns2/repo:notlatest", "example.com"}, - {"unknown.example.com/busybox", "=default"}, - {"localhost:5000/busybox", "localhost:5000"}, - {"localhost/busybox", "localhost"}, - {"localhost:9999/busybox", "=default"}, - } { - dr := dockerRefFromString(t, "//"+c.input) - - res := config.signatureTopLevel(dr, false) - assert.Equal(t, c.expected, res, c.input) - res = config.signatureTopLevel(dr, true) // test that forWriting is correctly propagated - assert.Equal(t, c.expected+"+w", res, c.input) - } - - config = registryConfiguration{ - Docker: map[string]registryNamespace{ -
"unmatched": {SigStore: "a", SigStoreStaging: "b"}, - }, - } - dr := dockerRefFromString(t, "//thisisnotmatched") - res := config.signatureTopLevel(dr, false) - assert.Equal(t, "", res) - res = config.signatureTopLevel(dr, true) - assert.Equal(t, "", res) -} - -func TestRegistryNamespaceSignatureTopLevel(t *testing.T) { - for _, c := range []struct { - ns registryNamespace - forWriting bool - expected string - }{ - {registryNamespace{SigStoreStaging: "a", SigStore: "b"}, true, "a"}, - {registryNamespace{SigStoreStaging: "a", SigStore: "b"}, false, "b"}, - {registryNamespace{SigStore: "b"}, true, "b"}, - {registryNamespace{SigStore: "b"}, false, "b"}, - {registryNamespace{SigStoreStaging: "a"}, true, "a"}, - {registryNamespace{SigStoreStaging: "a"}, false, ""}, - {registryNamespace{}, true, ""}, - {registryNamespace{}, false, ""}, - } { - res := c.ns.signatureTopLevel(c.forWriting) - assert.Equal(t, c.expected, res, fmt.Sprintf("%#v %v", c.ns, c.forWriting)) - } -} - -func TestSignatureStorageBaseSignatureStorageURL(t *testing.T) { - const mdInput = "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - const mdMapped = "sha256=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - - assert.True(t, signatureStorageURL(nil, mdInput, 0) == nil) - for _, c := range []struct { - base string - index int - expected string - }{ - {"file:///tmp", 0, "file:///tmp@" + mdMapped + "/signature-1"}, - {"file:///tmp", 1, "file:///tmp@" + mdMapped + "/signature-2"}, - {"https://localhost:5555/root", 0, "https://localhost:5555/root@" + mdMapped + "/signature-1"}, - {"https://localhost:5555/root", 1, "https://localhost:5555/root@" + mdMapped + "/signature-2"}, - {"http://localhost:5555/root", 0, "http://localhost:5555/root@" + mdMapped + "/signature-1"}, - {"http://localhost:5555/root", 1, "http://localhost:5555/root@" + mdMapped + "/signature-2"}, - } { - url, err := url.Parse(c.base) - require.NoError(t, err) - expectedURL, err := url.Parse(c.expected) - require.NoError(t, err) - res := signatureStorageURL(url, mdInput, c.index) - assert.Equal(t, expectedURL, res, c.expected) - } -} diff --git a/vendor/github.com/containers/image/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/docker/policyconfiguration/naming.go deleted file mode 100644 index 31bbb544c64d..000000000000 --- a/vendor/github.com/containers/image/docker/policyconfiguration/naming.go +++ /dev/null @@ -1,56 +0,0 @@ -package policyconfiguration - -import ( - "strings" - - "github.com/containers/image/docker/reference" - "github.com/pkg/errors" -) - -// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup, -// as a backend for ImageReference.PolicyConfigurationIdentity. -// The reference must satisfy !reference.IsNameOnly(). -func DockerReferenceIdentity(ref reference.Named) (string, error) { - res := ref.Name() - tagged, isTagged := ref.(reference.NamedTagged) - digested, isDigested := ref.(reference.Canonical) - switch { - case isTagged && isDigested: // Note that this CAN actually happen. 
- return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref)) - case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly() - return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref)) - case isTagged: - res = res + ":" + tagged.Tag() - case isDigested: - res = res + "@" + digested.Digest().String() - default: // Coverage: The above was supposed to be exhaustive. - return "", errors.New("Internal inconsistency, unexpected default branch") - } - return res, nil -} - -// DockerReferenceNamespaces returns a list of other policy configuration namespaces to search, -// as a backend for ImageReference.PolicyConfigurationIdentity. -// The reference must satisfy !reference.IsNameOnly(). -func DockerReferenceNamespaces(ref reference.Named) []string { - // Look for a match of the repository, and then of the possible parent - // namespaces. Note that this only happens on the expanded host names - // and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox", - // then in its parent "docker.io/library"; in none of "busybox", - // un-namespaced "library" nor in "" supposedly implicitly representing "library/". - // - // ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last - // iteration matches the host name (for any namespace). - res := []string{} - name := ref.Name() - for { - res = append(res, name) - - lastSlash := strings.LastIndex(name, "/") - if lastSlash == -1 { - break - } - name = name[:lastSlash] - } - return res -} diff --git a/vendor/github.com/containers/image/docker/policyconfiguration/naming_test.go b/vendor/github.com/containers/image/docker/policyconfiguration/naming_test.go deleted file mode 100644 index 5998faa81f06..000000000000 --- a/vendor/github.com/containers/image/docker/policyconfiguration/naming_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package policyconfiguration - -import ( - "fmt" - "strings" - "testing" - - "github.com/containers/image/docker/reference" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestDockerReference tests DockerReferenceIdentity and DockerReferenceNamespaces simulatenously -// to ensure they are consistent. -func TestDockerReference(t *testing.T) { - sha256Digest := "@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - // Test both that DockerReferenceIdentity returns the expected value (fullName+suffix), - // and that DockerReferenceNamespaces starts with the expected value (fullName), i.e. that the two functions are - // consistent. 
- for inputName, expectedNS := range map[string][]string{ - "example.com/ns/repo": {"example.com/ns/repo", "example.com/ns", "example.com"}, - "example.com/repo": {"example.com/repo", "example.com"}, - "localhost/ns/repo": {"localhost/ns/repo", "localhost/ns", "localhost"}, - // Note that "localhost" is special here: notlocalhost/repo is parsed as docker.io/notlocalhost/repo: - "localhost/repo": {"localhost/repo", "localhost"}, - "notlocalhost/repo": {"docker.io/notlocalhost/repo", "docker.io/notlocalhost", "docker.io"}, - "docker.io/ns/repo": {"docker.io/ns/repo", "docker.io/ns", "docker.io"}, - "docker.io/library/repo": {"docker.io/library/repo", "docker.io/library", "docker.io"}, - "docker.io/repo": {"docker.io/library/repo", "docker.io/library", "docker.io"}, - "ns/repo": {"docker.io/ns/repo", "docker.io/ns", "docker.io"}, - "library/repo": {"docker.io/library/repo", "docker.io/library", "docker.io"}, - "repo": {"docker.io/library/repo", "docker.io/library", "docker.io"}, - } { - for inputSuffix, mappedSuffix := range map[string]string{ - ":tag": ":tag", - sha256Digest: sha256Digest, - } { - fullInput := inputName + inputSuffix - ref, err := reference.ParseNormalizedNamed(fullInput) - require.NoError(t, err, fullInput) - - identity, err := DockerReferenceIdentity(ref) - require.NoError(t, err, fullInput) - assert.Equal(t, expectedNS[0]+mappedSuffix, identity, fullInput) - - ns := DockerReferenceNamespaces(ref) - require.NotNil(t, ns, fullInput) - require.Len(t, ns, len(expectedNS), fullInput) - moreSpecific := identity - for i := range expectedNS { - assert.Equal(t, ns[i], expectedNS[i], fmt.Sprintf("%s item %d", fullInput, i)) - assert.True(t, strings.HasPrefix(moreSpecific, ns[i])) - moreSpecific = ns[i] - } - } - } -} - -func TestDockerReferenceIdentity(t *testing.T) { - // TestDockerReference above has tested the core of the functionality, this tests only the failure cases. - - // Neither a tag nor digest - parsed, err := reference.ParseNormalizedNamed("busybox") - require.NoError(t, err) - id, err := DockerReferenceIdentity(parsed) - assert.Equal(t, "", id) - assert.Error(t, err) - - // A github.com/docker/distribution/reference value can have a tag and a digest at the same time! - parsed, err = reference.ParseNormalizedNamed("busybox:notlatest@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") - require.NoError(t, err) - _, ok := parsed.(reference.Canonical) - require.True(t, ok) - _, ok = parsed.(reference.NamedTagged) - require.True(t, ok) - id, err = DockerReferenceIdentity(parsed) - assert.Equal(t, "", id) - assert.Error(t, err) -} diff --git a/vendor/github.com/containers/image/docker/reference/README.md b/vendor/github.com/containers/image/docker/reference/README.md deleted file mode 100644 index 53a88de82646..000000000000 --- a/vendor/github.com/containers/image/docker/reference/README.md +++ /dev/null @@ -1,2 +0,0 @@ -This is a copy of github.com/docker/distribution/reference as of commit fb0bebc4b64e3881cc52a2478d749845ed76d2a8, -except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset.
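A minimal usage sketch of the vendored reference API deleted in the hunks below; this is a hypothetical standalone program, with behavior inferred from the deleted sources and their tests, not part of the diff:

package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
)

func main() {
	// ParseNormalizedNamed expands Docker-UI shorthand into a fully qualified name.
	ref, err := reference.ParseNormalizedNamed("ubuntu:18.04")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String())                  // docker.io/library/ubuntu:18.04
	fmt.Println(reference.FamiliarString(ref)) // ubuntu:18.04
	fmt.Println(reference.Domain(ref))         // docker.io
	fmt.Println(reference.Path(ref))           // library/ubuntu

	// TagNameOnly applies the default "latest" tag to name-only references.
	bare, err := reference.ParseNormalizedNamed("redis")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.TagNameOnly(bare).String()) // docker.io/library/redis:latest
}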
\ No newline at end of file diff --git a/vendor/github.com/containers/image/docker/reference/helpers.go b/vendor/github.com/containers/image/docker/reference/helpers.go deleted file mode 100644 index 978df7eabbf1..000000000000 --- a/vendor/github.com/containers/image/docker/reference/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package reference - -import "path" - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. -func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. -func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/vendor/github.com/containers/image/docker/reference/normalize.go b/vendor/github.com/containers/image/docker/reference/normalize.go deleted file mode 100644 index fcc436a395dd..000000000000 --- a/vendor/github.com/containers/image/docker/reference/normalize.go +++ /dev/null @@ -1,152 +0,0 @@ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -var ( - legacyDefaultDomain = "index.docker.io" - defaultDomain = "docker.io" - officialRepoName = "library" - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". -type normalizedNamed interface { - Named - Familiar() Named -} - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. -func ParseNormalizedNamed(s string) (Named, error) { - if ok := anchoredIdentifierRegexp.MatchString(s); ok { - return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) - } - domain, remainder := splitDockerDomain(s) - var remoteName string - if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remoteName = remainder[:tagSep] - } else { - remoteName = remainder - } - if strings.ToLower(remoteName) != remoteName { - return nil, errors.New("invalid reference format: repository name must be lowercase") - } - - ref, err := Parse(domain + "/" + remainder) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// splitDockerDomain splits a repository name to domain and remotename string. -// If no valid domain is found, the default domain is used. 
Repository name -// needs to be already validated before. -func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoName + "/" + remainder - } - return -} - -// familiarizeName returns a shortened version of the name familiar -// to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". -// Returns a familiarized named only reference. -func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { - repo.path = split[1] - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // Default tag must be valid, to create a NamedTagged - // type with non-validated input the WithTag function - // should be used instead - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name.
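// Sketch of the behavior (cases mirrored from TestParseAnyReference below):
// a bare 64-character hex string such as
// "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c" is treated
// as an identifier and yields the digest reference
// "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
// while "redis" falls through to ParseNormalizedNamed and yields
// "docker.io/library/redis".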
-func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/containers/image/docker/reference/normalize_test.go b/vendor/github.com/containers/image/docker/reference/normalize_test.go deleted file mode 100644 index 064ee749c0e2..000000000000 --- a/vendor/github.com/containers/image/docker/reference/normalize_test.go +++ /dev/null @@ -1,573 +0,0 @@ -package reference - -import ( - "strconv" - "testing" - - "github.com/opencontainers/go-digest" -) - -func TestValidateReferenceName(t *testing.T) { - validRepoNames := []string{ - "docker/docker", - "library/debian", - "debian", - "docker.io/docker/docker", - "docker.io/library/debian", - "docker.io/debian", - "index.docker.io/docker/docker", - "index.docker.io/library/debian", - "index.docker.io/debian", - "127.0.0.1:5000/docker/docker", - "127.0.0.1:5000/library/debian", - "127.0.0.1:5000/debian", - "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", - - // This test case was moved from invalid to valid since it is valid input - // when specified with a hostname, it removes the ambiguity from about - // whether the value is an identifier or repository name - "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - } - invalidRepoNames := []string{ - "https://github.com/docker/docker", - "docker/Docker", - "-docker", - "-docker/docker", - "-docker.io/docker/docker", - "docker///docker", - "docker.io/docker/Docker", - "docker.io/docker///docker", - "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - } - - for _, name := range invalidRepoNames { - _, err := ParseNormalizedNamed(name) - if err == nil { - t.Fatalf("Expected invalid repo name for %q", name) - } - } - - for _, name := range validRepoNames { - _, err := ParseNormalizedNamed(name) - if err != nil { - t.Fatalf("Error parsing repo name %s, got: %q", name, err) - } - } -} - -func TestValidateRemoteName(t *testing.T) { - validRepositoryNames := []string{ - // Sanity check. - "docker/docker", - - // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden). - "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", - - // Allow embedded hyphens. - "docker-rules/docker", - - // Allow multiple hyphens as well. - "docker---rules/docker", - - //Username doc and image name docker being tested. - "doc/docker", - - // single character names are now allowed. - "d/docker", - "jess/t", - - // Consecutive underscores. - "dock__er/docker", - } - for _, repositoryName := range validRepositoryNames { - _, err := ParseNormalizedNamed(repositoryName) - if err != nil { - t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err) - } - } - - invalidRepositoryNames := []string{ - // Disallow capital letters. - "docker/Docker", - - // Only allow one slash. - "docker///docker", - - // Disallow 64-character hexadecimal. - "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - - // Disallow leading and trailing hyphens in namespace. - "-docker/docker", - "docker-/docker", - "-docker-/docker", - - // Don't allow underscores everywhere (as opposed to hyphens). - "____/____", - - "_docker/_docker", - - // Disallow consecutive periods. - "dock..er/docker", - "dock_.er/docker", - "dock-.er/docker", - - // No repository. 
- "docker/", - - //namespace too long - "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", - } - for _, repositoryName := range invalidRepositoryNames { - if _, err := ParseNormalizedNamed(repositoryName); err == nil { - t.Errorf("Repository name should be invalid: %v", repositoryName) - } - } -} - -func TestParseRepositoryInfo(t *testing.T) { - type tcase struct { - RemoteName, FamiliarName, FullName, AmbiguousName, Domain string - } - - tcases := []tcase{ - { - RemoteName: "fooo/bar", - FamiliarName: "fooo/bar", - FullName: "docker.io/fooo/bar", - AmbiguousName: "index.docker.io/fooo/bar", - Domain: "docker.io", - }, - { - RemoteName: "library/ubuntu", - FamiliarName: "ubuntu", - FullName: "docker.io/library/ubuntu", - AmbiguousName: "library/ubuntu", - Domain: "docker.io", - }, - { - RemoteName: "nonlibrary/ubuntu", - FamiliarName: "nonlibrary/ubuntu", - FullName: "docker.io/nonlibrary/ubuntu", - AmbiguousName: "", - Domain: "docker.io", - }, - { - RemoteName: "other/library", - FamiliarName: "other/library", - FullName: "docker.io/other/library", - AmbiguousName: "", - Domain: "docker.io", - }, - { - RemoteName: "private/moonbase", - FamiliarName: "127.0.0.1:8000/private/moonbase", - FullName: "127.0.0.1:8000/private/moonbase", - AmbiguousName: "", - Domain: "127.0.0.1:8000", - }, - { - RemoteName: "privatebase", - FamiliarName: "127.0.0.1:8000/privatebase", - FullName: "127.0.0.1:8000/privatebase", - AmbiguousName: "", - Domain: "127.0.0.1:8000", - }, - { - RemoteName: "private/moonbase", - FamiliarName: "example.com/private/moonbase", - FullName: "example.com/private/moonbase", - AmbiguousName: "", - Domain: "example.com", - }, - { - RemoteName: "privatebase", - FamiliarName: "example.com/privatebase", - FullName: "example.com/privatebase", - AmbiguousName: "", - Domain: "example.com", - }, - { - RemoteName: "private/moonbase", - FamiliarName: "example.com:8000/private/moonbase", - FullName: "example.com:8000/private/moonbase", - AmbiguousName: "", - Domain: "example.com:8000", - }, - { - RemoteName: "privatebasee", - FamiliarName: "example.com:8000/privatebasee", - FullName: "example.com:8000/privatebasee", - AmbiguousName: "", - Domain: "example.com:8000", - }, - { - RemoteName: "library/ubuntu-12.04-base", - FamiliarName: "ubuntu-12.04-base", - FullName: "docker.io/library/ubuntu-12.04-base", - AmbiguousName: "index.docker.io/library/ubuntu-12.04-base", - Domain: "docker.io", - }, - { - RemoteName: "library/foo", - FamiliarName: "foo", - FullName: "docker.io/library/foo", - AmbiguousName: "docker.io/foo", - Domain: "docker.io", - }, - { - RemoteName: "library/foo/bar", - FamiliarName: "library/foo/bar", - FullName: "docker.io/library/foo/bar", - AmbiguousName: "", - Domain: "docker.io", - }, - { - RemoteName: "store/foo/bar", - FamiliarName: "store/foo/bar", - FullName: "docker.io/store/foo/bar", - AmbiguousName: "", - Domain: "docker.io", - }, - } - - for _, tcase := range tcases { - refStrings := []string{tcase.FamiliarName, tcase.FullName} - if tcase.AmbiguousName != "" { - refStrings = append(refStrings, tcase.AmbiguousName) - } - - var refs []Named - for _, r := range refStrings { - named, err := ParseNormalizedNamed(r) - if err != nil { - t.Fatal(err) - } - refs = append(refs, named) - } - - for _, r := range refs { - if expected, actual := 
tcase.FamiliarName, FamiliarName(r); expected != actual { - t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.FullName, r.String(); expected != actual { - t.Fatalf("Invalid canonical reference for %q. Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.Domain, Domain(r); expected != actual { - t.Fatalf("Invalid domain for %q. Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.RemoteName, Path(r); expected != actual { - t.Fatalf("Invalid remoteName for %q. Expected %q, got %q", r, expected, actual) - } - - } - } -} - -func TestParseReferenceWithTagAndDigest(t *testing.T) { - shortRef := "busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa" - ref, err := ParseNormalizedNamed(shortRef) - if err != nil { - t.Fatal(err) - } - if expected, actual := "docker.io/library/"+shortRef, ref.String(); actual != expected { - t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) - } - - if _, isTagged := ref.(NamedTagged); !isTagged { - t.Fatalf("Reference from %q should support tag", ref) - } - if _, isCanonical := ref.(Canonical); !isCanonical { - t.Fatalf("Reference from %q should support digest", ref) - } - if expected, actual := shortRef, FamiliarString(ref); actual != expected { - t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) - } -} - -func TestInvalidReferenceComponents(t *testing.T) { - if _, err := ParseNormalizedNamed("-foo"); err == nil { - t.Fatal("Expected WithName to detect invalid name") - } - ref, err := ParseNormalizedNamed("busybox") - if err != nil { - t.Fatal(err) - } - if _, err := WithTag(ref, "-foo"); err == nil { - t.Fatal("Expected WithName to detect invalid tag") - } - if _, err := WithDigest(ref, digest.Digest("foo")); err == nil { - t.Fatal("Expected WithDigest to detect invalid digest") - } -} - -func equalReference(r1, r2 Reference) bool { - switch v1 := r1.(type) { - case digestReference: - if v2, ok := r2.(digestReference); ok { - return v1 == v2 - } - case repository: - if v2, ok := r2.(repository); ok { - return v1 == v2 - } - case taggedReference: - if v2, ok := r2.(taggedReference); ok { - return v1 == v2 - } - case canonicalReference: - if v2, ok := r2.(canonicalReference); ok { - return v1 == v2 - } - case reference: - if v2, ok := r2.(reference); ok { - return v1 == v2 - } - } - return false -} - -func TestParseAnyReference(t *testing.T) { - tcases := []struct { - Reference string - Equivalent string - Expected Reference - }{ - { - Reference: "redis", - Equivalent: "docker.io/library/redis", - }, - { - Reference: "redis:latest", - Equivalent: "docker.io/library/redis:latest", - }, - { - Reference: "docker.io/library/redis:latest", - Equivalent: "docker.io/library/redis:latest", - }, - { - Reference: "redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Equivalent: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Equivalent: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "dmcgowan/myapp", - Equivalent: "docker.io/dmcgowan/myapp", - }, - { - Reference: "dmcgowan/myapp:latest", - Equivalent: "docker.io/dmcgowan/myapp:latest", - }, - { - Reference: 
"docker.io/mcgowan/myapp:latest", - Equivalent: "docker.io/mcgowan/myapp:latest", - }, - { - Reference: "dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), - Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", - }, - { - Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - Equivalent: "docker.io/library/dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", - }, - } - - for _, tcase := range tcases { - var ref Reference - var err error - ref, err = ParseAnyReference(tcase.Reference) - if err != nil { - t.Fatalf("Error parsing reference %s: %v", tcase.Reference, err) - } - if ref.String() != tcase.Equivalent { - t.Fatalf("Unexpected string: %s, expected %s", ref.String(), tcase.Equivalent) - } - - expected := tcase.Expected - if expected == nil { - expected, err = Parse(tcase.Equivalent) - if err != nil { - t.Fatalf("Error parsing reference %s: %v", tcase.Equivalent, err) - } - } - if !equalReference(ref, expected) { - t.Errorf("Unexpected reference %#v, expected %#v", ref, expected) - } - } -} - -func TestNormalizedSplitHostname(t *testing.T) { - testcases := []struct { - input string - domain string - name string - }{ - { - input: "test.com/foo", - domain: "test.com", - name: "foo", - }, - { - input: "test_com/foo", - domain: "docker.io", - name: "test_com/foo", - }, - { - input: "docker/migrator", - domain: "docker.io", - name: "docker/migrator", - }, - { - input: "test.com:8080/foo", - domain: "test.com:8080", - name: "foo", - }, - { - input: "test-com:8080/foo", - domain: "test-com:8080", - name: "foo", - }, - { - input: "foo", - domain: "docker.io", - name: "library/foo", - }, - { - input: "xn--n3h.com/foo", - domain: "xn--n3h.com", - name: "foo", - }, - { - input: "xn--n3h.com:18080/foo", - domain: "xn--n3h.com:18080", - name: "foo", - }, - { - input: "docker.io/foo", - domain: "docker.io", - name: "library/foo", - }, - { - input: "docker.io/library/foo", - domain: "docker.io", - name: "library/foo", - }, - { - input: "docker.io/library/foo/bar", - domain: "docker.io", - name: "library/foo/bar", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
- t.Fail() - } - - named, err := ParseNormalizedNamed(testcase.input) - if err != nil { - failf("error parsing name: %s", err) - } - domain, name := SplitHostname(named) - if domain != testcase.domain { - failf("unexpected domain: got %q, expected %q", domain, testcase.domain) - } - if name != testcase.name { - failf("unexpected name: got %q, expected %q", name, testcase.name) - } - } -} - -func TestMatchError(t *testing.T) { - named, err := ParseAnyReference("foo") - if err != nil { - t.Fatal(err) - } - _, err = FamiliarMatch("[-x]", named) - if err == nil { - t.Fatalf("expected an error, got nothing") - } -} - -func TestMatch(t *testing.T) { - matchCases := []struct { - reference string - pattern string - expected bool - }{ - { - reference: "foo", - pattern: "foo/**/ba[rz]", - expected: false, - }, - { - reference: "foo/any/bat", - pattern: "foo/**/ba[rz]", - expected: false, - }, - { - reference: "foo/a/bar", - pattern: "foo/**/ba[rz]", - expected: true, - }, - { - reference: "foo/b/baz", - pattern: "foo/**/ba[rz]", - expected: true, - }, - { - reference: "foo/c/baz:tag", - pattern: "foo/**/ba[rz]", - expected: true, - }, - { - reference: "foo/c/baz:tag", - pattern: "foo/*/baz:tag", - expected: true, - }, - { - reference: "foo/c/baz:tag", - pattern: "foo/c/baz:tag", - expected: true, - }, - { - reference: "example.com/foo/c/baz:tag", - pattern: "*/foo/c/baz", - expected: true, - }, - { - reference: "example.com/foo/c/baz:tag", - pattern: "example.com/foo/c/baz", - expected: true, - }, - } - for _, c := range matchCases { - named, err := ParseAnyReference(c.reference) - if err != nil { - t.Fatal(err) - } - actual, err := FamiliarMatch(c.pattern, named) - if err != nil { - t.Fatal(err) - } - if actual != c.expected { - t.Fatalf("expected %s match %s to be %v, was %v", c.reference, c.pattern, c.expected, actual) - } - } -} diff --git a/vendor/github.com/containers/image/docker/reference/reference.go b/vendor/github.com/containers/image/docker/reference/reference.go deleted file mode 100644 index fd3510e9eead..000000000000 --- a/vendor/github.com/containers/image/docker/reference/reference.go +++ /dev/null @@ -1,433 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] path-component ['/' path-component]* -// domain := domain-component ['.' domain-component]* [':' port-number] -// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -// -// identifier := /[a-f0-9]{64}/ -// short-identifier := /[a-f0-9]{6,64}/ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. 
- NameTotalLengthMax = 255 -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - ErrNameContainsUppercase = errors.New("repository name must be lowercase") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) - - // ErrNameNotCanonical is returned when a name is not canonical. - ErrNameNotCanonical = errors.New("repository name must be canonical") -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. -func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. -func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text which -// is the string of the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by field. -func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// in which it can be referenced by -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique -// name including a name with domain and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// namedRepository is a reference to a repository with a name. -// A namedRepository has both domain and path components. 
-type namedRepository interface { - Named - Domain() string - Path() string -} - -// Domain returns the domain part of the Named reference -func Domain(named Named) string { - if r, ok := named.(namedRepository); ok { - return r.Domain() - } - domain, _ := splitDomain(named.Name()) - return domain -} - -// Path returns the name without the domain part of the Named reference -func Path(named Named) (name string) { - if r, ok := named.(namedRepository); ok { - return r.Path() - } - _, path := splitDomain(named.Name()) - return path -} - -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// DEPRECATED: Use Domain or Path -func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() - } - return splitDomain(named.Name()) -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: Parse will not handle short digests. -func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { - return nil, ErrNameContainsUppercase - } - return nil, ErrReferenceInvalidFormat - } - - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - var repo repository - - nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if nameMatch != nil && len(nameMatch) == 3 { - repo.domain = nameMatch[1] - repo.path = nameMatch[2] - } else { - repo.domain = "" - repo.path = matches[1] - } - - ref := reference{ - namedRepository: repo, - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.Parse(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: ParseNamed will not handle short digests. -func ParseNamed(s string) (Named, error) { - named, err := ParseNormalizedNamed(s) - if err != nil { - return nil, err - } - if named.String() != s { - return nil, ErrNameNotCanonical - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return nil, ErrReferenceInvalidFormat - } - return repository{ - domain: match[1], - path: match[2], - }, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. 
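// A short sketch of the behavior (values borrowed from TestWithTag below):
// tagging "test.com/foo" with "tag" yields "test.com/foo:tag", and tagging a
// Canonical reference keeps its digest, so
// "test.com:8000/foo@sha256:1234567890098765432112345667890098765" tagged with
// "TAG5" stringifies as
// "test.com:8000/foo:TAG5@sha256:1234567890098765432112345667890098765".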
-func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if canonical, ok := name.(Canonical); ok { - return reference{ - namedRepository: repo, - tag: tag, - digest: canonical.Digest(), - }, nil - } - return taggedReference{ - namedRepository: repo, - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if tagged, ok := name.(Tagged); ok { - return reference{ - namedRepository: repo, - tag: tagged.Tag(), - digest: digest, - }, nil - } - return canonicalReference{ - namedRepository: repo, - digest: digest, - }, nil -} - -// TrimNamed removes any tag or digest from the named reference. -func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, - } -} - -func getBestReferenceType(ref reference) Reference { - if ref.Name() == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - namedRepository: ref.namedRepository, - digest: ref.digest, - } - } - return ref.namedRepository - } - if ref.digest == "" { - return taggedReference{ - namedRepository: ref.namedRepository, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - namedRepository - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.Name() + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository struct { - domain string - path string -} - -func (r repository) String() string { - return r.Name() -} - -func (r repository) Name() string { - if r.domain == "" { - return r.path - } - return r.domain + "/" + r.path -} - -func (r repository) Domain() string { - return r.domain -} - -func (r repository) Path() string { - return r.path -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - namedRepository - tag string -} - -func (t taggedReference) String() string { - return t.Name() + ":" + t.tag -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - namedRepository - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.Name() + "@" + c.digest.String() -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/vendor/github.com/containers/image/docker/reference/reference_test.go b/vendor/github.com/containers/image/docker/reference/reference_test.go deleted file mode 100644 index 16b871f987ba..000000000000 --- a/vendor/github.com/containers/image/docker/reference/reference_test.go +++ /dev/null @@ 
-1,659 +0,0 @@ -package reference - -import ( - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/json" - "strconv" - "strings" - "testing" - - "github.com/opencontainers/go-digest" -) - -func TestReferenceParse(t *testing.T) { - // referenceTestcases is a unified set of testcases for - // testing the parsing of references - referenceTestcases := []struct { - // input is the repository name or name component testcase - input string - // err is the error expected from Parse, or nil - err error - // repository is the string representation for the reference - repository string - // domain is the domain expected in the reference - domain string - // tag is the tag for the reference - tag string - // digest is the digest for the reference (enforces digest reference) - digest string - }{ - { - input: "test_com", - repository: "test_com", - }, - { - input: "test.com:tag", - repository: "test.com", - tag: "tag", - }, - { - input: "test.com:5000", - repository: "test.com", - tag: "5000", - }, - { - input: "test.com/repo:tag", - domain: "test.com", - repository: "test.com/repo", - tag: "tag", - }, - { - input: "test:5000/repo", - domain: "test:5000", - repository: "test:5000/repo", - }, - { - input: "test:5000/repo:tag", - domain: "test:5000", - repository: "test:5000/repo", - tag: "tag", - }, - { - input: "test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - domain: "test:5000", - repository: "test:5000/repo", - digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - }, - { - input: "test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - domain: "test:5000", - repository: "test:5000/repo", - tag: "tag", - digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - }, - { - input: "test:5000/repo", - domain: "test:5000", - repository: "test:5000/repo", - }, - { - input: "", - err: ErrNameEmpty, - }, - { - input: ":justtag", - err: ErrReferenceInvalidFormat, - }, - { - input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - err: ErrReferenceInvalidFormat, - }, - { - input: "repo@sha256:ffffffffffffffffffffffffffffffffff", - err: digest.ErrDigestInvalidLength, - }, - { - input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - err: digest.ErrDigestUnsupported, - }, - { - input: "Uppercase:tag", - err: ErrNameContainsUppercase, - }, - // FIXME "Uppercase" is incorrectly handled as a domain-name here, therefore passes. 
- // See https://github.com/docker/distribution/pull/1778, and https://github.com/docker/docker/pull/20175 - //{ - // input: "Uppercase/lowercase:tag", - // err: ErrNameContainsUppercase, - //}, - { - input: "test:5000/Uppercase/lowercase:tag", - err: ErrNameContainsUppercase, - }, - { - input: "lowercase:Uppercase", - repository: "lowercase", - tag: "Uppercase", - }, - { - input: strings.Repeat("a/", 128) + "a:tag", - err: ErrNameTooLong, - }, - { - input: strings.Repeat("a/", 127) + "a:tag-puts-this-over-max", - domain: "a", - repository: strings.Repeat("a/", 127) + "a", - tag: "tag-puts-this-over-max", - }, - { - input: "aa/asdf$$^/aa", - err: ErrReferenceInvalidFormat, - }, - { - input: "sub-dom1.foo.com/bar/baz/quux", - domain: "sub-dom1.foo.com", - repository: "sub-dom1.foo.com/bar/baz/quux", - }, - { - input: "sub-dom1.foo.com/bar/baz/quux:some-long-tag", - domain: "sub-dom1.foo.com", - repository: "sub-dom1.foo.com/bar/baz/quux", - tag: "some-long-tag", - }, - { - input: "b.gcr.io/test.example.com/my-app:test.example.com", - domain: "b.gcr.io", - repository: "b.gcr.io/test.example.com/my-app", - tag: "test.example.com", - }, - { - input: "xn--n3h.com/myimage:xn--n3h.com", // ☃.com in punycode - domain: "xn--n3h.com", - repository: "xn--n3h.com/myimage", - tag: "xn--n3h.com", - }, - { - input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode - domain: "xn--7o8h.com", - repository: "xn--7o8h.com/myimage", - tag: "xn--7o8h.com", - digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - }, - { - input: "foo_bar.com:8080", - repository: "foo_bar.com", - tag: "8080", - }, - { - input: "foo/foo_bar.com:8080", - domain: "foo", - repository: "foo/foo_bar.com", - tag: "8080", - }, - } - for _, testcase := range referenceTestcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
- t.Fail() - } - - repo, err := Parse(testcase.input) - if testcase.err != nil { - if err == nil { - failf("missing expected error: %v", testcase.err) - } else if testcase.err != err { - failf("mismatched error: got %v, expected %v", err, testcase.err) - } - continue - } else if err != nil { - failf("unexpected parse error: %v", err) - continue - } - if repo.String() != testcase.input { - failf("mismatched repo: got %q, expected %q", repo.String(), testcase.input) - } - - if named, ok := repo.(Named); ok { - if named.Name() != testcase.repository { - failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository) - } - domain, _ := SplitHostname(named) - if domain != testcase.domain { - failf("unexpected domain: got %q, expected %q", domain, testcase.domain) - } - } else if testcase.repository != "" || testcase.domain != "" { - failf("expected named type, got %T", repo) - } - - tagged, ok := repo.(Tagged) - if testcase.tag != "" { - if ok { - if tagged.Tag() != testcase.tag { - failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) - } - } else { - failf("expected tagged type, got %T", repo) - } - } else if ok { - failf("unexpected tagged type") - } - - digested, ok := repo.(Digested) - if testcase.digest != "" { - if ok { - if digested.Digest().String() != testcase.digest { - failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) - } - } else { - failf("expected digested type, got %T", repo) - } - } else if ok { - failf("unexpected digested type") - } - - } -} - -// TestWithNameFailure tests cases where WithName should fail. Cases where it -// should succeed are covered by TestSplitHostname, below. -func TestWithNameFailure(t *testing.T) { - testcases := []struct { - input string - err error - }{ - { - input: "", - err: ErrNameEmpty, - }, - { - input: ":justtag", - err: ErrReferenceInvalidFormat, - }, - { - input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - err: ErrReferenceInvalidFormat, - }, - { - input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - err: ErrReferenceInvalidFormat, - }, - { - input: strings.Repeat("a/", 128) + "a:tag", - err: ErrNameTooLong, - }, - { - input: "aa/asdf$$^/aa", - err: ErrReferenceInvalidFormat, - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) - t.Fail() - } - - _, err := WithName(testcase.input) - if err == nil { - failf("no error parsing name. expected: %s", testcase.err) - } - } -} - -func TestSplitHostname(t *testing.T) { - testcases := []struct { - input string - domain string - name string - }{ - { - input: "test.com/foo", - domain: "test.com", - name: "foo", - }, - { - input: "test_com/foo", - domain: "", - name: "test_com/foo", - }, - { - input: "test:8080/foo", - domain: "test:8080", - name: "foo", - }, - { - input: "test.com:8080/foo", - domain: "test.com:8080", - name: "foo", - }, - { - input: "test-com:8080/foo", - domain: "test-com:8080", - name: "foo", - }, - { - input: "xn--n3h.com:18080/foo", - domain: "xn--n3h.com:18080", - name: "foo", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
-		t.Fail()
-	}
-
-	named, err := WithName(testcase.input)
-	if err != nil {
-		failf("error parsing name: %s", err)
-	}
-	domain, name := SplitHostname(named)
-	if domain != testcase.domain {
-		failf("unexpected domain: got %q, expected %q", domain, testcase.domain)
-	}
-	if name != testcase.name {
-		failf("unexpected name: got %q, expected %q", name, testcase.name)
-	}
-	}
-}
-
-type serializationType struct {
-	Description string
-	Field       Field
-}
-
-func TestSerialization(t *testing.T) {
-	testcases := []struct {
-		description string
-		input       string
-		name        string
-		tag         string
-		digest      string
-		err         error
-	}{
-		{
-			description: "empty value",
-			err:         ErrNameEmpty,
-		},
-		{
-			description: "just a name",
-			input:       "example.com:8000/named",
-			name:        "example.com:8000/named",
-		},
-		{
-			description: "name with a tag",
-			input:       "example.com:8000/named:tagged",
-			name:        "example.com:8000/named",
-			tag:         "tagged",
-		},
-		{
-			description: "name with digest",
-			input:       "other.com/named@sha256:1234567890098765432112345667890098765432112345667890098765432112",
-			name:        "other.com/named",
-			digest:      "sha256:1234567890098765432112345667890098765432112345667890098765432112",
-		},
-	}
-	for _, testcase := range testcases {
-		failf := func(format string, v ...interface{}) {
-			t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
-			t.Fail()
-		}
-
-		m := map[string]string{
-			"Description": testcase.description,
-			"Field":       testcase.input,
-		}
-		b, err := json.Marshal(m)
-		if err != nil {
-			failf("error marshalling: %v", err)
-		}
-		t := serializationType{}
-
-		if err := json.Unmarshal(b, &t); err != nil {
-			if testcase.err == nil {
-				failf("error unmarshalling: %v", err)
-			}
-			if err != testcase.err {
-				failf("wrong error, expected %v, got %v", testcase.err, err)
-			}
-
-			continue
-		} else if testcase.err != nil {
-			failf("expected error unmarshalling: %v", testcase.err)
-		}
-
-		if t.Description != testcase.description {
-			failf("wrong description, expected %q, got %q", testcase.description, t.Description)
-		}
-
-		ref := t.Field.Reference()
-
-		if named, ok := ref.(Named); ok {
-			if named.Name() != testcase.name {
-				failf("unexpected repository: got %q, expected %q", named.Name(), testcase.name)
-			}
-		} else if testcase.name != "" {
-			failf("expected named type, got %T", ref)
-		}
-
-		tagged, ok := ref.(Tagged)
-		if testcase.tag != "" {
-			if ok {
-				if tagged.Tag() != testcase.tag {
-					failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag)
-				}
-			} else {
-				failf("expected tagged type, got %T", ref)
-			}
-		} else if ok {
-			failf("unexpected tagged type")
-		}
-
-		digested, ok := ref.(Digested)
-		if testcase.digest != "" {
-			if ok {
-				if digested.Digest().String() != testcase.digest {
-					failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest)
-				}
-			} else {
-				failf("expected digested type, got %T", ref)
-			}
-		} else if ok {
-			failf("unexpected digested type")
-		}
-
-		t = serializationType{
-			Description: testcase.description,
-			Field:       AsField(ref),
-		}
-
-		b2, err := json.Marshal(t)
-		if err != nil {
-			failf("error marshalling serialization type: %v", err)
-		}
-
-		if string(b) != string(b2) {
-			failf("unexpected serialized value: expected %q, got %q", string(b), string(b2))
-		}
-
-		// Ensure t.Field is not implementing "Reference" directly, getting
-		// around the Reference type system
-		var fieldInterface interface{} = t.Field
-		if _, ok := fieldInterface.(Reference); ok {
-			failf("field should not implement Reference interface")
-		}
-
-	}
-}
-
-func TestWithTag(t
*testing.T) { - testcases := []struct { - name string - digest digest.Digest - tag string - combined string - }{ - { - name: "test.com/foo", - tag: "tag", - combined: "test.com/foo:tag", - }, - { - name: "foo", - tag: "tag2", - combined: "foo:tag2", - }, - { - name: "test.com:8000/foo", - tag: "tag4", - combined: "test.com:8000/foo:tag4", - }, - { - name: "test.com:8000/foo", - tag: "TAG5", - combined: "test.com:8000/foo:TAG5", - }, - { - name: "test.com:8000/foo", - digest: "sha256:1234567890098765432112345667890098765", - tag: "TAG5", - combined: "test.com:8000/foo:TAG5@sha256:1234567890098765432112345667890098765", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.name)+": "+format, v...) - t.Fail() - } - - named, err := WithName(testcase.name) - if err != nil { - failf("error parsing name: %s", err) - } - if testcase.digest != "" { - canonical, err := WithDigest(named, testcase.digest) - if err != nil { - failf("error adding digest") - } - named = canonical - } - - tagged, err := WithTag(named, testcase.tag) - if err != nil { - failf("WithTag failed: %s", err) - } - if tagged.String() != testcase.combined { - failf("unexpected: got %q, expected %q", tagged.String(), testcase.combined) - } - } -} - -func TestWithDigest(t *testing.T) { - testcases := []struct { - name string - digest digest.Digest - tag string - combined string - }{ - { - name: "test.com/foo", - digest: "sha256:1234567890098765432112345667890098765", - combined: "test.com/foo@sha256:1234567890098765432112345667890098765", - }, - { - name: "foo", - digest: "sha256:1234567890098765432112345667890098765", - combined: "foo@sha256:1234567890098765432112345667890098765", - }, - { - name: "test.com:8000/foo", - digest: "sha256:1234567890098765432112345667890098765", - combined: "test.com:8000/foo@sha256:1234567890098765432112345667890098765", - }, - { - name: "test.com:8000/foo", - digest: "sha256:1234567890098765432112345667890098765", - tag: "latest", - combined: "test.com:8000/foo:latest@sha256:1234567890098765432112345667890098765", - }, - } - for _, testcase := range testcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.name)+": "+format, v...) 
-		t.Fail()
-	}
-
-	named, err := WithName(testcase.name)
-	if err != nil {
-		failf("error parsing name: %s", err)
-	}
-	if testcase.tag != "" {
-		tagged, err := WithTag(named, testcase.tag)
-		if err != nil {
-			failf("error adding tag")
-		}
-		named = tagged
-	}
-	digested, err := WithDigest(named, testcase.digest)
-	if err != nil {
-		failf("WithDigest failed: %s", err)
-	}
-	if digested.String() != testcase.combined {
-		failf("unexpected: got %q, expected %q", digested.String(), testcase.combined)
-	}
-	}
-}
-
-func TestParseNamed(t *testing.T) {
-	testcases := []struct {
-		input  string
-		domain string
-		name   string
-		err    error
-	}{
-		{
-			input:  "test.com/foo",
-			domain: "test.com",
-			name:   "foo",
-		},
-		{
-			input:  "test:8080/foo",
-			domain: "test:8080",
-			name:   "foo",
-		},
-		{
-			input: "test_com/foo",
-			err:   ErrNameNotCanonical,
-		},
-		{
-			input: "test.com",
-			err:   ErrNameNotCanonical,
-		},
-		{
-			input: "foo",
-			err:   ErrNameNotCanonical,
-		},
-		{
-			input: "library/foo",
-			err:   ErrNameNotCanonical,
-		},
-		{
-			input:  "docker.io/library/foo",
-			domain: "docker.io",
-			name:   "library/foo",
-		},
-		// Ambiguous case, parser will add "library/" to foo
-		{
-			input: "docker.io/foo",
-			err:   ErrNameNotCanonical,
-		},
-	}
-	for _, testcase := range testcases {
-		failf := func(format string, v ...interface{}) {
-			t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
-			t.Fail()
-		}
-
-		named, err := ParseNamed(testcase.input)
-		if err != nil && testcase.err == nil {
-			failf("error parsing name: %s", err)
-			continue
-		} else if err == nil && testcase.err != nil {
-			failf("parsing succeeded: expected error %v", testcase.err)
-			continue
-		} else if err != testcase.err {
-			failf("unexpected error %v, expected %v", err, testcase.err)
-			continue
-		} else if err != nil {
-			continue
-		}
-
-		domain, name := SplitHostname(named)
-		if domain != testcase.domain {
-			failf("unexpected domain: got %q, expected %q", domain, testcase.domain)
-		}
-		if name != testcase.name {
-			failf("unexpected name: got %q, expected %q", name, testcase.name)
-		}
-	}
-}
diff --git a/vendor/github.com/containers/image/docker/reference/regexp.go b/vendor/github.com/containers/image/docker/reference/regexp.go
deleted file mode 100644
index 405e995db9dd..000000000000
--- a/vendor/github.com/containers/image/docker/reference/regexp.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package reference
-
-import "regexp"
-
-var (
-	// alphaNumericRegexp defines the alpha numeric atom, typically a
-	// component of names. This only allows lower case characters and digits.
-	alphaNumericRegexp = match(`[a-z0-9]+`)
-
-	// separatorRegexp defines the separators allowed to be embedded in name
-	// components. This allows one period, one or two underscores, and multiple
-	// dashes.
-	separatorRegexp = match(`(?:[._]|__|[-]*)`)
-
-	// nameComponentRegexp restricts registry path component names to start
-	// with at least one letter or number, with following parts able to be
-	// separated by one period, one or two underscores, and multiple dashes.
-	nameComponentRegexp = expression(
-		alphaNumericRegexp,
-		optional(repeated(separatorRegexp, alphaNumericRegexp)))
-
-	// domainComponentRegexp restricts the registry domain component of a
-	// repository name to start with a component as defined by domainRegexp
-	// and followed by an optional port.
-	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
-
-	// domainRegexp defines the structure of potential domain components
-	// that may be part of image names.
This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. - domainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) - - // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(domainRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = anchored( - optional(capture(domainRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = match(`([a-f0-9]{64})`) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. - anchoredIdentifierRegexp = anchored(IdentifierRegexp) - - // anchoredShortIdentifierRegexp is used to check if a value - // is a possible identifier prefix, anchored at start and end - // of string. - anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. 
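-// For example, optional(match(`a`)) compiles to the pattern (?:a)?.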
-func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. -func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} diff --git a/vendor/github.com/containers/image/docker/reference/regexp_test.go b/vendor/github.com/containers/image/docker/reference/regexp_test.go deleted file mode 100644 index c21263992f3e..000000000000 --- a/vendor/github.com/containers/image/docker/reference/regexp_test.go +++ /dev/null @@ -1,553 +0,0 @@ -package reference - -import ( - "regexp" - "strings" - "testing" -) - -type regexpMatch struct { - input string - match bool - subs []string -} - -func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) { - matches := r.FindStringSubmatch(m.input) - if m.match && matches != nil { - if len(matches) != (r.NumSubexp()+1) || matches[0] != m.input { - t.Fatalf("Bad match result %#v for %q", matches, m.input) - } - if len(matches) < (len(m.subs) + 1) { - t.Errorf("Expected %d sub matches, only have %d for %q", len(m.subs), len(matches)-1, m.input) - } - for i := range m.subs { - if m.subs[i] != matches[i+1] { - t.Errorf("Unexpected submatch %d: %q, expected %q for %q", i+1, matches[i+1], m.subs[i], m.input) - } - } - } else if m.match { - t.Errorf("Expected match for %q", m.input) - } else if matches != nil { - t.Errorf("Unexpected match for %q", m.input) - } -} - -func TestDomainRegexp(t *testing.T) { - hostcases := []regexpMatch{ - { - input: "test.com", - match: true, - }, - { - input: "test.com:10304", - match: true, - }, - { - input: "test.com:http", - match: false, - }, - { - input: "localhost", - match: true, - }, - { - input: "localhost:8080", - match: true, - }, - { - input: "a", - match: true, - }, - { - input: "a.b", - match: true, - }, - { - input: "ab.cd.com", - match: true, - }, - { - input: "a-b.com", - match: true, - }, - { - input: "-ab.com", - match: false, - }, - { - input: "ab-.com", - match: false, - }, - { - input: "ab.c-om", - match: true, - }, - { - input: "ab.-com", - match: false, - }, - { - input: "ab.com-", - match: false, - }, - { - input: "0101.com", - match: true, // TODO(dmcgowan): valid if this should be allowed - }, - { - input: "001a.com", - match: true, - }, - { - input: "b.gbc.io:443", - match: true, - }, - { - input: "b.gbc.io", - match: true, - }, - { - input: "xn--n3h.com", // ☃.com in punycode - match: true, - }, - { - input: "Asdf.com", // uppercase character - match: true, - }, - } - r := regexp.MustCompile(`^` + domainRegexp.String() + `$`) - for i := range hostcases { - checkRegexp(t, r, hostcases[i]) - } -} - -func TestFullNameRegexp(t *testing.T) { - if anchoredNameRegexp.NumSubexp() != 2 { - t.Fatalf("anchored name regexp should have two submatches: %v, %v != 2", - anchoredNameRegexp, anchoredNameRegexp.NumSubexp()) - } - - testcases := []regexpMatch{ - { - input: "", - match: false, - }, - { - 
input: "short", - match: true, - subs: []string{"", "short"}, - }, - { - input: "simple/name", - match: true, - subs: []string{"simple", "name"}, - }, - { - input: "library/ubuntu", - match: true, - subs: []string{"library", "ubuntu"}, - }, - { - input: "docker/stevvooe/app", - match: true, - subs: []string{"docker", "stevvooe/app"}, - }, - { - input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", - match: true, - subs: []string{"aa", "aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb"}, - }, - { - input: "aa/aa/bb/bb/bb", - match: true, - subs: []string{"aa", "aa/bb/bb/bb"}, - }, - { - input: "a/a/a/a", - match: true, - subs: []string{"a", "a/a/a"}, - }, - { - input: "a/a/a/a/", - match: false, - }, - { - input: "a//a/a", - match: false, - }, - { - input: "a", - match: true, - subs: []string{"", "a"}, - }, - { - input: "a/aa", - match: true, - subs: []string{"a", "aa"}, - }, - { - input: "a/aa/a", - match: true, - subs: []string{"a", "aa/a"}, - }, - { - input: "foo.com", - match: true, - subs: []string{"", "foo.com"}, - }, - { - input: "foo.com/", - match: false, - }, - { - input: "foo.com:8080/bar", - match: true, - subs: []string{"foo.com:8080", "bar"}, - }, - { - input: "foo.com:http/bar", - match: false, - }, - { - input: "foo.com/bar", - match: true, - subs: []string{"foo.com", "bar"}, - }, - { - input: "foo.com/bar/baz", - match: true, - subs: []string{"foo.com", "bar/baz"}, - }, - { - input: "localhost:8080/bar", - match: true, - subs: []string{"localhost:8080", "bar"}, - }, - { - input: "sub-dom1.foo.com/bar/baz/quux", - match: true, - subs: []string{"sub-dom1.foo.com", "bar/baz/quux"}, - }, - { - input: "blog.foo.com/bar/baz", - match: true, - subs: []string{"blog.foo.com", "bar/baz"}, - }, - { - input: "a^a", - match: false, - }, - { - input: "aa/asdf$$^/aa", - match: false, - }, - { - input: "asdf$$^/aa", - match: false, - }, - { - input: "aa-a/a", - match: true, - subs: []string{"aa-a", "a"}, - }, - { - input: strings.Repeat("a/", 128) + "a", - match: true, - subs: []string{"a", strings.Repeat("a/", 127) + "a"}, - }, - { - input: "a-/a/a/a", - match: false, - }, - { - input: "foo.com/a-/a/a", - match: false, - }, - { - input: "-foo/bar", - match: false, - }, - { - input: "foo/bar-", - match: false, - }, - { - input: "foo-/bar", - match: false, - }, - { - input: "foo/-bar", - match: false, - }, - { - input: "_foo/bar", - match: false, - }, - { - input: "foo_bar", - match: true, - subs: []string{"", "foo_bar"}, - }, - { - input: "foo_bar.com", - match: true, - subs: []string{"", "foo_bar.com"}, - }, - { - input: "foo_bar.com:8080", - match: false, - }, - { - input: "foo_bar.com:8080/app", - match: false, - }, - { - input: "foo.com/foo_bar", - match: true, - subs: []string{"foo.com", "foo_bar"}, - }, - { - input: "____/____", - match: false, - }, - { - input: "_docker/_docker", - match: false, - }, - { - input: "docker_/docker_", - match: false, - }, - { - input: "b.gcr.io/test.example.com/my-app", - match: true, - subs: []string{"b.gcr.io", "test.example.com/my-app"}, - }, - { - input: "xn--n3h.com/myimage", // ☃.com in punycode - match: true, - subs: []string{"xn--n3h.com", "myimage"}, - }, - { - input: "xn--7o8h.com/myimage", // 🐳.com in punycode - match: true, - subs: []string{"xn--7o8h.com", "myimage"}, - }, - { - input: "example.com/xn--7o8h.com/myimage", // 🐳.com in punycode - match: true, - subs: []string{"example.com", "xn--7o8h.com/myimage"}, - }, - { - input: "example.com/some_separator__underscore/myimage", - match: true, - subs: []string{"example.com", 
"some_separator__underscore/myimage"}, - }, - { - input: "example.com/__underscore/myimage", - match: false, - }, - { - input: "example.com/..dots/myimage", - match: false, - }, - { - input: "example.com/.dots/myimage", - match: false, - }, - { - input: "example.com/nodouble..dots/myimage", - match: false, - }, - { - input: "example.com/nodouble..dots/myimage", - match: false, - }, - { - input: "docker./docker", - match: false, - }, - { - input: ".docker/docker", - match: false, - }, - { - input: "docker-/docker", - match: false, - }, - { - input: "-docker/docker", - match: false, - }, - { - input: "do..cker/docker", - match: false, - }, - { - input: "do__cker:8080/docker", - match: false, - }, - { - input: "do__cker/docker", - match: true, - subs: []string{"", "do__cker/docker"}, - }, - { - input: "b.gcr.io/test.example.com/my-app", - match: true, - subs: []string{"b.gcr.io", "test.example.com/my-app"}, - }, - { - input: "registry.io/foo/project--id.module--name.ver---sion--name", - match: true, - subs: []string{"registry.io", "foo/project--id.module--name.ver---sion--name"}, - }, - { - input: "Asdf.com/foo/bar", // uppercase character in hostname - match: true, - }, - { - input: "Foo/FarB", // uppercase characters in remote name - match: false, - }, - } - for i := range testcases { - checkRegexp(t, anchoredNameRegexp, testcases[i]) - } -} - -func TestReferenceRegexp(t *testing.T) { - if ReferenceRegexp.NumSubexp() != 3 { - t.Fatalf("anchored name regexp should have three submatches: %v, %v != 3", - ReferenceRegexp, ReferenceRegexp.NumSubexp()) - } - - testcases := []regexpMatch{ - { - input: "registry.com:8080/myapp:tag", - match: true, - subs: []string{"registry.com:8080/myapp", "tag", ""}, - }, - { - input: "registry.com:8080/myapp@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"registry.com:8080/myapp", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"registry.com:8080/myapp", "tag2", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "registry.com:8080/myapp@sha256:badbadbadbad", - match: false, - }, - { - input: "registry.com:8080/myapp:invalid~tag", - match: false, - }, - { - input: "bad_hostname.com:8080/myapp:tag", - match: false, - }, - { - input:// localhost treated as name, missing tag with 8080 as tag - "localhost:8080@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"localhost", "8080", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "localhost:8080/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"localhost:8080/name", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "localhost:http/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: false, - }, - { - // localhost will be treated as an image name without a host - input: "localhost@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", - match: true, - subs: []string{"localhost", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, - }, - { - input: "registry.com:8080/myapp@bad", - match: false, - }, - { - input: "registry.com:8080/myapp@2bad", - 
match: false, // TODO(dmcgowan): Support this as valid - }, - } - - for i := range testcases { - checkRegexp(t, ReferenceRegexp, testcases[i]) - } - -} - -func TestIdentifierRegexp(t *testing.T) { - fullCases := []regexpMatch{ - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", - match: true, - }, - { - input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C", - match: false, - }, - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf", - match: false, - }, - { - input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", - match: false, - }, - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482", - match: false, - }, - } - - shortCases := []regexpMatch{ - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", - match: true, - }, - { - input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C", - match: false, - }, - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf", - match: true, - }, - { - input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", - match: false, - }, - { - input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482", - match: false, - }, - { - input: "da304", - match: false, - }, - { - input: "da304e", - match: true, - }, - } - - for i := range fullCases { - checkRegexp(t, anchoredIdentifierRegexp, fullCases[i]) - } - - for i := range shortCases { - checkRegexp(t, anchoredShortIdentifierRegexp, shortCases[i]) - } -} diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go deleted file mode 100644 index 72c85c70a3cc..000000000000 --- a/vendor/github.com/containers/image/docker/tarfile/dest.go +++ /dev/null @@ -1,258 +0,0 @@ -package tarfile - -import ( - "archive/tar" - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "time" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. - -// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer. -type Destination struct { - writer io.Writer - tar *tar.Writer - repoTag string - // Other state. - blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs -} - -// NewDestination returns a tarfile.Destination for the specified io.Writer. -func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination { - // For github.com/docker/docker consumers, this works just as well as - // refString := ref.String() - // because when reading the RepoTags strings, github.com/docker/docker/reference - // normalizes both of them to the same value. - // - // Doing it this way to include the normalized-out `docker.io[/library]` does make - // a difference for github.com/projectatomic/docker consumers, with the - // “Add --add-registry and --block-registry options to docker daemon” patch. - // These consumers treat reference strings which include a hostname and reference - // strings without a hostname differently. 
-	//
-	// Using the host name here is more explicit about the intent, and it has the same
-	// effect as (docker pull) in projectatomic/docker, which tags the result using
-	// a hostname-qualified reference.
-	// See https://github.com/containers/image/issues/72 for a more detailed
-	// analysis and explanation.
-	refString := fmt.Sprintf("%s:%s", ref.Name(), ref.Tag())
-	return &Destination{
-		writer:  dest,
-		tar:     tar.NewWriter(dest),
-		repoTag: refString,
-		blobs:   make(map[digest.Digest]types.BlobInfo),
-	}
-}
-
-// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
-// If an empty slice or nil is returned, then any MIME type can be tried for upload.
-func (d *Destination) SupportedManifestMIMETypes() []string {
-	return []string{
-		manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
-	}
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *Destination) SupportsSignatures() error {
-	return errors.Errorf("Storing signatures for docker tar files is not supported")
-}
-
-// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
-func (d *Destination) ShouldCompressLayers() bool {
-	return false
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *Destination) AcceptsForeignLayerURLs() bool {
-	return false
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
-func (d *Destination) MustMatchRuntimeOS() bool {
-	return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
-// inputInfo.Size is the expected length of stream, if known.
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *Destination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
-	if inputInfo.Digest.String() == "" {
-		return types.BlobInfo{}, errors.Errorf("Can not stream a blob with unknown digest to docker tarfile")
-	}
-
-	ok, size, err := d.HasBlob(inputInfo)
-	if err != nil {
-		return types.BlobInfo{}, err
-	}
-	if ok {
-		return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
-	}
-
-	if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size.
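-		// A tar file header must record the file size up front, so a stream of
-		// unknown length has to be buffered to disk before it can be sent.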
-		logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
-		streamCopy, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-tarfile-blob")
-		if err != nil {
-			return types.BlobInfo{}, err
-		}
-		defer os.Remove(streamCopy.Name())
-		defer streamCopy.Close()
-
-		size, err := io.Copy(streamCopy, stream)
-		if err != nil {
-			return types.BlobInfo{}, err
-		}
-		_, err = streamCopy.Seek(0, os.SEEK_SET)
-		if err != nil {
-			return types.BlobInfo{}, err
-		}
-		inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
-		stream = streamCopy
-		logrus.Debugf("... streaming done")
-	}
-
-	digester := digest.Canonical.Digester()
-	tee := io.TeeReader(stream, digester.Hash())
-	if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil {
-		return types.BlobInfo{}, err
-	}
-	d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}
-	return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil
-}
-
-// HasBlob returns true iff the image destination already contains a blob with
-// the matching digest which can be reapplied using ReapplyBlob. Unlike
-// PutBlob, the digest can not be empty. If HasBlob returns true, the size of
-// the blob must also be returned. If the destination does not contain the
-// blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); it
-// returns a non-nil error only on an unexpected failure.
-func (d *Destination) HasBlob(info types.BlobInfo) (bool, int64, error) {
-	if info.Digest == "" {
-		return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
-	}
-	if blob, ok := d.blobs[info.Digest]; ok {
-		return true, blob.Size, nil
-	}
-	return false, -1, nil
-}
-
-// ReapplyBlob informs the image destination that a blob for which HasBlob
-// previously returned true would have been passed to PutBlob if it had
-// returned false. Like HasBlob and unlike PutBlob, the digest can not be
-// empty. If the blob is a filesystem layer, this signifies that the changes
-// it describes need to be applied again when composing a filesystem tree.
-func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
-	return info, nil
-}
-
-// PutManifest writes manifest to the destination.
-// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
-// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
-func (d *Destination) PutManifest(m []byte) error {
-	// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
-	// so the caller trying a different manifest kind would be pointless.
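-	// Parse the schema 2 manifest to extract the config digest and the ordered
-	// layer paths that manifest.json needs.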
-	var man schema2Manifest
-	if err := json.Unmarshal(m, &man); err != nil {
-		return errors.Wrap(err, "Error parsing manifest")
-	}
-	if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
-		return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
-	}
-
-	layerPaths := []string{}
-	for _, l := range man.Layers {
-		layerPaths = append(layerPaths, l.Digest.String())
-	}
-
-	items := []ManifestItem{{
-		Config:       man.Config.Digest.String(),
-		RepoTags:     []string{d.repoTag},
-		Layers:       layerPaths,
-		Parent:       "",
-		LayerSources: nil,
-	}}
-	itemsBytes, err := json.Marshal(&items)
-	if err != nil {
-		return err
-	}
-
-	// FIXME? Do we also need to support the legacy format?
-	return d.sendFile(manifestFileName, int64(len(itemsBytes)), bytes.NewReader(itemsBytes))
-}
-
-type tarFI struct {
-	path string
-	size int64
-}
-
-func (t *tarFI) Name() string {
-	return t.path
-}
-func (t *tarFI) Size() int64 {
-	return t.size
-}
-func (t *tarFI) Mode() os.FileMode {
-	return 0444
-}
-func (t *tarFI) ModTime() time.Time {
-	return time.Unix(0, 0)
-}
-func (t *tarFI) IsDir() bool {
-	return false
-}
-func (t *tarFI) Sys() interface{} {
-	return nil
-}
-
-// sendFile sends a file into the tar stream.
-func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error {
-	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
-	if err != nil {
-		return err
-	}
-	logrus.Debugf("Sending as tar file %s", path)
-	if err := d.tar.WriteHeader(hdr); err != nil {
-		return err
-	}
-	size, err := io.Copy(d.tar, stream)
-	if err != nil {
-		return err
-	}
-	if size != expectedSize {
-		return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
-	}
-	return nil
-}
-
-// PutSignatures adds the given signatures to the docker tarfile (currently not
-// supported). MUST be called after PutManifest (signatures reference manifest
-// contents).
-func (d *Destination) PutSignatures(signatures [][]byte) error {
-	if len(signatures) != 0 {
-		return errors.Errorf("Storing signatures for docker tar files is not supported")
-	}
-	return nil
-}
-
-// Commit finishes writing data to the underlying io.Writer.
-// It is the caller's responsibility to close it, if necessary.
-func (d *Destination) Commit() error {
-	return d.tar.Close()
-}
diff --git a/vendor/github.com/containers/image/docker/tarfile/doc.go b/vendor/github.com/containers/image/docker/tarfile/doc.go
deleted file mode 100644
index 4ea5369c05c8..000000000000
--- a/vendor/github.com/containers/image/docker/tarfile/doc.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// Package tarfile is an internal implementation detail of some transports.
-// Do not use outside of the github.com/containers/image repo!
-package tarfile
diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go
deleted file mode 100644
index f77cb713c42c..000000000000
--- a/vendor/github.com/containers/image/docker/tarfile/src.go
+++ /dev/null
@@ -1,360 +0,0 @@
-package tarfile
-
-import (
-	"archive/tar"
-	"bytes"
-	"context"
-	"encoding/json"
-	"io"
-	"io/ioutil"
-	"os"
-	"path"
-
-	"github.com/containers/image/manifest"
-	"github.com/containers/image/pkg/compression"
-	"github.com/containers/image/types"
-	"github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
-)
-
-// Source is a partial implementation of types.ImageSource for reading from tarPath.
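-// The cached state below is populated lazily, on the first call to ensureCachedDataIsPresent().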
-type Source struct {
-	tarPath string
-	// The following data is only available after ensureCachedDataIsPresent() succeeds
-	tarManifest       *ManifestItem // nil if not available yet.
-	configBytes       []byte
-	configDigest      digest.Digest
-	orderedDiffIDList []diffID
-	knownLayers       map[diffID]*layerInfo
-	// Other state
-	generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
-}
-
-type layerInfo struct {
-	path string
-	size int64
-}
-
-// NewSource returns a tarfile.Source for the specified path.
-func NewSource(path string) *Source {
-	// TODO: We could add support for multiple images in a single archive, so
-	// that people could use docker-archive:opensuse.tar:opensuse:leap as
-	// the source of an image.
-	return &Source{
-		tarPath: path,
-	}
-}
-
-// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
-type tarReadCloser struct {
-	*tar.Reader
-	backingFile *os.File
-}
-
-func (t *tarReadCloser) Close() error {
-	return t.backingFile.Close()
-}
-
-// openTarComponent returns a ReadCloser for the specific file within the archive.
-// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
-// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
-// The caller should call .Close() on the returned stream.
-func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) {
-	f, err := os.Open(s.tarPath)
-	if err != nil {
-		return nil, err
-	}
-	succeeded := false
-	defer func() {
-		if !succeeded {
-			f.Close()
-		}
-	}()
-
-	tarReader, header, err := findTarComponent(f, componentPath)
-	if err != nil {
-		return nil, err
-	}
-	if header == nil {
-		return nil, os.ErrNotExist
-	}
-	if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
-		// We follow only one symlink, so no loops are possible.
-		if _, err := f.Seek(0, os.SEEK_SET); err != nil {
-			return nil, err
-		}
-		// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
-		// so we don't care.
-		tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
-		if err != nil {
-			return nil, err
-		}
-		if header == nil {
-			return nil, os.ErrNotExist
-		}
-	}
-
-	if !header.FileInfo().Mode().IsRegular() {
-		return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
-	}
-	succeeded = true
-	return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
-}
-
-// findTarComponent returns a header and a reader matching path within inputFile,
-// or (nil, nil, nil) if not found.
-func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) {
-	t := tar.NewReader(inputFile)
-	for {
-		h, err := t.Next()
-		if err == io.EOF {
-			break
-		}
-		if err != nil {
-			return nil, nil, err
-		}
-		if h.Name == path {
-			return t, h, nil
-		}
-	}
-	return nil, nil, nil
-}
-
-// readTarComponent returns full contents of componentPath.
-func (s *Source) readTarComponent(path string) ([]byte, error) {
-	file, err := s.openTarComponent(path)
-	if err != nil {
-		return nil, errors.Wrapf(err, "Error loading tar component %s", path)
-	}
-	defer file.Close()
-	bytes, err := ioutil.ReadAll(file)
-	if err != nil {
-		return nil, err
-	}
-	return bytes, nil
-}
-
-// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
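-// It is idempotent: once s.tarManifest is set, subsequent calls return immediately.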
-func (s *Source) ensureCachedDataIsPresent() error {
-	if s.tarManifest != nil {
-		return nil
-	}
-
-	// Read and parse manifest.json
-	tarManifest, err := s.loadTarManifest()
-	if err != nil {
-		return err
-	}
-
-	// Check to make sure length is 1
-	if len(tarManifest) != 1 {
-		return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
-	}
-
-	// Read and parse config.
-	configBytes, err := s.readTarComponent(tarManifest[0].Config)
-	if err != nil {
-		return err
-	}
-	var parsedConfig image // Most fields omitted, we only care about layer DiffIDs.
-	if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
-		return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
-	}
-
-	knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
-	if err != nil {
-		return err
-	}
-
-	// Success; commit.
-	s.tarManifest = &tarManifest[0]
-	s.configBytes = configBytes
-	s.configDigest = digest.FromBytes(configBytes)
-	s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
-	s.knownLayers = knownLayers
-	return nil
-}
-
-// loadTarManifest loads and decodes the manifest.json.
-func (s *Source) loadTarManifest() ([]ManifestItem, error) {
-	// FIXME? Do we need to deal with the legacy format?
-	bytes, err := s.readTarComponent(manifestFileName)
-	if err != nil {
-		return nil, err
-	}
-	var items []ManifestItem
-	if err := json.Unmarshal(bytes, &items); err != nil {
-		return nil, errors.Wrap(err, "Error decoding tar manifest.json")
-	}
-	return items, nil
-}
-
-// LoadTarManifest loads and decodes the manifest.json
-func (s *Source) LoadTarManifest() ([]ManifestItem, error) {
-	return s.loadTarManifest()
-}
-
-func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *image) (map[diffID]*layerInfo, error) {
-	// Collect layer data available in manifest and config.
-	if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
-		return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
-	}
-	knownLayers := map[diffID]*layerInfo{}
-	unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
-	for i, diffID := range parsedConfig.RootFS.DiffIDs {
-		if _, ok := knownLayers[diffID]; ok {
-			// Apparently it really can happen that a single image contains the same layer diff more than once.
-			// In that case, the diffID validation ensures that both layers truly are the same, and it should not matter
-			// which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
-			continue
-		}
-		layerPath := tarManifest.Layers[i]
-		if _, ok := unknownLayerSizes[layerPath]; ok {
-			return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
-		}
-		li := &layerInfo{ // A new element in each iteration
-			path: layerPath,
-			size: -1,
-		}
-		knownLayers[diffID] = li
-		unknownLayerSizes[layerPath] = li
-	}
-
-	// Scan the tar file to collect layer sizes.
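-	// manifest.json does not record layer sizes, so they are read from the tar headers themselves.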
-	file, err := os.Open(s.tarPath)
-	if err != nil {
-		return nil, err
-	}
-	defer file.Close()
-	t := tar.NewReader(file)
-	for {
-		h, err := t.Next()
-		if err == io.EOF {
-			break
-		}
-		if err != nil {
-			return nil, err
-		}
-		if li, ok := unknownLayerSizes[h.Name]; ok {
-			li.size = h.Size
-			delete(unknownLayerSizes, h.Name)
-		}
-	}
-	if len(unknownLayerSizes) != 0 {
-		return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with better error reporting, if it ever happens in practice.
-	}
-
-	return knownLayers, nil
-}
-
-// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
-// It may use a remote (= slow) service.
-func (s *Source) GetManifest() ([]byte, string, error) {
-	if s.generatedManifest == nil {
-		if err := s.ensureCachedDataIsPresent(); err != nil {
-			return nil, "", err
-		}
-		m := schema2Manifest{
-			SchemaVersion: 2,
-			MediaType:     manifest.DockerV2Schema2MediaType,
-			Config: distributionDescriptor{
-				MediaType: manifest.DockerV2Schema2ConfigMediaType,
-				Size:      int64(len(s.configBytes)),
-				Digest:    s.configDigest,
-			},
-			Layers: []distributionDescriptor{},
-		}
-		for _, diffID := range s.orderedDiffIDList {
-			li, ok := s.knownLayers[diffID]
-			if !ok {
-				return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
-			}
-			m.Layers = append(m.Layers, distributionDescriptor{
-				Digest:    digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
-				MediaType: manifest.DockerV2Schema2LayerMediaType,
-				Size:      li.size,
-			})
-		}
-		manifestBytes, err := json.Marshal(&m)
-		if err != nil {
-			return nil, "", err
-		}
-		s.generatedManifest = manifestBytes
-	}
-	return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
-}
-
-// GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest
-// out of a manifest list.
-func (s *Source) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
-	// How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType.
-	return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
-}
-
-type readCloseWrapper struct {
-	io.Reader
-	closeFunc func() error
-}
-
-func (r readCloseWrapper) Close() error {
-	if r.closeFunc != nil {
-		return r.closeFunc()
-	}
-	return nil
-}
-
-// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
-	if err := s.ensureCachedDataIsPresent(); err != nil {
-		return nil, 0, err
-	}
-
-	if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
-		return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
-	}
-
-	if li, ok := s.knownLayers[diffID(info.Digest)]; ok { // diffID is a digest of the uncompressed tarball,
-		stream, err := s.openTarComponent(li.path)
-		if err != nil {
-			return nil, 0, err
-		}
-
-		// In order to handle the fact that digests != diffIDs (and thus that a
-		// caller which is trying to verify the blob will run into problems),
-		// we need to decompress blobs. This is a bit ugly, but it's a
-		// consequence of making everything addressable by their DiffID rather
-		// than by their digest...
-		//
-		// In particular, because the v2s2 manifest being generated uses
-		// DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
-		// layers, not their _actual_ digest. The result is that copy/... will
-		// be verifying a "digest" which is not the actual layer's digest (but
-		// is instead the DiffID).
-
-		decompressFunc, reader, err := compression.DetectCompression(stream)
-		if err != nil {
-			return nil, 0, errors.Wrapf(err, "Detecting compression in blob %s", info.Digest)
-		}
-
-		if decompressFunc != nil {
-			reader, err = decompressFunc(reader)
-			if err != nil {
-				return nil, 0, errors.Wrapf(err, "Decompressing blob %s stream", info.Digest)
-			}
-		}
-
-		newStream := readCloseWrapper{
-			Reader:    reader,
-			closeFunc: stream.Close,
-		}
-
-		return newStream, li.size, nil
-	}
-
-	return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
-}
-
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-func (s *Source) GetSignatures(ctx context.Context) ([][]byte, error) {
-	return [][]byte{}, nil
-}
diff --git a/vendor/github.com/containers/image/docker/wwwauthenticate.go b/vendor/github.com/containers/image/docker/wwwauthenticate.go
deleted file mode 100644
index 23664a74a52a..000000000000
--- a/vendor/github.com/containers/image/docker/wwwauthenticate.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package docker
-
-// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.
-
-import (
-	"net/http"
-	"strings"
-)
-
-// challenge carries information from a WWW-Authenticate response header.
-// See RFC 7235.
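-// For example, `Bearer realm="https://auth.docker.io/token",service="registry.docker.io"`
-// is carried as Scheme "bearer" with Parameters {"realm": …, "service": …}.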
-type challenge struct {
-	// Scheme is the auth-scheme according to RFC 7235
-	Scheme string
-
-	// Parameters are the auth-params according to RFC 7235
-	Parameters map[string]string
-}
-
-// Octet types from RFC 7230.
-type octetType byte
-
-var octetTypes [256]octetType
-
-const (
-	isToken octetType = 1 << iota
-	isSpace
-)
-
-func init() {
-	// OCTET      = <any 8-bit sequence of data>
-	// CHAR       = <any US-ASCII character (octets 0 - 127)>
-	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
-	// CR         = <US-ASCII CR, carriage return (13)>
-	// LF         = <US-ASCII LF, linefeed (10)>
-	// SP         = <US-ASCII SP, space (32)>
-	// HT         = <US-ASCII HT, horizontal-tab (9)>
-	// <">        = <US-ASCII double-quote mark (34)>
-	// CRLF       = CR LF
-	// LWS        = [CRLF] 1*( SP | HT )
-	// TEXT       = <any OCTET except CTLs, but including LWS>
-	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
-	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
-	// token      = 1*<any CHAR except CTLs or separators>
-	// qdtext     = <any TEXT except <">>
-
-	for c := 0; c < 256; c++ {
-		var t octetType
-		isCtl := c <= 31 || c == 127
-		isChar := 0 <= c && c <= 127
-		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
-		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
-			t |= isSpace
-		}
-		if isChar && !isCtl && !isSeparator {
-			t |= isToken
-		}
-		octetTypes[c] = t
-	}
-}
-
-func parseAuthHeader(header http.Header) []challenge {
-	challenges := []challenge{}
-	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
-		v, p := parseValueAndParams(h)
-		if v != "" {
-			challenges = append(challenges, challenge{Scheme: v, Parameters: p})
-		}
-	}
-	return challenges
-}
-
-// NOTE: This is not a fully compliant parser per RFC 7235:
-// Most notably it does not support more than one challenge within a single header.
-// Some of the whitespace parsing also seems noncompliant.
-// But it is clearly better than what we used to have…
-func parseValueAndParams(header string) (value string, params map[string]string) {
-	params = make(map[string]string)
-	value, s := expectToken(header)
-	if value == "" {
-		return
-	}
-	value = strings.ToLower(value)
-	s = "," + skipSpace(s)
-	for strings.HasPrefix(s, ",") {
-		var pkey string
-		pkey, s = expectToken(skipSpace(s[1:]))
-		if pkey == "" {
-			return
-		}
-		if !strings.HasPrefix(s, "=") {
-			return
-		}
-		var pvalue string
-		pvalue, s = expectTokenOrQuoted(s[1:])
-		if pvalue == "" {
-			return
-		}
-		pkey = strings.ToLower(pkey)
-		params[pkey] = pvalue
-		s = skipSpace(s)
-	}
-	return
-}
-
-func skipSpace(s string) (rest string) {
-	i := 0
-	for ; i < len(s); i++ {
-		if octetTypes[s[i]]&isSpace == 0 {
-			break
-		}
-	}
-	return s[i:]
-}
-
-func expectToken(s string) (token, rest string) {
-	i := 0
-	for ; i < len(s); i++ {
-		if octetTypes[s[i]]&isToken == 0 {
-			break
-		}
-	}
-	return s[:i], s[i:]
-}
-
-func expectTokenOrQuoted(s string) (value string, rest string) {
-	if !strings.HasPrefix(s, "\"") {
-		return expectToken(s)
-	}
-	s = s[1:]
-	for i := 0; i < len(s); i++ {
-		switch s[i] {
-		case '"':
-			return s[:i], s[i+1:]
-		case '\\':
-			p := make([]byte, len(s)-1)
-			j := copy(p, s[:i])
-			escape := true
-			for i = i + 1; i < len(s); i++ {
-				b := s[i]
-				switch {
-				case escape:
-					escape = false
-					p[j] = b
-					j++
-				case b == '\\':
-					escape = true
-				case b == '"':
-					return string(p[:j]), s[i+1:]
-				default:
-					p[j] = b
-					j++
-				}
-			}
-			return "", ""
-		}
-	}
-	return "", ""
}
diff --git a/vendor/github.com/containers/image/docker/wwwauthenticate_test.go b/vendor/github.com/containers/image/docker/wwwauthenticate_test.go
deleted file mode 100644
index d11f6fbc96e4..000000000000
--- a/vendor/github.com/containers/image/docker/wwwauthenticate_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package docker
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-// This is just a smoke test for the common expected header formats,
-// by no means
comprehensive.
-func TestParseValueAndParams(t *testing.T) {
-	for _, c := range []struct {
-		input  string
-		scope  string
-		params map[string]string
-	}{
-		{
-			`Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/busybox:pull"`,
-			"bearer",
-			map[string]string{
-				"realm":   "https://auth.docker.io/token",
-				"service": "registry.docker.io",
-				"scope":   "repository:library/busybox:pull",
-			},
-		},
-		{
-			`Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/busybox:pull,push"`,
-			"bearer",
-			map[string]string{
-				"realm":   "https://auth.docker.io/token",
-				"service": "registry.docker.io",
-				"scope":   "repository:library/busybox:pull,push",
-			},
-		},
-		{
-			`Bearer realm="http://127.0.0.1:5000/openshift/token"`,
-			"bearer",
-			map[string]string{"realm": "http://127.0.0.1:5000/openshift/token"},
-		},
-	} {
-		scope, params := parseValueAndParams(c.input)
-		assert.Equal(t, c.scope, scope, c.input)
-		assert.Equal(t, c.params, params, c.input)
-	}
-}
diff --git a/vendor/github.com/containers/image/docs/atomic-signature-embedded-json.json b/vendor/github.com/containers/image/docs/atomic-signature-embedded-json.json
deleted file mode 100644
index ccb4eda096f0..000000000000
--- a/vendor/github.com/containers/image/docs/atomic-signature-embedded-json.json
+++ /dev/null
@@ -1,66 +0,0 @@
-{
-  "title": "JSON embedded in an atomic container signature",
-  "description": "This schema is a supplement to atomic-signature.md in this directory.\n\nConsumers of the JSON MUST use the processing rules documented in atomic-signature.md, especially the requirements for the 'critical' subobject.\n\nWhenever this schema and atomic-signature.md, or the github.com/containers/image/signature implementation, differ,\nit is the atomic-signature.md document, or the github.com/containers/image/signature implementation, which governs.\n\nUsers are STRONGLY RECOMMENDED to use the github.com/containers/image/signature implementation instead of writing\ntheir own, ESPECIALLY when consuming signatures, so that the policy.json format can be shared by all image consumers.\n",
-  "type": "object",
-  "required": [
-    "critical",
-    "optional"
-  ],
-  "additionalProperties": false,
-  "properties": {
-    "critical": {
-      "type": "object",
-      "required": [
-        "type",
-        "image",
-        "identity"
-      ],
-      "additionalProperties": false,
-      "properties": {
-        "type": {
-          "type": "string",
-          "enum": [
-            "atomic container signature"
-          ]
-        },
-        "image": {
-          "type": "object",
-          "required": [
-            "docker-manifest-digest"
-          ],
-          "additionalProperties": false,
-          "properties": {
-            "docker-manifest-digest": {
-              "type": "string"
-            }
-          }
-        },
-        "identity": {
-          "type": "object",
-          "required": [
-            "docker-reference"
-          ],
-          "additionalProperties": false,
-          "properties": {
-            "docker-reference": {
-              "type": "string"
-            }
-          }
-        }
-      }
-    },
-    "optional": {
-      "type": "object",
-      "description": "All members are optional, but if they are included, they must be valid.",
-      "additionalProperties": true,
-      "properties": {
-        "creator": {
-          "type": "string"
-        },
-        "timestamp": {
-          "type": "integer"
-        }
-      }
-    }
-  }
-}
\ No newline at end of file
diff --git a/vendor/github.com/containers/image/docs/atomic-signature.md b/vendor/github.com/containers/image/docs/atomic-signature.md
deleted file mode 100644
index 8a1fad5d0160..000000000000
--- a/vendor/github.com/containers/image/docs/atomic-signature.md
+++ /dev/null
@@ -1,241 +0,0 @@
-% atomic-signature(5) Atomic signature format
-% Miloslav Trmač
-% March
2017 - -# Atomic signature format - -This document describes the format of “atomic” container signatures, -as implemented by the `github.com/containers/image/signature` package. - -Most users should be able to consume these signatures by using the `github.com/containers/image/signature` package -(preferably through the higher-level `signature.PolicyContext` interface) -without having to care about the details of the format described below. -This documentation exists primarily for maintainers of the package -and to allow independent reimplementations. - -## High-level overview - -The signature provides an end-to-end authenticated claim that a container image -has been approved by a specific party (e.g. the creator of the image as their work, -an automated build system as a result of an automated build, -a company IT department approving the image for production) under a specified _identity_ -(e.g. an OS base image / specific application, with a specific version). - -An atomic container signature consists of a cryptographic signature which identifies -and authenticates who signed the image, and carries as a signed payload a JSON document. -The JSON document identifies the image being signed, claims a specific identity of the -image and if applicable, contains other information about the image. - -The signatures do not modify the container image (the layers, configuration, manifest, …); -e.g. their presence does not change the manifest digest used to identify the image in -docker/distribution servers; rather, the signatures are associated with an immutable image. -An image can have any number of signatures so signature distribution systems SHOULD support -associating more than one signature with an image. - -## The cryptographic signature - -As distributed, the atomic container signature is a blob which contains a cryptographic signature -in an industry-standard format, carrying a signed JSON payload (i.e. the blob contains both the -JSON document and a signature of the JSON document; it is not a “detached signature” with -independent blobs containing the JSON document and a cryptographic signature). - -Currently the only defined cryptographic signature format is an OpenPGP signature (RFC 4880), -but others may be added in the future. (The blob does not contain metadata identifying the -cryptographic signature format. It is expected that most formats are sufficiently self-describing -that this is not necessary and the configured expected public key provides another indication -of the expected cryptographic signature format. Such metadata may be added in the future for -newly added cryptographic signature formats, if necessary.) - -Consumers of atomic container signatures SHOULD verify the cryptographic signature -against one or more trusted public keys -(e.g. defined in a [policy.json signature verification policy file](policy.json.md)) -before parsing or processing the JSON payload in _any_ way, -in particular they SHOULD stop processing the container signature -if the cryptographic signature verification fails, without even starting to process the JSON payload. - -(Consumers MAY extract identification of the signing key and other metadata from the cryptographic signature, -and the JSON payload, without verifying the signature, if the purpose is to allow managing the signature blobs, -e.g. 
-e.g. to list the authors and image identities of signatures associated with a single container image;
-if so, they SHOULD design the output of such processing to minimize the risk of users considering the output trusted
-or in any way usable for making policy decisions about the image.)
-
-### OpenPGP signature verification
-
-When verifying a cryptographic signature in the OpenPGP format,
-the consumer MUST verify at least the following aspects of the signature
-(like the `github.com/containers/image/signature` package does):
-
-- The blob MUST be a “Signed Message” as defined in RFC 4880 section 11.3.
-  (e.g. it MUST NOT be an unsigned “Literal Message”, or any other non-signature format).
-- The signature MUST have been made by an expected key trusted for the purpose (and the specific container image).
-- The signature MUST be correctly formed and pass the cryptographic validation.
-- The signature MUST correctly authenticate the included JSON payload
-  (in particular, the parsing of the JSON payload MUST NOT start before the complete payload has been cryptographically authenticated).
-- The signature MUST NOT be expired.
-
-The consumer SHOULD have tests for its verification code which verify that signatures failing any of the above are rejected.
-
-## JSON processing and forward compatibility
-
-The payload of the cryptographic signature is a JSON document (RFC 7159).
-Consumers SHOULD parse it very strictly,
-refusing any signature which violates the expected format (e.g. missing members, incorrect member types)
-or can be interpreted ambiguously (e.g. a duplicated member in a JSON object).
-
-Any violations of the JSON format or of other requirements in this document MAY be accepted if the JSON document can be recognized
-to have been created by a known-incorrect implementation (see [`optional.creator`](#optionalcreator) below)
-and if the semantics of the invalid document, as created by such an implementation, is clear.
-
-The top-level value of the JSON document MUST be a JSON object with exactly two members, `critical` and `optional`,
-each a JSON object.
-
-The `critical` object MUST contain a `type` member identifying the document as an atomic container signature
-(as defined [below](#criticaltype))
-and signature consumers MUST reject signatures which do not have this member or in which this member does not have the expected value.
-
-To ensure forward compatibility (allowing older signature consumers to correctly
-accept or reject signatures created at a later date, with possible extensions to this format),
-consumers MUST reject the signature if the `critical` object, or _any_ of its subobjects,
-contains _any_ member or data value which is unrecognized, unsupported, invalid, or in any other way unexpected.
-At a minimum, this includes unrecognized members in a JSON object, or incorrect types of expected members.
-
-For the same reason, consumers SHOULD accept any members with unrecognized names in the `optional` object,
-and MAY accept signatures where the object member is recognized but unsupported, or the value of the member is unsupported.
-Consumers still SHOULD reject signatures where a member of an `optional` object is supported but the value is recognized as invalid.
-
-## JSON data format
-
-An example of the full format follows, with detailed description below.
-To reiterate, consumers of the signature SHOULD perform successful cryptographic verification,
-and MUST reject unexpected data in the `critical` object, or in the top-level object, as described above.
-
-```json
-{
-    "critical": {
-        "type": "atomic container signature",
-        "image": {
-            "docker-manifest-digest": "sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e"
-        },
-        "identity": {
-            "docker-reference": "docker.io/library/busybox:latest"
-        }
-    },
-    "optional": {
-        "creator": "some software package v1.0.1-35",
-        "timestamp": 1483228800
-    }
-}
-```
-
-### `critical`
-
-This MUST be a JSON object which contains data critical to correctly evaluating the validity of a signature.
-
-Consumers MUST reject any signature where the `critical` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.
-
-### `critical.type`
-
-This MUST be a string with a value exactly equal to `atomic container signature` (three words, including the spaces).
-
-Signature consumers MUST reject signatures which do not have this member or in which this member does not have exactly the expected value.
-
-(The consumers MAY support signatures with a different value of the `type` member, if any is defined in the future;
-if so, the rest of the JSON document is interpreted according to rules defining that value of `critical.type`,
-not by this document.)
-
-### `critical.image`
-
-This MUST be a JSON object which identifies the container image this signature applies to.
-
-Consumers MUST reject any signature where the `critical.image` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.
-
-(Currently only the `docker-manifest-digest` way of identifying a container image is defined;
-alternatives to this may be defined in the future,
-but existing consumers are required to reject signatures which use formats they do not support.)
-
-### `critical.image.docker-manifest-digest`
-
-This MUST be a JSON string, in the `github.com/opencontainers/go-digest.Digest` string format.
-
-The value of this member MUST match the manifest of the signed container image, as implemented in the docker/distribution manifest addressing system.
-
-The consumer of the signature SHOULD verify the manifest digest against a fully verified signature before processing the contents of the image manifest in any other way
-(e.g. parsing the manifest further or downloading layers of the image).
-
-Implementation notes:
-* A single container image manifest may have several valid manifest digest values, using different algorithms.
-* For “signed” [docker/distribution schema 1](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md) manifests,
-the manifest digest applies to the payload of the JSON web signature, not to the raw manifest blob.
-
-### `critical.identity`
-
-This MUST be a JSON object which identifies the claimed identity of the image (usually the purpose of the image, or the application, along with version information),
-as asserted by the author of the signature.
-
-Consumers MUST reject any signature where the `critical.identity` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.
-
-(Currently only the `docker-reference` way of claiming an image identity/purpose is defined;
-alternatives to this may be defined in the future,
-but existing consumers are required to reject signatures which use formats they do not support.)
-
-### `critical.identity.docker-reference`
-
-This MUST be a JSON string, in the `github.com/docker/distribution/reference` string format,
-and using the same normalization semantics (where e.g. `busybox:latest` is equivalent to `docker.io/library/busybox:latest`).
-If the normalization semantics allows multiple string representations of the claimed identity with equivalent meaning,
-the `critical.identity.docker-reference` member SHOULD use the fully explicit form (including the full host name and namespaces).
-
-The value of this member MUST match the image identity/purpose expected by the consumer of the image signature and the image
-(again, accounting for the `docker/distribution/reference` normalization semantics).
-
-In the most common case, this means that the `critical.identity.docker-reference` value must be equal to the docker/distribution reference used to refer to or download the image.
-However, depending on the specific application, users or system administrators may accept less specific matches
-(e.g. ignoring the tag value in the signature when pulling the `:latest` tag or when referencing an image by digest),
-or they may require `critical.identity.docker-reference` values with a completely different namespace to the reference used to refer to/download the image
-(e.g. requiring a `critical.identity.docker-reference` value which identifies the image as coming from a supplier when fetching it from a company-internal mirror of approved images).
-The software performing this verification SHOULD allow the users to define such a policy using the [policy.json signature verification policy file format](policy.json.md).
-
-The `critical.identity.docker-reference` value SHOULD contain either a tag or digest;
-in most cases, it SHOULD use a tag rather than a digest. (See also the default [`matchRepoDigestOrExact` matching semantics in `policy.json`](policy.json.md#signedby).)
-
-### `optional`
-
-This MUST be a JSON object.
-
-Consumers SHOULD accept any members with unrecognized names in the `optional` object,
-and MAY accept a signature where the object member is recognized but unsupported, or the value of the member is valid but unsupported.
-Consumers still SHOULD reject any signature where a member of an `optional` object is supported but the value is recognized as invalid.
-
-### `optional.creator`
-
-If present, this MUST be a JSON string, identifying the name and version of the software which has created the signature.
-
-The contents of this string are not defined in detail; however each implementation creating atomic container signatures:
-
-- SHOULD define the contents to unambiguously define the software in practice (e.g. it SHOULD contain the name of the software, not only the version number)
-- SHOULD use a build and versioning process which ensures that the contents of this string (e.g. an included version number)
-  changes whenever the format or semantics of the generated signature changes in any way;
-  it SHOULD NOT be possible for two implementations which use a different format or semantics to have the same `optional.creator` value
-- SHOULD use a format which is reasonably easy to parse in software (perhaps using a regexp),
-  and which makes it easy enough to recognize a range of versions of a specific implementation
-  (e.g. the version of the implementation SHOULD NOT be only a git hash, because they don’t have an easily defined ordering;
-  the string should contain a version number, or at least a date of the commit).
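To illustrate the parsing recommendation above, here is a minimal Go sketch that extracts an implementation name and version from an `optional.creator` value shaped like the example earlier (`some software package v1.0.1-35`). The creator format is implementation-defined, so this regexp is only an assumption for illustration:

```go
package main

import (
	"fmt"
	"regexp"
)

// creatorRe is a hypothetical pattern: "<implementation name> v<version>".
// Real optional.creator values are implementation-defined.
var creatorRe = regexp.MustCompile(`^(.+) v([0-9]+(?:\.[0-9]+)*(?:-[0-9]+)?)$`)

func main() {
	if m := creatorRe.FindStringSubmatch("some software package v1.0.1-35"); m != nil {
		fmt.Printf("implementation=%q version=%q\n", m[1], m[2])
	}
}
```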
-
-Consumers of atomic container signatures MAY recognize specific values or sets of values of `optional.creator`
-(perhaps augmented with `optional.timestamp`),
-and MAY change their processing of the signature based on these values
-(usually to accommodate violations of this specification in past versions of the signing software which cannot be fixed retroactively),
-as long as the semantics of the invalid document, as created by such an implementation, is clear.
-
-If consumers of signatures do change their behavior based on the `optional.creator` value,
-they SHOULD take care that the way they process the signatures is not inconsistent with
-strictly validating signature consumers.
-(I.e. it is acceptable for a consumer to accept a signature based on a specific `optional.creator` value
-if other implementations would completely reject the signature,
-but it would be very undesirable for the two kinds of implementations to accept the signature in different
-and inconsistent situations.)
-
-### `optional.timestamp`
-
-If present, this MUST be a JSON number, which is representable as a 64-bit integer, and identifies the time when the signature was created
-as the number of seconds since the UNIX epoch (Jan 1 1970 00:00 UTC).
diff --git a/vendor/github.com/containers/image/docs/policy.json.md b/vendor/github.com/containers/image/docs/policy.json.md
deleted file mode 100644
index 2984d388710c..000000000000
--- a/vendor/github.com/containers/image/docs/policy.json.md
+++ /dev/null
@@ -1,267 +0,0 @@
-% POLICY.JSON(5) policy.json Man Page
-% Miloslav Trmač
-% September 2016
-
-# Signature verification policy file format
-
-Signature verification policy files are used to specify policy, e.g. trusted keys,
-applicable when deciding whether to accept an image, or individual signatures of that image, as valid.
-
-The default policy is stored (unless overridden at compile-time) at `/etc/containers/policy.json`;
-applications performing verification may allow using a different policy instead.
-
-## Overall structure
-
-The signature verification policy file, usually called `policy.json`,
-uses a JSON format. Unlike some other JSON files, its parsing is fairly strict:
-unrecognized, duplicated or otherwise invalid fields cause the entire file,
-and usually the entire operation, to be rejected.
-
-The purpose of the policy file is to define a set of *policy requirements* for a container image,
-usually depending on its location (where it is being pulled from) or otherwise defined identity.
-
-Policy requirements can be defined for:
-
-- An individual *scope* in a *transport*.
-  The *transport* values are the same as the transport prefixes when pushing/pulling images (e.g. `docker:`, `atomic:`),
-  and *scope* values are defined by each transport; see below for more details.
-
-  Usually, a scope can be defined to match a single image, and various prefixes of
-  such a most specific scope define namespaces of matching images.
-- A default policy for a single transport, expressed using an empty string as a scope
-- A global default policy.
-
-If multiple policy requirements match a given image, only the requirements from the most specific match apply;
-the more general policy requirements definitions are ignored.
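As a sketch of the “most specific match wins” lookup just described (an assumption-laden illustration, not the actual `github.com/containers/image/signature` implementation, which is considerably more involved), the following Go example walks an image scope up through its prefixes until a configured entry is found, falling back to the transport default `""`:

```go
package main

import (
	"fmt"
	"strings"
)

// requirementsForScope returns the policy requirements for an image scope within
// one transport: the longest configured scope that is a prefix of the image scope
// wins; the empty string is the transport-wide default.
func requirementsForScope(transport map[string][]string, scope string) []string {
	for s := scope; ; {
		if reqs, ok := transport[s]; ok {
			return reqs
		}
		i := strings.LastIndexAny(s, "/:@")
		if i < 0 {
			break
		}
		s = s[:i] // drop the tag/digest first, then path components
	}
	return transport[""] // transport default, if any
}

func main() {
	docker := map[string][]string{
		"":                          {"reject"},
		"docker.io/library":         {"signedBy"},
		"docker.io/library/busybox": {"insecureAcceptAnything"},
	}
	fmt.Println(requirementsForScope(docker, "docker.io/library/busybox:latest"))
	fmt.Println(requirementsForScope(docker, "docker.io/library/alpine:3.10"))
	fmt.Println(requirementsForScope(docker, "quay.io/ns/img:1.0"))
}
```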
-
-The overall policy is expressed in JSON using the top-level syntax
-```js
-{
-    "default": [/* policy requirements: global default */],
-    "transports": {
-        transport_name: {
-            "": [/* policy requirements: default for transport $transport_name */],
-            scope_1: [/* policy requirements: default for $scope_1 in $transport_name */],
-            scope_2: [/*…*/]
-            /*…*/
-        },
-        transport_name_2: {/*…*/}
-        /*…*/
-    }
-}
-```
-
-The global `default` set of policy requirements is mandatory; all of the other fields
-(`transports` itself, any specific transport, the transport-specific default, etc.) are optional.
-
-
-## Supported transports and their scopes
-
-### `atomic:`
-
-The `atomic:` transport refers to images in an Atomic Registry.
-
-Supported scopes use the form _hostname_[`:`_port_][`/`_namespace_[`/`_imagestream_ [`:`_tag_]]],
-i.e. either specifying a complete name of a tagged image, or a prefix denoting
-a host/namespace/image stream.
-
-*Note:* The _hostname_ and _port_ refer to the Docker registry host and port (the one used
-e.g. for `docker pull`), _not_ to the OpenShift API host and port.
-
-### `dir:`
-
-The `dir:` transport refers to images stored in local directories.
-
-Supported scopes are paths of directories (either containing a single image or
-subdirectories possibly containing images).
-
-*Note:* The paths must be absolute and contain no symlinks. Paths violating these requirements may be silently ignored.
-
-The top-level scope `"/"` is forbidden; use the transport default scope `""`,
-for consistency with other transports.
-
-### `docker:`
-
-The `docker:` transport refers to images in a registry implementing the "Docker Registry HTTP API V2".
-
-Scopes matching individual images are named Docker references *in the fully expanded form*, either
-using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`).
-
-More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest),
-a repository namespace, or a registry host (by only specifying the host name).
-
-### `oci:`
-
-The `oci:` transport refers to images in directories compliant with "Open Container Image Layout Specification".
-
-Supported scopes use the form _directory_`:`_tag_, with _directory_ referring to
-a directory containing one or more tags, or any of the parent directories.
-
-*Note:* See `dir:` above for semantics and restrictions on the directory paths; they apply to `oci:` equivalently.
-
-## Policy Requirements
-
-Using the mechanisms above, a set of policy requirements is looked up. The policy requirements
-are represented as a JSON array of individual requirement objects. For an image to be accepted,
-*all* of the requirements must be satisfied simultaneously.
-
-The policy requirements can also be used to decide whether an individual signature is accepted (= is signed by a recognized key of a known author);
-in that case some requirements may apply only to some signatures, but each signature must be accepted by *at least one* requirement object.
-
-The following requirement objects are supported:
-
-### `insecureAcceptAnything`
-
-A simple requirement with the following syntax:
-
-```json
-{"type":"insecureAcceptAnything"}
-```
-
-This requirement accepts any image (but note that other requirements in the array still apply).
-
-When deciding to accept an individual signature, this requirement does not have any effect; it does *not* cause the signature to be accepted, though.
-
-This is useful primarily for policy scopes where no signature verification is required;
-because the array of policy requirements must not be empty, this requirement is used
-to represent the lack of requirements explicitly.
-
-### `reject`
-
-A simple requirement with the following syntax:
-
-```json
-{"type":"reject"}
-```
-
-This requirement rejects every image, and every signature.
-
-### `signedBy`
-
-This requirement requires an image to be signed with an expected identity, or accepts a signature if it is using an expected identity and key.
-
-```js
-{
-    "type": "signedBy",
-    "keyType": "GPGKeys", /* The only currently supported value */
-    "keyPath": "/path/to/local/keyring/file",
-    "keyData": "base64-encoded-keyring-data",
-    "signedIdentity": identity_requirement
-}
-```
-
-
-Exactly one of `keyPath` and `keyData` must be present, containing a GPG keyring of one or more public keys. Only signatures made by these keys are accepted.
-
-The `signedIdentity` field, a JSON object, specifies what image identity the signature claims about the image.
-One of the following alternatives is supported:
-
-- The identity in the signature must exactly match the image identity. Note that with this, referencing an image by digest (with a signature claiming a _repository_`:`_tag_ identity) will fail.
-
-  ```json
-  {"type":"matchExact"}
-  ```
-- If the image identity carries a tag, the identity in the signature must exactly match;
-  if the image identity uses a digest reference, the identity in the signature must be in the same repository as the image identity (using any tag).
-
-  (Note that with images identified using digest references, the digest from the reference is validated even before signature verification starts.)
-
-  ```json
-  {"type":"matchRepoDigestOrExact"}
-  ```
-- The identity in the signature must be in the same repository as the image identity. This is useful e.g. to pull an image using the `:latest` tag when the image is signed with a tag specifying an exact image version.
-
-  ```json
-  {"type":"matchRepository"}
-  ```
-- The identity in the signature must exactly match a specified identity.
-  This is useful e.g. when locally mirroring images signed using their public identity.
-
-  ```js
-  {
-      "type": "exactReference",
-      "dockerReference": docker_reference_value
-  }
-  ```
-- The identity in the signature must be in the same repository as a specified identity.
-  This combines the properties of `matchRepository` and `exactReference`.
-
-  ```js
-  {
-      "type": "exactRepository",
-      "dockerRepository": docker_repository_value
-  }
-  ```
-
-If the `signedIdentity` field is missing, it is treated as `matchRepoDigestOrExact`.
-
-*Note*: `matchExact`, `matchRepoDigestOrExact` and `matchRepository` can only be used if a Docker-like image identity is
-provided by the transport. In particular, the `dir:` and `oci:` transports can only be
-used with `exactReference` or `exactRepository`.
-
-
-
-## Examples
-
-It is *strongly* recommended to set the `default` policy to `reject`, and then
-selectively allow individual transports and scopes as desired.
-
-### A reasonably locked-down system
-
-(Note that the `/*`…`*/` comments are not valid in JSON, and must not be used in real policies.)
-
-```js
-{
-    "default": [{"type": "reject"}], /* Reject anything not explicitly allowed */
-    "transports": {
-        "docker": {
-            /* Allow installing images from a specific repository namespace, without cryptographic verification.
-               This namespace includes images like openshift/hello-openshift and openshift/origin. */
-            "docker.io/openshift": [{"type": "insecureAcceptAnything"}],
-            /* Similarly, allow installing the “official” busybox images. Note how the fully expanded
-               form, with the explicit /library/, must be used. */
-            "docker.io/library/busybox": [{"type": "insecureAcceptAnything"}]
-            /* Other docker: images use the global default policy and are rejected */
-        },
-        "dir": {
-            "": [{"type": "insecureAcceptAnything"}] /* Allow any images originating in local directories */
-        },
-        "atomic": {
-            /* The common case: using a known key for a repository or set of repositories */
-            "hostname:5000/myns/official": [
-                {
-                    "type": "signedBy",
-                    "keyType": "GPGKeys",
-                    "keyPath": "/path/to/official-pubkey.gpg"
-                }
-            ],
-            /* A more complex example, for a repository which contains a mirror of a third-party product,
-               which must be signed-off by local IT */
-            "hostname:5000/vendor/product": [
-                { /* Require the image to be signed by the original vendor, using the vendor's repository location. */
-                    "type": "signedBy",
-                    "keyType": "GPGKeys",
-                    "keyPath": "/path/to/vendor-pubkey.gpg",
-                    "signedIdentity": {
-                        "type": "exactRepository",
-                        "dockerRepository": "vendor-hostname/product/repository"
-                    }
-                },
-                { /* Require the image to _also_ be signed by a local reviewer. */
-                    "type": "signedBy",
-                    "keyType": "GPGKeys",
-                    "keyPath": "/path/to/reviewer-pubkey.gpg"
-                }
-            ]
-        }
-    }
-}
-```
-
-### Completely disable security, allow all images, do not trust any signatures
-
-```json
-{
-    "default": [{"type": "insecureAcceptAnything"}]
-}
-```
diff --git a/vendor/github.com/containers/image/docs/registries.d.md b/vendor/github.com/containers/image/docs/registries.d.md
deleted file mode 100644
index 424ecf8a40a4..000000000000
--- a/vendor/github.com/containers/image/docs/registries.d.md
+++ /dev/null
@@ -1,124 +0,0 @@
-% REGISTRIES.D(5) Registries.d Man Page
-% Miloslav Trmač
-% August 2016
-# Registries Configuration Directory
-
-The registries configuration directory contains configuration for various registries
-(servers storing remote container images), and for content stored in them,
-so that the configuration does not have to be provided in command-line options over and over for every command,
-and so that it can be shared by all users of containers/image.
-
-By default (unless overridden at compile-time), the registries configuration directory is `/etc/containers/registries.d`;
-applications may allow using a different directory instead.
-
-## Directory Structure
-
-The directory may contain any number of files with the extension `.yaml`,
-each using the YAML format. Other than the mandatory extension, names of the files
-don’t matter.
-
-The contents of these files are merged together; to have a well-defined and easy to understand
-behavior, there can be only one configuration section describing a single namespace within a registry
-(in particular there can be at most one `default-docker` section across all files,
-and there can be at most one instance of any key under the `docker` section;
-these sections are documented later).
-
-Thus, it is forbidden to have two conflicting configurations for a single registry or scope,
-and it is also forbidden to split a configuration for a single registry or scope across
-more than one file (even if they are not semantically in conflict).
-
-## Registries, Scopes and Search Order
-
-Each YAML file must contain a “YAML mapping” (key-value pairs).
-Two top-level keys are defined:
-
-- `default-docker` is the _configuration section_ (as documented below)
-  for registries implementing "Docker Registry HTTP API V2".
-
-  This key is optional.
-
-- `docker` is a mapping, using individual registries implementing "Docker Registry HTTP API V2",
-  or namespaces and individual images within these registries, as keys;
-  the value assigned to any such key is a _configuration section_.
-
-  This key is optional.
-
-  Scopes matching individual images are named Docker references *in the fully expanded form*, either
-  using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`).
-
-  More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest),
-  a repository namespace, or a registry host (and a port if it differs from the default).
-
-  Note that if a registry is accessed using a hostname+port configuration, the port-less hostname
-  is _not_ used as parent scope.
-
-When searching for a configuration to apply for an individual container image, only
-the configuration for the most-precisely matching scope is used; configuration using
-more general scopes is ignored. For example, if _any_ configuration exists for
-`docker.io/library/busybox`, the configuration for `docker.io` is ignored
-(even if some element of the configuration is defined for `docker.io` and not for `docker.io/library/busybox`).
-
-## Individual Configuration Sections
-
-A single configuration section is selected for a container image using the process
-described above. The configuration section is a YAML mapping, with the following keys:
-
-- `sigstore-staging` defines a URL of the signature storage, used for editing it (adding or deleting signatures).
-
-  This key is optional; if it is missing, `sigstore` below is used.
-
-- `sigstore` defines a URL of the signature storage.
-  This URL is used for reading existing signatures,
-  and if `sigstore-staging` does not exist, also for adding or removing them.
-
-  This key is optional; if it is missing, no signature storage is defined (no signatures
-  are downloaded along with images; adding new signatures is possible only if `sigstore-staging` is defined).
-
-## Examples
-
-### Using Containers from Various Origins
-
-The following demonstrates how to consume and run images from various registries and namespaces:
-
-```yaml
-docker:
-    registry.database-supplier.com:
-        sigstore: https://sigstore.database-supplier.com
-    distribution.great-middleware.org:
-        sigstore: https://security-team.great-middleware.org/sigstore
-    docker.io/web-framework:
-        sigstore: https://sigstore.web-framework.io:8080
-```
-
-### Developing and Signing Containers, Staging Signatures
-
-For developers in `example.com`:
-
-- Consume most container images using the public servers also used by clients.
-- Use a separate signature storage for container images in a namespace corresponding to the developers' department, with a staging storage used before publishing signatures.
-- Craft an individual exception for a single branch a specific developer is working on locally.
-
-```yaml
-docker:
-    registry.example.com:
-        sigstore: https://registry-sigstore.example.com
-    registry.example.com/mydepartment:
-        sigstore: https://sigstore.mydepartment.example.com
-        sigstore-staging: file:///mnt/mydepartment/sigstore-staging
-    registry.example.com/mydepartment/myproject:mybranch:
-        sigstore: http://localhost:4242/sigstore
-        sigstore-staging: file:///home/useraccount/webroot/sigstore
-```
-
-### A Global Default
-
-If a company publishes its products using a different domain, and a different registry hostname for each of them, it is still possible to use a single signature storage server
-without listing each domain individually. This is expected to rarely happen, usually only for staging new signatures.
-
-```yaml
-default-docker:
-    sigstore-staging: file:///mnt/company/common-sigstore-staging
-```
-
-# AUTHORS
-
-Miloslav Trmač
diff --git a/vendor/github.com/containers/image/docs/signature-protocols.md b/vendor/github.com/containers/image/docs/signature-protocols.md
deleted file mode 100644
index ade23228b147..000000000000
--- a/vendor/github.com/containers/image/docs/signature-protocols.md
+++ /dev/null
@@ -1,136 +0,0 @@
-# Signature access protocols
-
-The `github.com/containers/image` library supports signatures implemented as blobs “attached to” an image.
-Some image transports (local storage formats and remote protocols) implement these signatures natively
-or trivially; for others, the protocol extensions described below are necessary.
-
-## docker/distribution registries—separate storage
-
-### Usage
-
-Any existing docker/distribution registry, whether or not it natively supports signatures,
-can be augmented with separate signature storage by configuring a signature storage URL in [`registries.d`](registries.d.md).
-`registries.d` can be configured to use one storage URL for a whole docker/distribution server,
-or also separate URLs for smaller namespaces or individual repositories within the server
-(which e.g. allows image authors to manage their own signature storage while publishing
-the images on the public `docker.io` server).
-
-The signature storage URL defines a root of a path hierarchy.
-It can be either a `file:///…` URL, pointing to a local directory structure,
-or a `http`/`https` URL, pointing to a remote server.
-`file:///` signature storage can be both read and written; `http`/`https` only supports reading.
-
-The same path hierarchy is used in both cases, so the HTTP/HTTPS server can be
-a simple static web server serving a directory structure created by writing to a `file:///` signature storage.
-(This of course does not prevent other server implementations,
-e.g. an HTTP server reading signatures from a database.)
-
-The usual workflow for producing and distributing images using the separate storage mechanism
-is to configure the repository in `registries.d` with a `sigstore-staging` URL pointing to a private
-`file:///` staging area, and a `sigstore` URL pointing to a public web server.
-To publish an image, the image author would sign the image as necessary (e.g. using `skopeo copy`),
-and then copy the created directory structure from the `file:///` staging area
-to a subdirectory of a webroot of the public web server so that they are accessible using the public `sigstore` URL.
-The author would also instruct consumers of the image to, or provide a `registries.d` configuration file to,
-set up a `sigstore` URL pointing to the public web server.
-
-### Path structure
-
-Given a _base_ signature storage URL configured in `registries.d` as mentioned above,
-and a container image stored in a docker/distribution registry using the _fully-expanded_ name
-_hostname_`/`_namespaces_`/`_name_{`@`_digest_,`:`_tag_} (e.g. for `docker.io/library/busybox:latest`,
-_namespaces_ is `library`, even if the user refers to the image using the shorter syntax as `busybox:latest`),
-signatures are accessed using URLs of the form
-> _base_`/`_namespaces_`/`_name_`@`_digest-algo_`=`_digest-value_`/signature-`_index_
-
-where _digest-algo_`:`_digest-value_ is a manifest digest usable for referencing the relevant image manifest
-(i.e. even if the user referenced the image using a tag,
-the signature storage is always disambiguated using digest references).
-Note that in the URLs used for signatures,
-_digest-algo_ and _digest-value_ are separated using the `=` character,
-not `:` like when accessing the manifest using the docker/distribution API.
-
-Within the URL, _index_ is a decimal integer (in the canonical form), starting with 1.
-Signatures are stored at URLs with successive _index_ values; to read all of them, start with _index_=1,
-and continue reading signatures and increasing _index_ as long as signatures with these _index_ values exist.
-Similarly, to add one more signature to an image, find the first _index_ which does not exist, and
-then store the new signature using that _index_ value.
-
-There is no way to list existing signatures other than iterating through the successive _index_ values,
-and no way to download all of the signatures at once.
-
-### Examples
-
-For a docker/distribution image available as `busybox@sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e`
-(or as `busybox:latest` if the `latest` tag points to a manifest with the same digest),
-and with a `registries.d` configuration specifying a `sigstore` URL `https://example.com/sigstore` for the same image,
-the following URLs would be accessed to download all signatures:
-> - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-1`
-> - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-2`
-> - …
-
-For a docker/distribution image available as `example.com/ns1/ns2/ns3/repo@somedigest:digestvalue` and the same
-`sigstore` URL, the signatures would be available at
-> `https://example.com/sigstore/ns1/ns2/ns3/repo@somedigest=digestvalue/signature-1`
-
-and so on.
-
-## (OpenShift) docker/distribution API extension
-
-As of https://github.com/openshift/origin/pull/12504/ , the OpenShift-embedded registry also provides
-an extension of the docker/distribution API which allows simpler access to the signatures,
-using only the docker/distribution API endpoint.
-
-This API is not inherently OpenShift-specific (e.g. the client does not need to know the OpenShift API endpoint,
-and credentials sufficient to access the docker/distribution API server are sufficient to access signatures as well),
-and it is the preferred way to implement signature storage in registries.
-
-See https://github.com/openshift/openshift-docs/pull/3556 for the upstream documentation of the API.
-
-To read the signature, any user with access to an image can use the `/extensions/v2/…/signatures/…`
-path to read an array of signatures.
-Use only the signature objects
-which have `version` equal to `2`, `type` equal to `atomic`, and read the signature from `content`;
-ignore the other fields of the signature object.
-
-To add a single signature, `PUT` a new object with `version` set to `2`, `type` set to `atomic`,
-and `content` set to the signature. Also set `name` to a unique name with the form
-_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (also used in the URL),
-and _per-image-name_ is any unique identifier.
-
-To add more than one signature, add them one at a time. This API does not allow deleting signatures.
-
-Note that because signatures are stored within the cluster-wide image objects,
-i.e. different namespaces can not associate different sets of signatures to the same image,
-updating signatures requires cluster-wide access to the `imagesignatures` resource
-(by default available to the `system:image-signer` role).
-
-## OpenShift-embedded registries
-
-The OpenShift-embedded registry implements the ordinary docker/distribution API,
-and it also exposes images through the OpenShift REST API (available through the “API master” servers).
-
-Note: OpenShift versions 1.5 and later support the above-described [docker/distribution API extension](#openshift-dockerdistribution-api-extension),
-which is easier to set up and should usually be preferred.
-Continue reading for details on using older versions of OpenShift.
-
-As of https://github.com/openshift/origin/pull/9181,
-signatures are exposed through the OpenShift API
-(i.e. to access the complete image, it is necessary to use both APIs,
-in particular to know the URLs for both the docker/distribution and the OpenShift API master endpoints).
-
-To read the signature, any user with access to an image can use the `imagestreamimages` namespaced
-resource to read an `Image` object and its `Signatures` array. Use only the `ImageSignature` objects
-which have `Type` equal to `atomic`, and read the signature from `Content`; ignore the other fields of
-the `ImageSignature` object.
-
-To add or remove signatures, use the cluster-wide (non-namespaced) `imagesignatures` resource,
-with `Type` set to `atomic` and `Content` set to the signature. Signature names must have the form
-_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (OpenShift “image name”),
-and _per-image-name_ is any unique identifier.
-
-Note that because signatures are stored within the cluster-wide image objects,
-i.e. different namespaces can not associate different sets of signatures to the same image,
-updating signatures requires cluster-wide access to the `imagesignatures` resource
-(by default available to the `system:image-signer` role),
-and deleting signatures is strongly discouraged
-(it deletes the signature from all namespaces which contain the same image).
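A minimal sketch of the separate-storage read protocol described earlier (the base URL and image coordinates below are hypothetical); it fetches `signature-1`, `signature-2`, … until the first missing index:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Hypothetical sigstore base URL and image coordinates.
	base := "https://example.com/sigstore"
	repo := "library/busybox" // fully-expanded repository path
	// Manifest digest, with '=' instead of ':' as the protocol requires.
	digest := "sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e"

	var sigs [][]byte
	for index := 1; ; index++ { // indexes are consecutive and start at 1
		url := fmt.Sprintf("%s/%s@%s/signature-%d", base, repo, digest, index)
		resp, err := http.Get(url)
		if err != nil {
			break // network error; a real client would report this
		}
		if resp.StatusCode != http.StatusOK { // first missing index ends the list
			resp.Body.Close()
			break
		}
		blob, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			break
		}
		sigs = append(sigs, blob) // the blobs still need cryptographic verification!
	}
	fmt.Printf("downloaded %d signature blob(s)\n", len(sigs))
}
```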
diff --git a/vendor/github.com/containers/image/image/docker_list.go b/vendor/github.com/containers/image/image/docker_list.go
deleted file mode 100644
index c79adaccab66..000000000000
--- a/vendor/github.com/containers/image/image/docker_list.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package image
-
-import (
-	"encoding/json"
-	"runtime"
-
-	"github.com/containers/image/manifest"
-	"github.com/containers/image/types"
-	"github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
-)
-
-type platformSpec struct {
-	Architecture string   `json:"architecture"`
-	OS           string   `json:"os"`
-	OSVersion    string   `json:"os.version,omitempty"`
-	OSFeatures   []string `json:"os.features,omitempty"`
-	Variant      string   `json:"variant,omitempty"`
-	Features     []string `json:"features,omitempty"` // removed in OCI
-}
-
-// A manifestDescriptor references a platform-specific manifest.
-type manifestDescriptor struct {
-	descriptor
-	Platform platformSpec `json:"platform"`
-}
-
-type manifestList struct {
-	SchemaVersion int                  `json:"schemaVersion"`
-	MediaType     string               `json:"mediaType"`
-	Manifests     []manifestDescriptor `json:"manifests"`
-}
-
-func manifestSchema2FromManifestList(src types.ImageSource, manblob []byte) (genericManifest, error) {
-	list := manifestList{}
-	if err := json.Unmarshal(manblob, &list); err != nil {
-		return nil, err
-	}
-	var targetManifestDigest digest.Digest
-	for _, d := range list.Manifests {
-		if d.Platform.Architecture == runtime.GOARCH && d.Platform.OS == runtime.GOOS {
-			targetManifestDigest = d.Digest
-			break
-		}
-	}
-	if targetManifestDigest == "" {
-		return nil, errors.New("no supported platform found in manifest list")
-	}
-	manblob, mt, err := src.GetTargetManifest(targetManifestDigest)
-	if err != nil {
-		return nil, err
-	}
-
-	matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
-	if err != nil {
-		return nil, errors.Wrap(err, "Error computing manifest digest")
-	}
-	if !matches {
-		return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest)
-	}
-
-	return manifestInstanceFromBlob(src, manblob, mt)
-}
diff --git a/vendor/github.com/containers/image/image/docker_schema1.go b/vendor/github.com/containers/image/image/docker_schema1.go
deleted file mode 100644
index 4152b3cdf78f..000000000000
--- a/vendor/github.com/containers/image/image/docker_schema1.go
+++ /dev/null
@@ -1,375 +0,0 @@
-package image
-
-import (
-	"encoding/json"
-	"regexp"
-	"strings"
-	"time"
-
-	"github.com/containers/image/docker/reference"
-	"github.com/containers/image/manifest"
-	"github.com/containers/image/types"
-	"github.com/opencontainers/go-digest"
-	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
-)
-
-var (
-	validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
-)
-
-type fsLayersSchema1 struct {
-	BlobSum digest.Digest `json:"blobSum"`
-}
-
-type historySchema1 struct {
-	V1Compatibility string `json:"v1Compatibility"`
-}
-
-// v1Compatibility is the JSON structure stored as a string in historySchema1.V1Compatibility. It is similar to v1Image but not the same; in particular note the ThrowAway field.
-type v1Compatibility struct {
-	ID              string    `json:"id"`
-	Parent          string    `json:"parent,omitempty"`
-	Comment         string    `json:"comment,omitempty"`
-	Created         time.Time `json:"created"`
-	ContainerConfig struct {
-		Cmd []string
-	} `json:"container_config,omitempty"`
-	Author    string `json:"author,omitempty"`
-	ThrowAway bool   `json:"throwaway,omitempty"`
-}
-
-type manifestSchema1 struct {
-	Name          string            `json:"name"`
-	Tag           string            `json:"tag"`
-	Architecture  string            `json:"architecture"`
-	FSLayers      []fsLayersSchema1 `json:"fsLayers"`
-	History       []historySchema1  `json:"history"`
-	SchemaVersion int               `json:"schemaVersion"`
-}
-
-func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) {
-	mschema1 := &manifestSchema1{}
-	if err := json.Unmarshal(manifest, mschema1); err != nil {
-		return nil, err
-	}
-	if mschema1.SchemaVersion != 1 {
-		return nil, errors.Errorf("unsupported schema version %d", mschema1.SchemaVersion)
-	}
-	if len(mschema1.FSLayers) != len(mschema1.History) {
-		return nil, errors.New("length of history not equal to number of layers")
-	}
-	if len(mschema1.FSLayers) == 0 {
-		return nil, errors.New("no FSLayers in manifest")
-	}
-
-	if err := fixManifestLayers(mschema1); err != nil {
-		return nil, err
-	}
-	return mschema1, nil
-}
-
-// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data.
-func manifestSchema1FromComponents(ref reference.Named, fsLayers []fsLayersSchema1, history []historySchema1, architecture string) genericManifest {
-	var name, tag string
-	if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
-		name = reference.Path(ref)
-		if tagged, ok := ref.(reference.NamedTagged); ok {
-			tag = tagged.Tag()
-		}
-	}
-	return &manifestSchema1{
-		Name:          name,
-		Tag:           tag,
-		Architecture:  architecture,
-		FSLayers:      fsLayers,
-		History:       history,
-		SchemaVersion: 1,
-	}
-}
-
-func (m *manifestSchema1) serialize() ([]byte, error) {
-	// docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
-	unsigned, err := json.Marshal(*m)
-	if err != nil {
-		return nil, err
-	}
-	return manifest.AddDummyV2S1Signature(unsigned)
-}
-
-func (m *manifestSchema1) manifestMIMEType() string {
-	return manifest.DockerV2Schema1SignedMediaType
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
-func (m *manifestSchema1) ConfigInfo() types.BlobInfo {
-	return types.BlobInfo{}
-}
-
-// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
-// The result is cached; it is OK to call this however often you need.
-func (m *manifestSchema1) ConfigBlob() ([]byte, error) {
-	return nil, nil
-}
-
-// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
-// layers in the resulting configuration isn't guaranteed to be returned due to how
-// old image manifests work (docker v2s1 especially).
-func (m *manifestSchema1) OCIConfig() (*imgspecv1.Image, error) {
-	v2s2, err := m.convertToManifestSchema2(nil, nil)
-	if err != nil {
-		return nil, err
-	}
-	return v2s2.OCIConfig()
-}
-
-// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
-	layers := make([]types.BlobInfo, len(m.FSLayers))
-	for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
-		layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
-	}
-	return layers
-}
-
-// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
-// It returns false if the manifest does not embed a Docker reference.
-// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
-func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
-	// This is a bit convoluted: We can’t just have a "get embedded docker reference" method
-	// and have the “does it conflict” logic in the generic copy code, because the manifest does not actually
-	// embed a full docker/distribution reference, but only the repo name and tag (without the host name).
-	// So we would have to provide a “return repo without host name, and tag” getter for the generic code,
-	// which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the
-	// generic copy code needs to know about is reference.Named and that a manifest may need updating
-	// for some destinations.
-	name := reference.Path(ref)
-	var tag string
-	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
-		tag = tagged.Tag()
-	} else {
-		tag = ""
-	}
-	return m.Name != name || m.Tag != tag
-}
-
-func (m *manifestSchema1) imageInspectInfo() (*types.ImageInspectInfo, error) {
-	v1 := &v1Image{}
-	if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), v1); err != nil {
-		return nil, err
-	}
-	return &types.ImageInspectInfo{
-		Tag:           m.Tag,
-		DockerVersion: v1.DockerVersion,
-		Created:       v1.Created,
-		Labels:        v1.Config.Labels,
-		Architecture:  v1.Architecture,
-		Os:            v1.OS,
-	}, nil
-}
-
-// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
-// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
-// (most importantly it forces us to download the full layers even if they are already present at the destination).
-func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
-	return options.ManifestMIMEType == manifest.DockerV2Schema2MediaType
-}
-
-// UpdatedImage returns a types.Image modified according to options.
-// This does not change the state of the original Image object.
-func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
-	copy := *m
-	if options.LayerInfos != nil {
-		// Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
-		if len(copy.FSLayers) != len(options.LayerInfos) {
-			return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos))
-		}
-		for i, info := range options.LayerInfos {
-			// (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
-			// but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
-			// So, we don't bother recomputing the IDs in m.History.V1Compatibility.
-			copy.FSLayers[(len(options.LayerInfos)-1)-i].BlobSum = info.Digest
-		}
-	}
-	if options.EmbeddedDockerReference != nil {
-		copy.Name = reference.Path(options.EmbeddedDockerReference)
-		if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged {
-			copy.Tag = tagged.Tag()
-		} else {
-			copy.Tag = ""
-		}
-	}
-
-	switch options.ManifestMIMEType {
-	case "": // No conversion, OK
-	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
-		// We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature); so,
-		// handle conversions between them by doing nothing.
-	case manifest.DockerV2Schema2MediaType:
-		return copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
-	default:
-		return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType)
-	}
-
-	return memoryImageFromManifest(&copy), nil
-}
-
-// fixManifestLayers, after validating the supplied manifest
-// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History),
-// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates,
-// both from manifest.History and manifest.FSLayers).
-// Note that even after this succeeds, manifest.FSLayers may contain duplicate entries
-// (for Dockerfile operations which change the configuration but not the filesystem).
-func fixManifestLayers(manifest *manifestSchema1) error {
-	type imageV1 struct {
-		ID     string
-		Parent string
-	}
-	// Per the specification, we can assume that len(manifest.FSLayers) == len(manifest.History)
-	imgs := make([]*imageV1, len(manifest.FSLayers))
-	for i := range manifest.FSLayers {
-		img := &imageV1{}
-
-		if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil {
-			return err
-		}
-
-		imgs[i] = img
-		if err := validateV1ID(img.ID); err != nil {
-			return err
-		}
-	}
-	if imgs[len(imgs)-1].Parent != "" {
-		return errors.New("Invalid parent ID in the base layer of the image")
-	}
-	// check general duplicates to error instead of a deadlock
-	idmap := make(map[string]struct{})
-	var lastID string
-	for _, img := range imgs {
-		// skip IDs that appear after each other, we handle those later
-		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
-			return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
-		}
-		lastID = img.ID
-		idmap[lastID] = struct{}{}
-	}
-	// backwards loop so that we keep the remaining indexes after removing items
-	for i := len(imgs) - 2; i >= 0; i-- {
-		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
-			manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...)
-			manifest.History = append(manifest.History[:i], manifest.History[i+1:]...)
-		} else if imgs[i].Parent != imgs[i+1].ID {
-			return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
-		}
-	}
-	return nil
-}
-
-func validateV1ID(id string) error {
-	if ok := validHex.MatchString(id); !ok {
-		return errors.Errorf("image ID %q is invalid", id)
-	}
-	return nil
-}
-
-// Based on github.com/docker/docker/distribution/pull_v2.go
-func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (types.Image, error) {
-	if len(m.History) == 0 {
-		// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
-		return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
-	}
-	if len(m.History) != len(m.FSLayers) {
-		return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers))
-	}
-	if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.FSLayers) {
-		return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers))
-	}
-	if layerDiffIDs != nil && len(layerDiffIDs) != len(m.FSLayers) {
-		return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers))
-	}
-
-	rootFS := rootFS{
-		Type:      "layers",
-		DiffIDs:   []digest.Digest{},
-		BaseLayer: "",
-	}
-	var layers []descriptor
-	history := make([]imageHistory, len(m.History))
-	for v1Index := len(m.History) - 1; v1Index >= 0; v1Index-- {
-		v2Index := (len(m.History) - 1) - v1Index
-
-		var v1compat v1Compatibility
-		if err := json.Unmarshal([]byte(m.History[v1Index].V1Compatibility), &v1compat); err != nil {
-			return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index)
-		}
-		history[v2Index] = imageHistory{
-			Created:    v1compat.Created,
-			Author:     v1compat.Author,
-			CreatedBy:  strings.Join(v1compat.ContainerConfig.Cmd, " "),
-			Comment:    v1compat.Comment,
-			EmptyLayer: v1compat.ThrowAway,
-		}
-
-		if !v1compat.ThrowAway {
-			var size int64
-			if uploadedLayerInfos != nil {
-				size = uploadedLayerInfos[v2Index].Size
-			}
-			var d digest.Digest
-			if layerDiffIDs != nil {
-				d = layerDiffIDs[v2Index]
-			}
-			layers = append(layers, descriptor{
-				MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
-				Size:      size,
-				Digest:    m.FSLayers[v1Index].BlobSum,
-			})
-			rootFS.DiffIDs = append(rootFS.DiffIDs, d)
-		}
-	}
-	configJSON, err := configJSONFromV1Config([]byte(m.History[0].V1Compatibility), rootFS, history)
-	if err != nil {
-		return nil, err
-	}
-	configDescriptor := descriptor{
-		MediaType: "application/vnd.docker.container.image.v1+json",
-		Size:      int64(len(configJSON)),
-		Digest:    digest.FromBytes(configJSON),
-	}
-
-	m2 := manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers)
-	return memoryImageFromManifest(m2), nil
-}
-
-func configJSONFromV1Config(v1ConfigJSON []byte, rootFS rootFS, history []imageHistory) ([]byte, error) {
-	// github.com/docker/docker/image/v1/imagev1.go:MakeConfigFromV1Config unmarshals and re-marshals the input if docker_version is < 1.8.3 to remove blank fields;
-	// we don't do that here. FIXME? Should we? AFAICT it would only affect the digest value of the schema2 manifest, and we don't particularly need that to be
-	// a consistently reproducible value.
-
-	// Preserve everything we don't specifically know about.
-	// (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
-	rawContents := map[string]*json.RawMessage{}
-	if err := json.Unmarshal(v1ConfigJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
-		return nil, err
-	}
-
-	delete(rawContents, "id")
-	delete(rawContents, "parent")
-	delete(rawContents, "Size")
-	delete(rawContents, "parent_id")
-	delete(rawContents, "layer_id")
-	delete(rawContents, "throwaway")
-
-	updates := map[string]interface{}{"rootfs": rootFS, "history": history}
-	for field, value := range updates {
-		encoded, err := json.Marshal(value)
-		if err != nil {
-			return nil, err
-		}
-		rawContents[field] = (*json.RawMessage)(&encoded)
-	}
-	return json.Marshal(rawContents)
-}
diff --git a/vendor/github.com/containers/image/image/docker_schema1_test.go b/vendor/github.com/containers/image/image/docker_schema1_test.go
deleted file mode 100644
index 3aa1aad64ec5..000000000000
--- a/vendor/github.com/containers/image/image/docker_schema1_test.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package image
-
-import (
-	"io/ioutil"
-	"path/filepath"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-func manifestSchema1FromFixture(t *testing.T, fixture string) genericManifest {
-	manifest, err := ioutil.ReadFile(filepath.Join("fixtures", fixture))
-	require.NoError(t, err)
-
-	m, err := manifestSchema1FromManifest(manifest)
-	require.NoError(t, err)
-	return m
-}
-
-func TestManifestSchema1ToOCIConfig(t *testing.T) {
-	m := manifestSchema1FromFixture(t, "schema1-to-oci-config.json")
-	configOCI, err := m.OCIConfig()
-	require.NoError(t, err)
-	assert.Equal(t, "/pause", configOCI.Config.Entrypoint[0])
-}
diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go
deleted file mode 100644
index 8cc3c495942f..000000000000
--- a/vendor/github.com/containers/image/image/docker_schema2.go
+++ /dev/null
@@ -1,364 +0,0 @@
-package image
-
-import (
-	"bytes"
-	"crypto/sha256"
-	"encoding/hex"
-	"encoding/json"
-	"io/ioutil"
-	"strings"
-
-	"github.com/containers/image/docker/reference"
-	"github.com/containers/image/manifest"
-	"github.com/containers/image/types"
-	"github.com/opencontainers/go-digest"
-	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-)
-
-// gzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
-// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
-// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
-// in registries, so let’s use the same values.
-var gzippedEmptyLayer = []byte{
-	31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
-	0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
-}
-
-// gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer
-const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
-
-type descriptor struct {
-	MediaType string        `json:"mediaType"`
-	Size      int64         `json:"size"`
-	Digest    digest.Digest `json:"digest"`
-	URLs      []string      `json:"urls,omitempty"`
-}
-
-type manifestSchema2 struct {
-	src        types.ImageSource // May be nil if configBlob is not nil
-	configBlob []byte            // If set, corresponds to contents of ConfigDescriptor.
- SchemaVersion int `json:"schemaVersion"`
- MediaType string `json:"mediaType"`
- ConfigDescriptor descriptor `json:"config"`
- LayersDescriptors []descriptor `json:"layers"`
-}
-
-func manifestSchema2FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) {
- v2s2 := manifestSchema2{src: src}
- if err := json.Unmarshal(manifest, &v2s2); err != nil {
- return nil, err
- }
- return &v2s2, nil
-}
-
-// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
-func manifestSchema2FromComponents(config descriptor, src types.ImageSource, configBlob []byte, layers []descriptor) genericManifest {
- return &manifestSchema2{
- src: src,
- configBlob: configBlob,
- SchemaVersion: 2,
- MediaType: manifest.DockerV2Schema2MediaType,
- ConfigDescriptor: config,
- LayersDescriptors: layers,
- }
-}
-
-func (m *manifestSchema2) serialize() ([]byte, error) {
- return json.Marshal(*m)
-}
-
-func (m *manifestSchema2) manifestMIMEType() string {
- return m.MediaType
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
-func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
- return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
-}
-
-// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
-// layers in the resulting configuration isn't guaranteed to be returned due to how
-// old image manifests work (docker v2s1 especially).
-func (m *manifestSchema2) OCIConfig() (*imgspecv1.Image, error) {
- configBlob, err := m.ConfigBlob()
- if err != nil {
- return nil, err
- }
- // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
- // than OCI v1. This unmarshal makes sure we drop docker v2s2
- // fields that aren't needed in OCI v1.
- configOCI := &imgspecv1.Image{}
- if err := json.Unmarshal(configBlob, configOCI); err != nil {
- return nil, err
- }
- return configOCI, nil
-}
-
-// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
-// The result is cached; it is OK to call this however often you need.
-func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
- if m.configBlob == nil {
- if m.src == nil {
- return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
- }
- stream, _, err := m.src.GetBlob(types.BlobInfo{
- Digest: m.ConfigDescriptor.Digest,
- Size: m.ConfigDescriptor.Size,
- URLs: m.ConfigDescriptor.URLs,
- })
- if err != nil {
- return nil, err
- }
- defer stream.Close()
- blob, err := ioutil.ReadAll(stream)
- if err != nil {
- return nil, err
- }
- computedDigest := digest.FromBytes(blob)
- if computedDigest != m.ConfigDescriptor.Digest {
- return nil, errors.Errorf("Downloaded config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
- }
- m.configBlob = blob
- }
- return m.configBlob, nil
-}
-
-// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layers).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
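// A minimal usage sketch (annotation; "layer" and the format string are
// illustrative, not part of the original file). Callers should tolerate
// Size == -1 and semantically relevant duplicates:
//
//	for _, layer := range m.LayerInfos() {
//		fmt.Printf("%s (%d bytes)\n", layer.Digest, layer.Size)
//	}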
-func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
- blobs := []types.BlobInfo{}
- for _, layer := range m.LayersDescriptors {
- blobs = append(blobs, types.BlobInfo{
- Digest: layer.Digest,
- Size: layer.Size,
- URLs: layer.URLs,
- })
- }
- return blobs
-}
-
-// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with the destination ref.
-// It returns false if the manifest does not embed a Docker reference.
-// (This embedding unfortunately happens for Docker schema1; please do not add support for it in any new formats.)
-func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
- return false
-}
-
-func (m *manifestSchema2) imageInspectInfo() (*types.ImageInspectInfo, error) {
- config, err := m.ConfigBlob()
- if err != nil {
- return nil, err
- }
- v1 := &v1Image{}
- if err := json.Unmarshal(config, v1); err != nil {
- return nil, err
- }
- return &types.ImageInspectInfo{
- DockerVersion: v1.DockerVersion,
- Created: v1.Created,
- Labels: v1.Config.Labels,
- Architecture: v1.Architecture,
- Os: v1.OS,
- }, nil
-}
-
-// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
-// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
-// (most importantly it forces us to download the full layers even if they are already present at the destination).
-func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
- return false
-}
-
-// UpdatedImage returns a types.Image modified according to options.
-// This does not change the state of the original Image object.
-func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
- copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
- if options.LayerInfos != nil {
- if len(copy.LayersDescriptors) != len(options.LayerInfos) {
- return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
- }
- copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos))
- for i, info := range options.LayerInfos {
- copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType
- copy.LayersDescriptors[i].Digest = info.Digest
- copy.LayersDescriptors[i].Size = info.Size
- copy.LayersDescriptors[i].URLs = info.URLs
- }
- }
- // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
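// Annotation: the switch below is the sole conversion entry point for a
// schema2 image. A hedged sketch of requesting an OCI manifest ("img" and
// the error handling are illustrative, not part of the original file),
// mirroring TestConvertToManifestOCI later in this diff:
//
//	converted, err := img.UpdatedImage(types.ManifestUpdateOptions{
//		ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
//	})
//	if err != nil {
//		// schema2 -> schema2 and unknown MIME types are rejected here
//	}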
- - switch options.ManifestMIMEType { - case "": // No conversion, OK - case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType: - return copy.convertToManifestSchema1(options.InformationOnly.Destination) - case imgspecv1.MediaTypeImageManifest: - return copy.convertToManifestOCI1() - default: - return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType) - } - - return memoryImageFromManifest(©), nil -} - -func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) { - configOCI, err := m.OCIConfig() - if err != nil { - return nil, err - } - configOCIBytes, err := json.Marshal(configOCI) - if err != nil { - return nil, err - } - - config := descriptorOCI1{ - descriptor: descriptor{ - MediaType: imgspecv1.MediaTypeImageConfig, - Size: int64(len(configOCIBytes)), - Digest: digest.FromBytes(configOCIBytes), - }, - } - - layers := make([]descriptorOCI1, len(m.LayersDescriptors)) - for idx := range layers { - layers[idx] = descriptorOCI1{descriptor: m.LayersDescriptors[idx]} - if m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType { - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable - } else { - // we assume layers are gzip'ed because docker v2s2 only deals with - // gzip'ed layers. However, OCI has non-gzip'ed layers as well. - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip - } - } - - m1 := manifestOCI1FromComponents(config, m.src, configOCIBytes, layers) - return memoryImageFromManifest(m1), nil -} - -// Based on docker/distribution/manifest/schema1/config_builder.go -func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) (types.Image, error) { - configBytes, err := m.ConfigBlob() - if err != nil { - return nil, err - } - imageConfig := &image{} - if err := json.Unmarshal(configBytes, imageConfig); err != nil { - return nil, err - } - - // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. - fsLayers := make([]fsLayersSchema1, len(imageConfig.History)) - history := make([]historySchema1, len(imageConfig.History)) - nonemptyLayerIndex := 0 - var parentV1ID string // Set in the loop - v1ID := "" - haveGzippedEmptyLayer := false - if len(imageConfig.History) == 0 { - // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. 
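// Annotation on the conversion strategy below (not original code): the loop
// walks the schema2 history oldest-first while filling the schema1 arrays
// newest-first (v1Index == len(History)-1-v2Index), substitutes the constant
// gzippedEmptyLayer blob for empty-layer entries, and chains synthetic v1 IDs
// so that each history entry's "parent" matches the previous entry's "id".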
- return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) - } - for v2Index, historyEntry := range imageConfig.History { - parentV1ID = v1ID - v1Index := len(imageConfig.History) - 1 - v2Index - - var blobDigest digest.Digest - if historyEntry.EmptyLayer { - if !haveGzippedEmptyLayer { - logrus.Debugf("Uploading empty layer during conversion to schema 1") - info, err := dest.PutBlob(bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Digest: gzippedEmptyLayerDigest, Size: int64(len(gzippedEmptyLayer))}) - if err != nil { - return nil, errors.Wrap(err, "Error uploading empty layer") - } - if info.Digest != gzippedEmptyLayerDigest { - return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, gzippedEmptyLayerDigest) - } - haveGzippedEmptyLayer = true - } - blobDigest = gzippedEmptyLayerDigest - } else { - if nonemptyLayerIndex >= len(m.LayersDescriptors) { - return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors)) - } - blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest - nonemptyLayerIndex++ - } - - // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. - v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) - if err != nil { - return nil, err - } - v1ID = v - - fakeImage := v1Compatibility{ - ID: v1ID, - Parent: parentV1ID, - Comment: historyEntry.Comment, - Created: historyEntry.Created, - Author: historyEntry.Author, - ThrowAway: historyEntry.EmptyLayer, - } - fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} - v1CompatibilityBytes, err := json.Marshal(&fakeImage) - if err != nil { - return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) - } - - fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest} - history[v1Index] = historySchema1{V1Compatibility: string(v1CompatibilityBytes)} - // Note that parentV1ID of the top layer is preserved when exiting this loop - } - - // Now patch in real configuration for the top layer (v1Index == 0) - v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. - if err != nil { - return nil, err - } - v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) - if err != nil { - return nil, err - } - history[0].V1Compatibility = string(v1Config) - - m1 := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) - return memoryImageFromManifest(m1), nil -} - -func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { - if err := blobDigest.Validate(); err != nil { - return "", err - } - parts := append([]string{blobDigest.Hex()}, others...) - v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) - return hex.EncodeToString(v1IDHash[:]), nil -} - -func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { - // Preserve everything we don't specifically know about. - // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) 
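// A self-contained sketch of the round-trip relied on here (hypothetical
// input; plain encoding/json behavior, not code from this file):
//
//	raw := map[string]*json.RawMessage{}
//	_ = json.Unmarshal([]byte(`{"id":"old","unknown":{"x":1}}`), &raw)
//	enc, _ := json.Marshal("new")
//	raw["id"] = (*json.RawMessage)(&enc)
//	out, _ := json.Marshal(raw) // {"id":"new","unknown":{"x":1}}; unknown fields survive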
- rawContents := map[string]*json.RawMessage{} - if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! - return nil, err - } - delete(rawContents, "rootfs") - delete(rawContents, "history") - - updates := map[string]interface{}{"id": v1ID} - if parentV1ID != "" { - updates["parent"] = parentV1ID - } - if throwaway { - updates["throwaway"] = throwaway - } - for field, value := range updates { - encoded, err := json.Marshal(value) - if err != nil { - return nil, err - } - rawContents[field] = (*json.RawMessage)(&encoded) - } - return json.Marshal(rawContents) -} diff --git a/vendor/github.com/containers/image/image/docker_schema2_test.go b/vendor/github.com/containers/image/image/docker_schema2_test.go deleted file mode 100644 index d80a018844cd..000000000000 --- a/vendor/github.com/containers/image/image/docker_schema2_test.go +++ /dev/null @@ -1,529 +0,0 @@ -package image - -import ( - "bytes" - "context" - "encoding/json" - "io" - "io/ioutil" - "path/filepath" - "testing" - "time" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// unusedImageSource is used when we don't expect the ImageSource to be used in our tests. -type unusedImageSource struct{} - -func (f unusedImageSource) Reference() types.ImageReference { - panic("Unexpected call to a mock function") -} -func (f unusedImageSource) Close() error { - panic("Unexpected call to a mock function") -} -func (f unusedImageSource) GetManifest() ([]byte, string, error) { - panic("Unexpected call to a mock function") -} -func (f unusedImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - panic("Unexpected call to a mock function") -} -func (f unusedImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { - panic("Unexpected call to a mock function") -} -func (f unusedImageSource) GetSignatures(context.Context) ([][]byte, error) { - panic("Unexpected call to a mock function") -} - -func manifestSchema2FromFixture(t *testing.T, src types.ImageSource, fixture string) genericManifest { - manifest, err := ioutil.ReadFile(filepath.Join("fixtures", fixture)) - require.NoError(t, err) - - m, err := manifestSchema2FromManifest(src, manifest) - require.NoError(t, err) - return m -} - -func manifestSchema2FromComponentsLikeFixture(configBlob []byte) genericManifest { - return manifestSchema2FromComponents(descriptor{ - MediaType: "application/octet-stream", - Size: 5940, - Digest: "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f", - }, nil, configBlob, []descriptor{ - { - MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", - Size: 51354364, - }, - { - MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", - Size: 150, - }, - { - MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", - Size: 11739507, - }, - { - MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Digest: 
"sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", - Size: 8841833, - }, - { - MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", - Size: 291, - }, - }) -} - -func TestManifestSchema2FromManifest(t *testing.T) { - // This just tests that the JSON can be loaded; we test that the parsed - // values are correctly returned in tests for the individual getter methods. - _ = manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json") - - _, err := manifestSchema2FromManifest(nil, []byte{}) - assert.Error(t, err) -} - -func TestManifestSchema2FromComponents(t *testing.T) { - // This just smoke-tests that the manifest can be created; we test that the parsed - // values are correctly returned in tests for the individual getter methods. - _ = manifestSchema2FromComponentsLikeFixture(nil) -} - -func TestManifestSchema2Serialize(t *testing.T) { - for _, m := range []genericManifest{ - manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json"), - manifestSchema2FromComponentsLikeFixture(nil), - } { - serialized, err := m.serialize() - require.NoError(t, err) - var contents map[string]interface{} - err = json.Unmarshal(serialized, &contents) - require.NoError(t, err) - - original, err := ioutil.ReadFile("fixtures/schema2.json") - require.NoError(t, err) - var originalContents map[string]interface{} - err = json.Unmarshal(original, &originalContents) - require.NoError(t, err) - - // We would ideally like to compare “serialized” with some transformation of - // “original”, but the ordering of fields in JSON maps is undefined, so this is - // easier. - assert.Equal(t, originalContents, contents) - } -} - -func TestManifestSchema2ManifestMIMEType(t *testing.T) { - for _, m := range []genericManifest{ - manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json"), - manifestSchema2FromComponentsLikeFixture(nil), - } { - assert.Equal(t, manifest.DockerV2Schema2MediaType, m.manifestMIMEType()) - } -} - -func TestManifestSchema2ConfigInfo(t *testing.T) { - for _, m := range []genericManifest{ - manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json"), - manifestSchema2FromComponentsLikeFixture(nil), - } { - assert.Equal(t, types.BlobInfo{ - Size: 5940, - Digest: "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f", - }, m.ConfigInfo()) - } -} - -// configBlobImageSource allows testing various GetBlob behaviors in .ConfigBlob() -type configBlobImageSource struct { - unusedImageSource // We inherit almost all of the methods, which just panic() - f func(digest digest.Digest) (io.ReadCloser, int64, error) -} - -func (f configBlobImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { - if info.Digest.String() != "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f" { - panic("Unexpected digest in GetBlob") - } - return f.f(info.Digest) -} - -func TestManifestSchema2ConfigBlob(t *testing.T) { - realConfigJSON, err := ioutil.ReadFile("fixtures/schema2-config.json") - require.NoError(t, err) - - for _, c := range []struct { - cbISfn func(digest digest.Digest) (io.ReadCloser, int64, error) - blob []byte - }{ - // Success - {func(digest digest.Digest) (io.ReadCloser, int64, error) { - return ioutil.NopCloser(bytes.NewReader(realConfigJSON)), int64(len(realConfigJSON)), nil - }, realConfigJSON}, - // Various kinds of failures - {nil, nil}, - {func(digest digest.Digest) (io.ReadCloser, int64, error) { - return nil, 
-1, errors.New("Error returned from GetBlob")
- }, nil},
- {func(digest digest.Digest) (io.ReadCloser, int64, error) {
- reader, writer := io.Pipe()
- writer.CloseWithError(errors.New("Expected error reading input in ConfigBlob"))
- return reader, 1, nil
- }, nil},
- {func(digest digest.Digest) (io.ReadCloser, int64, error) {
- nonmatchingJSON := []byte("This does not match ConfigDescriptor.Digest")
- return ioutil.NopCloser(bytes.NewReader(nonmatchingJSON)), int64(len(nonmatchingJSON)), nil
- }, nil},
- } {
- var src types.ImageSource
- if c.cbISfn != nil {
- src = configBlobImageSource{unusedImageSource{}, c.cbISfn}
- } else {
- src = nil
- }
- m := manifestSchema2FromFixture(t, src, "schema2.json")
- blob, err := m.ConfigBlob()
- if c.blob != nil {
- assert.NoError(t, err)
- assert.Equal(t, c.blob, blob)
- } else {
- assert.Error(t, err)
- }
- }
-
- // Generally configBlob should match ConfigInfo; we don’t quite need it to, and this will
- // guarantee that the returned object returns the original contents instead
- // of reading an object from elsewhere.
- configBlob := []byte("config blob which does not match ConfigInfo")
- // This just tests that the manifest can be created; we test that the parsed
- // values are correctly returned in tests for the individual getter methods.
- m := manifestSchema2FromComponentsLikeFixture(configBlob)
- cb, err := m.ConfigBlob()
- require.NoError(t, err)
- assert.Equal(t, configBlob, cb)
-}
-
-func TestManifestSchema2LayerInfo(t *testing.T) {
- for _, m := range []genericManifest{
- manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json"),
- manifestSchema2FromComponentsLikeFixture(nil),
- } {
- assert.Equal(t, []types.BlobInfo{
- {
- Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
- Size: 51354364,
- },
- {
- Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
- Size: 150,
- },
- {
- Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
- Size: 11739507,
- },
- {
- Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
- Size: 8841833,
- },
- {
- Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
- Size: 291,
- },
- }, m.LayerInfos())
- }
-}
-
-func TestManifestSchema2EmbeddedDockerReferenceConflicts(t *testing.T) {
- for _, m := range []genericManifest{
- manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json"),
- manifestSchema2FromComponentsLikeFixture(nil),
- } {
- for _, name := range []string{"busybox", "example.com:5555/ns/repo:tag"} {
- ref, err := reference.ParseNormalizedNamed(name)
- require.NoError(t, err)
- conflicts := m.EmbeddedDockerReferenceConflicts(ref)
- assert.False(t, conflicts)
- }
- }
-}
-
-func TestManifestSchema2ImageInspectInfo(t *testing.T) {
- configJSON, err := ioutil.ReadFile("fixtures/schema2-config.json")
- require.NoError(t, err)
-
- m := manifestSchema2FromComponentsLikeFixture(configJSON)
- ii, err := m.imageInspectInfo()
- require.NoError(t, err)
- assert.Equal(t, types.ImageInspectInfo{
- Tag: "",
- Created: time.Date(2016, 9, 23, 23, 20, 45, 789764590, time.UTC),
- DockerVersion: "1.12.1",
- Labels: map[string]string{},
- Architecture: "amd64",
- Os: "linux",
- Layers: nil,
- }, *ii)
-
- // nil configBlob will trigger an error in m.ConfigBlob()
- m = manifestSchema2FromComponentsLikeFixture(nil)
- _, err = m.imageInspectInfo()
- assert.Error(t, err)
-
- m = manifestSchema2FromComponentsLikeFixture([]byte("invalid JSON"))
- _, err =
m.imageInspectInfo() - assert.Error(t, err) -} - -func TestManifestSchema2UpdatedImageNeedsLayerDiffIDs(t *testing.T) { - for _, m := range []genericManifest{ - manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json"), - manifestSchema2FromComponentsLikeFixture(nil), - } { - assert.False(t, m.UpdatedImageNeedsLayerDiffIDs(types.ManifestUpdateOptions{ - ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType, - })) - } -} - -// schema2ImageSource is plausible enough for schema conversions in manifestSchema2.UpdatedImage() to work. -type schema2ImageSource struct { - configBlobImageSource - ref reference.Named -} - -func (s2is *schema2ImageSource) Reference() types.ImageReference { - return refImageReferenceMock{s2is.ref} -} - -// refImageReferenceMock is a mock of types.ImageReference which returns itself in DockerReference. -type refImageReferenceMock struct{ reference.Named } - -func (ref refImageReferenceMock) Transport() types.ImageTransport { - panic("unexpected call to a mock function") -} -func (ref refImageReferenceMock) StringWithinTransport() string { - panic("unexpected call to a mock function") -} -func (ref refImageReferenceMock) DockerReference() reference.Named { - return ref.Named -} -func (ref refImageReferenceMock) PolicyConfigurationIdentity() string { - panic("unexpected call to a mock function") -} -func (ref refImageReferenceMock) PolicyConfigurationNamespaces() []string { - panic("unexpected call to a mock function") -} -func (ref refImageReferenceMock) NewImage(ctx *types.SystemContext) (types.Image, error) { - panic("unexpected call to a mock function") -} -func (ref refImageReferenceMock) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - panic("unexpected call to a mock function") -} -func (ref refImageReferenceMock) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - panic("unexpected call to a mock function") -} -func (ref refImageReferenceMock) DeleteImage(ctx *types.SystemContext) error { - panic("unexpected call to a mock function") -} - -func newSchema2ImageSource(t *testing.T, dockerRef string) *schema2ImageSource { - realConfigJSON, err := ioutil.ReadFile("fixtures/schema2-config.json") - require.NoError(t, err) - - ref, err := reference.ParseNormalizedNamed(dockerRef) - require.NoError(t, err) - - return &schema2ImageSource{ - configBlobImageSource: configBlobImageSource{ - f: func(digest digest.Digest) (io.ReadCloser, int64, error) { - return ioutil.NopCloser(bytes.NewReader(realConfigJSON)), int64(len(realConfigJSON)), nil - }, - }, - ref: ref, - } -} - -type memoryImageDest struct { - ref reference.Named - storedBlobs map[digest.Digest][]byte -} - -func (d *memoryImageDest) Reference() types.ImageReference { - return refImageReferenceMock{d.ref} -} -func (d *memoryImageDest) Close() error { - panic("Unexpected call to a mock function") -} -func (d *memoryImageDest) SupportedManifestMIMETypes() []string { - panic("Unexpected call to a mock function") -} -func (d *memoryImageDest) SupportsSignatures() error { - panic("Unexpected call to a mock function") -} -func (d *memoryImageDest) ShouldCompressLayers() bool { - panic("Unexpected call to a mock function") -} -func (d *memoryImageDest) AcceptsForeignLayerURLs() bool { - panic("Unexpected call to a mock function") -} -func (d *memoryImageDest) MustMatchRuntimeOS() bool { - panic("Unexpected call to a mock function") -} -func (d *memoryImageDest) PutBlob(stream io.Reader, inputInfo types.BlobInfo) 
(types.BlobInfo, error) { - if d.storedBlobs == nil { - d.storedBlobs = make(map[digest.Digest][]byte) - } - if inputInfo.Digest.String() == "" { - panic("inputInfo.Digest unexpectedly empty") - } - contents, err := ioutil.ReadAll(stream) - if err != nil { - return types.BlobInfo{}, err - } - d.storedBlobs[inputInfo.Digest] = contents - return types.BlobInfo{Digest: inputInfo.Digest, Size: int64(len(contents))}, nil -} -func (d *memoryImageDest) HasBlob(inputInfo types.BlobInfo) (bool, int64, error) { - panic("Unexpected call to a mock function") -} -func (d *memoryImageDest) ReapplyBlob(inputInfo types.BlobInfo) (types.BlobInfo, error) { - panic("Unexpected call to a mock function") -} -func (d *memoryImageDest) PutManifest([]byte) error { - panic("Unexpected call to a mock function") -} -func (d *memoryImageDest) PutSignatures(signatures [][]byte) error { - panic("Unexpected call to a mock function") -} -func (d *memoryImageDest) Commit() error { - panic("Unexpected call to a mock function") -} - -func TestManifestSchema2UpdatedImage(t *testing.T) { - originalSrc := newSchema2ImageSource(t, "httpd:latest") - original := manifestSchema2FromFixture(t, originalSrc, "schema2.json") - - // LayerInfos: - layerInfos := append(original.LayerInfos()[1:], original.LayerInfos()[0]) - res, err := original.UpdatedImage(types.ManifestUpdateOptions{ - LayerInfos: layerInfos, - }) - require.NoError(t, err) - assert.Equal(t, layerInfos, res.LayerInfos()) - _, err = original.UpdatedImage(types.ManifestUpdateOptions{ - LayerInfos: append(layerInfos, layerInfos[0]), - }) - assert.Error(t, err) - - // EmbeddedDockerReference: - // … is ignored - embeddedRef, err := reference.ParseNormalizedNamed("busybox") - require.NoError(t, err) - res, err = original.UpdatedImage(types.ManifestUpdateOptions{ - EmbeddedDockerReference: embeddedRef, - }) - require.NoError(t, err) - nonEmbeddedRef, err := reference.ParseNormalizedNamed("notbusybox:notlatest") - require.NoError(t, err) - conflicts := res.EmbeddedDockerReferenceConflicts(nonEmbeddedRef) - assert.False(t, conflicts) - - // ManifestMIMEType: - // Only smoke-test the valid conversions, detailed tests are below. (This also verifies that “original” is not affected.) 
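// (Annotation: only the schema1 targets are exercised in the loop below;
// the OCI conversion path is smoke-tested separately in
// TestConvertToManifestOCI further down in this file.)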
- for _, mime := range []string{ - manifest.DockerV2Schema1MediaType, - manifest.DockerV2Schema1SignedMediaType, - } { - _, err = original.UpdatedImage(types.ManifestUpdateOptions{ - ManifestMIMEType: mime, - InformationOnly: types.ManifestUpdateInformation{ - Destination: &memoryImageDest{ref: originalSrc.ref}, - }, - }) - assert.NoError(t, err, mime) - } - for _, mime := range []string{ - manifest.DockerV2Schema2MediaType, // This indicates a confused caller, not a no-op - "this is invalid", - } { - _, err = original.UpdatedImage(types.ManifestUpdateOptions{ - ManifestMIMEType: mime, - }) - assert.Error(t, err, mime) - } - - // m hasn’t been changed: - m2 := manifestSchema2FromFixture(t, originalSrc, "schema2.json") - typedOriginal, ok := original.(*manifestSchema2) - require.True(t, ok) - typedM2, ok := m2.(*manifestSchema2) - require.True(t, ok) - assert.Equal(t, *typedM2, *typedOriginal) -} - -func TestConvertToManifestOCI(t *testing.T) { - originalSrc := newSchema2ImageSource(t, "httpd-copy:latest") - original := manifestSchema2FromFixture(t, originalSrc, "schema2.json") - res, err := original.UpdatedImage(types.ManifestUpdateOptions{ - ManifestMIMEType: imgspecv1.MediaTypeImageManifest, - }) - require.NoError(t, err) - - convertedJSON, mt, err := res.Manifest() - require.NoError(t, err) - assert.Equal(t, imgspecv1.MediaTypeImageManifest, mt) - - byHandJSON, err := ioutil.ReadFile("fixtures/schema2-to-oci1.json") - require.NoError(t, err) - var converted, byHand map[string]interface{} - err = json.Unmarshal(byHandJSON, &byHand) - require.NoError(t, err) - err = json.Unmarshal(convertedJSON, &converted) - require.NoError(t, err) - assert.Equal(t, byHand, converted) -} - -func TestConvertToManifestSchema1(t *testing.T) { - originalSrc := newSchema2ImageSource(t, "httpd-copy:latest") - original := manifestSchema2FromFixture(t, originalSrc, "schema2.json") - memoryDest := &memoryImageDest{ref: originalSrc.ref} - res, err := original.UpdatedImage(types.ManifestUpdateOptions{ - ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType, - InformationOnly: types.ManifestUpdateInformation{ - Destination: memoryDest, - }, - }) - require.NoError(t, err) - - convertedJSON, mt, err := res.Manifest() - require.NoError(t, err) - assert.Equal(t, manifest.DockerV2Schema1SignedMediaType, mt) - - // byDockerJSON is the result of asking the Docker Hub for a schema1 manifest, - // except that we have replaced "name" to verify that the ref from - // memoryDest, not from originalSrc, is used. - byDockerJSON, err := ioutil.ReadFile("fixtures/schema2-to-schema1-by-docker.json") - require.NoError(t, err) - var converted, byDocker map[string]interface{} - err = json.Unmarshal(byDockerJSON, &byDocker) - require.NoError(t, err) - err = json.Unmarshal(convertedJSON, &converted) - require.NoError(t, err) - delete(byDocker, "signatures") - delete(converted, "signatures") - assert.Equal(t, byDocker, converted) - - assert.Equal(t, gzippedEmptyLayer, memoryDest.storedBlobs[gzippedEmptyLayerDigest]) - - // FIXME? Test also the various failure cases, if only to see that we don't crash? 
-} diff --git a/vendor/github.com/containers/image/image/fixtures/oci1-config.json b/vendor/github.com/containers/image/image/fixtures/oci1-config.json deleted file mode 100644 index f49230ea77d0..000000000000 --- a/vendor/github.com/containers/image/image/fixtures/oci1-config.json +++ /dev/null @@ -1 +0,0 @@ -{"architecture":"amd64","config":{"Hostname":"383850eeb47b","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":{"80/tcp":{}},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HTTPD_PREFIX=/usr/local/apache2","HTTPD_VERSION=2.4.23","HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"],"Cmd":["httpd-foreground"],"ArgsEscaped":true,"Image":"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd","Volumes":null,"WorkingDir":"/usr/local/apache2","Entrypoint":null,"OnBuild":[],"Labels":{}},"container":"8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69","container_config":{"Hostname":"383850eeb47b","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":{"80/tcp":{}},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HTTPD_PREFIX=/usr/local/apache2","HTTPD_VERSION=2.4.23","HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"],"Cmd":["/bin/sh","-c","#(nop) ","CMD [\"httpd-foreground\"]"],"ArgsEscaped":true,"Image":"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd","Volumes":null,"WorkingDir":"/usr/local/apache2","Entrypoint":null,"OnBuild":[],"Labels":{}},"created":"2016-09-23T23:20:45.78976459Z","docker_version":"1.12.1","history":[{"created":"2016-09-23T18:08:50.537223822Z","created_by":"/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / "},{"created":"2016-09-23T18:08:51.133779867Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/bash\"]","empty_layer":true},{"created":"2016-09-23T19:16:40.725768956Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2","empty_layer":true},{"created":"2016-09-23T19:16:41.037788416Z","created_by":"/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","empty_layer":true},{"created":"2016-09-23T19:16:41.990121202Z","created_by":"/bin/sh -c mkdir -p \"$HTTPD_PREFIX\" \t\u0026\u0026 chown www-data:www-data \"$HTTPD_PREFIX\""},{"created":"2016-09-23T19:16:42.339911155Z","created_by":"/bin/sh -c #(nop) WORKDIR /usr/local/apache2","empty_layer":true},{"created":"2016-09-23T19:16:54.948461741Z","created_by":"/bin/sh -c apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends \t\tlibapr1 \t\tlibaprutil1 \t\tlibaprutil1-ldap \t\tlibapr1-dev \t\tlibaprutil1-dev \t\tlibpcre++0 \t\tlibssl1.0.0 \t\u0026\u0026 rm -r /var/lib/apt/lists/*"},{"created":"2016-09-23T19:16:55.321573403Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23","empty_layer":true},{"created":"2016-09-23T19:16:55.629947307Z","created_by":"/bin/sh -c #(nop) 
ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","empty_layer":true},{"created":"2016-09-23T23:19:03.705796801Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","empty_layer":true},{"created":"2016-09-23T23:19:04.009782822Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc","empty_layer":true},{"created":"2016-09-23T23:20:44.585743332Z","created_by":"/bin/sh -c set -x \t\u0026\u0026 buildDeps=' \t\tbzip2 \t\tca-certificates \t\tgcc \t\tlibpcre++-dev \t\tlibssl-dev \t\tmake \t\twget \t' \t\u0026\u0026 apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends $buildDeps \t\u0026\u0026 rm -r /var/lib/apt/lists/* \t\t\u0026\u0026 wget -O httpd.tar.bz2 \"$HTTPD_BZ2_URL\" \t\u0026\u0026 echo \"$HTTPD_SHA1 *httpd.tar.bz2\" | sha1sum -c - \t\u0026\u0026 wget -O httpd.tar.bz2.asc \"$HTTPD_ASC_URL\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \t\u0026\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \t\u0026\u0026 rm -r \"$GNUPGHOME\" httpd.tar.bz2.asc \t\t\u0026\u0026 mkdir -p src \t\u0026\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \t\u0026\u0026 rm httpd.tar.bz2 \t\u0026\u0026 cd src \t\t\u0026\u0026 ./configure \t\t--prefix=\"$HTTPD_PREFIX\" \t\t--enable-mods-shared=reallyall \t\u0026\u0026 make -j\"$(nproc)\" \t\u0026\u0026 make install \t\t\u0026\u0026 cd .. \t\u0026\u0026 rm -r src \t\t\u0026\u0026 sed -ri \t\t-e 's!^(\\s*CustomLog)\\s+\\S+!\\1 /proc/self/fd/1!g' \t\t-e 's!^(\\s*ErrorLog)\\s+\\S+!\\1 /proc/self/fd/2!g' \t\t\"$HTTPD_PREFIX/conf/httpd.conf\" \t\t\u0026\u0026 apt-get purge -y --auto-remove $buildDeps"},{"created":"2016-09-23T23:20:45.127455562Z","created_by":"/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ "},{"created":"2016-09-23T23:20:45.453934921Z","created_by":"/bin/sh -c #(nop) EXPOSE 80/tcp","empty_layer":true},{"created":"2016-09-23T23:20:45.78976459Z","created_by":"/bin/sh -c #(nop) CMD [\"httpd-foreground\"]","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:142a601d97936307e75220c35dde0348971a9584c21e7cb42e1f7004005432ab","sha256:90fcc66ad3be9f1757f954b750deb37032f208428aa12599fcb02182b9065a9c","sha256:5a8624bb7e76d1e6829f9c64c43185e02bc07f97a2189eb048609a8914e72c56","sha256:d349ff6b3afc6a2800054768c82bfbf4289c9aa5da55c1290f802943dcd4d1e9","sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b"]}} \ No newline at end of file diff --git a/vendor/github.com/containers/image/image/fixtures/oci1-to-schema2.json b/vendor/github.com/containers/image/image/fixtures/oci1-to-schema2.json deleted file mode 100644 index 8861521ec2d4..000000000000 --- a/vendor/github.com/containers/image/image/fixtures/oci1-to-schema2.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "config": { - "mediaType": "application/vnd.docker.container.image.v1+json", - "size": 5940, - "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f" - }, - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 51354364, - "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" - }, - { - "mediaType": 
"application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 150, - "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 11739507, - "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 8841833, - "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 291, - "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa" - } - ] -} diff --git a/vendor/github.com/containers/image/image/fixtures/oci1.json b/vendor/github.com/containers/image/image/fixtures/oci1.json deleted file mode 100644 index d26561d82016..000000000000 --- a/vendor/github.com/containers/image/image/fixtures/oci1.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "schemaVersion": 2, - "config": { - "mediaType": "application/vnd.oci.image.config.v1+json", - "size": 5940, - "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f" - }, - "layers": [ - { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 51354364, - "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" - }, - { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 150, - "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" - }, - { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 11739507, - "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9" - }, - { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 8841833, - "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909" - }, - { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 291, - "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa" - } - ] -} diff --git a/vendor/github.com/containers/image/image/fixtures/schema1-to-oci-config.json b/vendor/github.com/containers/image/image/fixtures/schema1-to-oci-config.json deleted file mode 100644 index ee5825794aef..000000000000 --- a/vendor/github.com/containers/image/image/fixtures/schema1-to-oci-config.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "schemaVersion": 1, - "name": "google_containers/pause-amd64", - "tag": "3.0", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:f112334343777b75be77ec1f835e3bbbe7d7bd46e27b6a2ae35c6b3cfea0987c" - }, - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"bb497e16a2d55195649174d1fadac52b00fa2c14124d73009712606909286bc5\",\"parent\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"created\":\"2016-05-04T06:26:41.522308365Z\",\"container\":\"a9873535145fe72b464d3055efbac36aab70d059914e221cbbd7fe3cac53ef6b\",\"container_config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
\\u0026{[\\\"/pause\\\"]}\"],\"Image\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":[\"/pause\"],\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.9.1\",\"config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":[\"/pause\"],\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\"}" - }, - { - "v1Compatibility": "{\"id\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"parent\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"created\":\"2016-05-04T06:26:41.091672218Z\",\"container\":\"e1b38778b023f25642273ed9e7f4846b4bf38b22a8b55755880b2e6ab6019811\",\"container_config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ADD file:b7eb6a5df9d5fbe509cac16ed89f8d6513a4362017184b14c6a5fae151eee5c5 in /pause\"],\"Image\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.9.1\",\"config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":746888}" - }, - { - "v1Compatibility": "{\"id\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"created\":\"2016-05-04T06:26:40.628395649Z\",\"container\":\"95722352e41d57660259fbede4413d06889a28eb07a7302d2a7b3f9c71ceaa46\",\"container_config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ARG ARCH\"],\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.9.1\",\"config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\"}" - } - ],"signatures":[{"header":{"alg":"ES256","jwk":{"crv":"P-256","kid":"ORN4:M47W:3KP3:TZRZ:C3UF:5MFQ:INZV:TCMY:LHNV:EYQU:IRGJ:IJLJ","kty":"EC","x":"yJ0ZQ19NBZUQn8LV60sFEabhlgky9svozfK0VGVou7Y","y":"gOJScOkkLVY1f8aAx-6XXpVM5rJaDYLkCNJ1dvcQGMs"}},"protected":"eyJmb3JtYXRMZW5ndGgiOjQxMzMsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNi0wNS0wNFQwNjoyODo1MVoifQ","signature":"77_7DVx1IZ3PiKNnO7QnvoF7Sgik4GI4bnlVJdtQW461dSyYzd-nSdBmky8Jew3InEW8Cuv_t5w4GmOSwXvL7g"}] - -} diff --git 
a/vendor/github.com/containers/image/image/fixtures/schema2-config.json b/vendor/github.com/containers/image/image/fixtures/schema2-config.json deleted file mode 100644 index f49230ea77d0..000000000000 --- a/vendor/github.com/containers/image/image/fixtures/schema2-config.json +++ /dev/null @@ -1 +0,0 @@ -{"architecture":"amd64","config":{"Hostname":"383850eeb47b","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":{"80/tcp":{}},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HTTPD_PREFIX=/usr/local/apache2","HTTPD_VERSION=2.4.23","HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"],"Cmd":["httpd-foreground"],"ArgsEscaped":true,"Image":"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd","Volumes":null,"WorkingDir":"/usr/local/apache2","Entrypoint":null,"OnBuild":[],"Labels":{}},"container":"8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69","container_config":{"Hostname":"383850eeb47b","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":{"80/tcp":{}},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HTTPD_PREFIX=/usr/local/apache2","HTTPD_VERSION=2.4.23","HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"],"Cmd":["/bin/sh","-c","#(nop) ","CMD [\"httpd-foreground\"]"],"ArgsEscaped":true,"Image":"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd","Volumes":null,"WorkingDir":"/usr/local/apache2","Entrypoint":null,"OnBuild":[],"Labels":{}},"created":"2016-09-23T23:20:45.78976459Z","docker_version":"1.12.1","history":[{"created":"2016-09-23T18:08:50.537223822Z","created_by":"/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / "},{"created":"2016-09-23T18:08:51.133779867Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/bash\"]","empty_layer":true},{"created":"2016-09-23T19:16:40.725768956Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2","empty_layer":true},{"created":"2016-09-23T19:16:41.037788416Z","created_by":"/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","empty_layer":true},{"created":"2016-09-23T19:16:41.990121202Z","created_by":"/bin/sh -c mkdir -p \"$HTTPD_PREFIX\" \t\u0026\u0026 chown www-data:www-data \"$HTTPD_PREFIX\""},{"created":"2016-09-23T19:16:42.339911155Z","created_by":"/bin/sh -c #(nop) WORKDIR /usr/local/apache2","empty_layer":true},{"created":"2016-09-23T19:16:54.948461741Z","created_by":"/bin/sh -c apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends \t\tlibapr1 \t\tlibaprutil1 \t\tlibaprutil1-ldap \t\tlibapr1-dev \t\tlibaprutil1-dev \t\tlibpcre++0 \t\tlibssl1.0.0 \t\u0026\u0026 rm -r /var/lib/apt/lists/*"},{"created":"2016-09-23T19:16:55.321573403Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23","empty_layer":true},{"created":"2016-09-23T19:16:55.629947307Z","created_by":"/bin/sh -c #(nop) ENV 
HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","empty_layer":true},{"created":"2016-09-23T23:19:03.705796801Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","empty_layer":true},{"created":"2016-09-23T23:19:04.009782822Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc","empty_layer":true},{"created":"2016-09-23T23:20:44.585743332Z","created_by":"/bin/sh -c set -x \t\u0026\u0026 buildDeps=' \t\tbzip2 \t\tca-certificates \t\tgcc \t\tlibpcre++-dev \t\tlibssl-dev \t\tmake \t\twget \t' \t\u0026\u0026 apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends $buildDeps \t\u0026\u0026 rm -r /var/lib/apt/lists/* \t\t\u0026\u0026 wget -O httpd.tar.bz2 \"$HTTPD_BZ2_URL\" \t\u0026\u0026 echo \"$HTTPD_SHA1 *httpd.tar.bz2\" | sha1sum -c - \t\u0026\u0026 wget -O httpd.tar.bz2.asc \"$HTTPD_ASC_URL\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \t\u0026\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \t\u0026\u0026 rm -r \"$GNUPGHOME\" httpd.tar.bz2.asc \t\t\u0026\u0026 mkdir -p src \t\u0026\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \t\u0026\u0026 rm httpd.tar.bz2 \t\u0026\u0026 cd src \t\t\u0026\u0026 ./configure \t\t--prefix=\"$HTTPD_PREFIX\" \t\t--enable-mods-shared=reallyall \t\u0026\u0026 make -j\"$(nproc)\" \t\u0026\u0026 make install \t\t\u0026\u0026 cd .. \t\u0026\u0026 rm -r src \t\t\u0026\u0026 sed -ri \t\t-e 's!^(\\s*CustomLog)\\s+\\S+!\\1 /proc/self/fd/1!g' \t\t-e 's!^(\\s*ErrorLog)\\s+\\S+!\\1 /proc/self/fd/2!g' \t\t\"$HTTPD_PREFIX/conf/httpd.conf\" \t\t\u0026\u0026 apt-get purge -y --auto-remove $buildDeps"},{"created":"2016-09-23T23:20:45.127455562Z","created_by":"/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ "},{"created":"2016-09-23T23:20:45.453934921Z","created_by":"/bin/sh -c #(nop) EXPOSE 80/tcp","empty_layer":true},{"created":"2016-09-23T23:20:45.78976459Z","created_by":"/bin/sh -c #(nop) CMD [\"httpd-foreground\"]","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:142a601d97936307e75220c35dde0348971a9584c21e7cb42e1f7004005432ab","sha256:90fcc66ad3be9f1757f954b750deb37032f208428aa12599fcb02182b9065a9c","sha256:5a8624bb7e76d1e6829f9c64c43185e02bc07f97a2189eb048609a8914e72c56","sha256:d349ff6b3afc6a2800054768c82bfbf4289c9aa5da55c1290f802943dcd4d1e9","sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b"]}} \ No newline at end of file diff --git a/vendor/github.com/containers/image/image/fixtures/schema2-to-oci1.json b/vendor/github.com/containers/image/image/fixtures/schema2-to-oci1.json deleted file mode 100644 index cd081b7d1d5a..000000000000 --- a/vendor/github.com/containers/image/image/fixtures/schema2-to-oci1.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "schemaVersion": 2, - "config": { - "mediaType": "application/vnd.oci.image.config.v1+json", - "size": 4651, - "digest": "sha256:a13a0762ab7bed51a1b49adec0a702b1cd99294fd460a025b465bcfb7b152745" - }, - "layers": [{ - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 51354364, - "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" - }, { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 150, - "digest": 
"sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" - }, { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 11739507, - "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9" - }, { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 8841833, - "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909" - }, { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 291, - "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa" - }] -} diff --git a/vendor/github.com/containers/image/image/fixtures/schema2-to-schema1-by-docker.json b/vendor/github.com/containers/image/image/fixtures/schema2-to-schema1-by-docker.json deleted file mode 100644 index 494450d9fdf0..000000000000 --- a/vendor/github.com/containers/image/image/fixtures/schema2-to-schema1-by-docker.json +++ /dev/null @@ -1,116 +0,0 @@ -{ - "schemaVersion": 1, - "name": "library/httpd-copy", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa" - }, - { - "blobSum": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909" - }, - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9" - }, - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" - }, - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" - } - ], - "history": [ - { - "v1Compatibility": 
"{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"383850eeb47b\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":{\"80/tcp\":{}},\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"HTTPD_PREFIX=/usr/local/apache2\",\"HTTPD_VERSION=2.4.23\",\"HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f\",\"HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\\u0026filename=httpd/httpd-2.4.23.tar.bz2\",\"HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc\"],\"Cmd\":[\"httpd-foreground\"],\"ArgsEscaped\":true,\"Image\":\"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd\",\"Volumes\":null,\"WorkingDir\":\"/usr/local/apache2\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{}},\"container\":\"8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69\",\"container_config\":{\"Hostname\":\"383850eeb47b\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":{\"80/tcp\":{}},\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"HTTPD_PREFIX=/usr/local/apache2\",\"HTTPD_VERSION=2.4.23\",\"HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f\",\"HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\\u0026filename=httpd/httpd-2.4.23.tar.bz2\",\"HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"CMD [\\\"httpd-foreground\\\"]\"],\"ArgsEscaped\":true,\"Image\":\"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd\",\"Volumes\":null,\"WorkingDir\":\"/usr/local/apache2\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{}},\"created\":\"2016-09-23T23:20:45.78976459Z\",\"docker_version\":\"1.12.1\",\"id\":\"dca7323f9c839837493199d63263083d94f5eb1796d7bd04ca8374c4e9d3749a\",\"os\":\"linux\",\"parent\":\"1b750729af47c9a802c8d14b0d327d3ad5ecdce5ae773ac728a0263315b914f4\",\"throwaway\":true}" - }, - { - "v1Compatibility": "{\"id\":\"1b750729af47c9a802c8d14b0d327d3ad5ecdce5ae773ac728a0263315b914f4\",\"parent\":\"3ef2f186f8b0a2fd2d95f5a1f1cd213f5fb0a6e51b0a8dfbe2ec7003a788ff9a\",\"created\":\"2016-09-23T23:20:45.453934921Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) EXPOSE 80/tcp\"]},\"throwaway\":true}" - }, - { - "v1Compatibility": "{\"id\":\"3ef2f186f8b0a2fd2d95f5a1f1cd213f5fb0a6e51b0a8dfbe2ec7003a788ff9a\",\"parent\":\"dbbb5c772ba968f675ebdb1968a2fbcf3cf53c0c85ff4e3329619e3735c811e6\",\"created\":\"2016-09-23T23:20:45.127455562Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ \"]}}" - }, - { - "v1Compatibility": "{\"id\":\"dbbb5c772ba968f675ebdb1968a2fbcf3cf53c0c85ff4e3329619e3735c811e6\",\"parent\":\"d264ded964bb52f78c8905c9e6c5f2b8526ef33f371981f0651f3fb0164ad4a7\",\"created\":\"2016-09-23T23:20:44.585743332Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c set -x \\t\\u0026\\u0026 buildDeps=' \\t\\tbzip2 \\t\\tca-certificates \\t\\tgcc \\t\\tlibpcre++-dev \\t\\tlibssl-dev \\t\\tmake \\t\\twget \\t' \\t\\u0026\\u0026 apt-get update \\t\\u0026\\u0026 apt-get install -y --no-install-recommends $buildDeps \\t\\u0026\\u0026 rm -r /var/lib/apt/lists/* \\t\\t\\u0026\\u0026 wget -O httpd.tar.bz2 
\\\"$HTTPD_BZ2_URL\\\" \\t\\u0026\\u0026 echo \\\"$HTTPD_SHA1 *httpd.tar.bz2\\\" | sha1sum -c - \\t\\u0026\\u0026 wget -O httpd.tar.bz2.asc \\\"$HTTPD_ASC_URL\\\" \\t\\u0026\\u0026 export GNUPGHOME=\\\"$(mktemp -d)\\\" \\t\\u0026\\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \\t\\u0026\\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \\t\\u0026\\u0026 rm -r \\\"$GNUPGHOME\\\" httpd.tar.bz2.asc \\t\\t\\u0026\\u0026 mkdir -p src \\t\\u0026\\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \\t\\u0026\\u0026 rm httpd.tar.bz2 \\t\\u0026\\u0026 cd src \\t\\t\\u0026\\u0026 ./configure \\t\\t--prefix=\\\"$HTTPD_PREFIX\\\" \\t\\t--enable-mods-shared=reallyall \\t\\u0026\\u0026 make -j\\\"$(nproc)\\\" \\t\\u0026\\u0026 make install \\t\\t\\u0026\\u0026 cd .. \\t\\u0026\\u0026 rm -r src \\t\\t\\u0026\\u0026 sed -ri \\t\\t-e 's!^(\\\\s*CustomLog)\\\\s+\\\\S+!\\\\1 /proc/self/fd/1!g' \\t\\t-e 's!^(\\\\s*ErrorLog)\\\\s+\\\\S+!\\\\1 /proc/self/fd/2!g' \\t\\t\\\"$HTTPD_PREFIX/conf/httpd.conf\\\" \\t\\t\\u0026\\u0026 apt-get purge -y --auto-remove $buildDeps\"]}}" - }, - { - "v1Compatibility": "{\"id\":\"d264ded964bb52f78c8905c9e6c5f2b8526ef33f371981f0651f3fb0164ad4a7\",\"parent\":\"fd6f8d569a8a6d2a95f797494ab3cee7a47693dde647210b236a141f76b5c5fd\",\"created\":\"2016-09-23T23:19:04.009782822Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc\"]},\"throwaway\":true}" - }, - { - "v1Compatibility": "{\"id\":\"fd6f8d569a8a6d2a95f797494ab3cee7a47693dde647210b236a141f76b5c5fd\",\"parent\":\"5e2578d171daa47c0eeb55e592b4e3bd28a0946a75baed58e4d4dd315c5d5780\",\"created\":\"2016-09-23T23:19:03.705796801Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\\u0026filename=httpd/httpd-2.4.23.tar.bz2\"]},\"throwaway\":true}" - }, - { - "v1Compatibility": "{\"id\":\"5e2578d171daa47c0eeb55e592b4e3bd28a0946a75baed58e4d4dd315c5d5780\",\"parent\":\"1912159ee5bea8d7fde49b85012f90c47bceb3f09e4082b112b1f06a3f339c53\",\"created\":\"2016-09-23T19:16:55.629947307Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f\"]},\"throwaway\":true}" - }, - { - "v1Compatibility": "{\"id\":\"1912159ee5bea8d7fde49b85012f90c47bceb3f09e4082b112b1f06a3f339c53\",\"parent\":\"3bfb089ca9d4bb73a9016e44a2c6f908b701f97704433305c419f75e8559d8a2\",\"created\":\"2016-09-23T19:16:55.321573403Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23\"]},\"throwaway\":true}" - }, - { - "v1Compatibility": "{\"id\":\"3bfb089ca9d4bb73a9016e44a2c6f908b701f97704433305c419f75e8559d8a2\",\"parent\":\"ae1ece73de4d0365c8b8ab45ba0bf6b1efa4213c16a4903b89341b704d101c3c\",\"created\":\"2016-09-23T19:16:54.948461741Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c apt-get update \\t\\u0026\\u0026 apt-get install -y --no-install-recommends \\t\\tlibapr1 \\t\\tlibaprutil1 \\t\\tlibaprutil1-ldap \\t\\tlibapr1-dev \\t\\tlibaprutil1-dev \\t\\tlibpcre++0 \\t\\tlibssl1.0.0 \\t\\u0026\\u0026 rm -r /var/lib/apt/lists/*\"]}}" - }, - { - "v1Compatibility": "{\"id\":\"ae1ece73de4d0365c8b8ab45ba0bf6b1efa4213c16a4903b89341b704d101c3c\",\"parent\":\"bffbcb416f40e0bd3ebae202403587bfd41829cd1e0d538b66f29adce40c6408\",\"created\":\"2016-09-23T19:16:42.339911155Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) WORKDIR /usr/local/apache2\"]},\"throwaway\":true}" - }, - { - 
"v1Compatibility": "{\"id\":\"bffbcb416f40e0bd3ebae202403587bfd41829cd1e0d538b66f29adce40c6408\",\"parent\":\"7b27731a3363efcb6b0520962d544471745aae15664920dffe690b4fdb410d80\",\"created\":\"2016-09-23T19:16:41.990121202Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c mkdir -p \\\"$HTTPD_PREFIX\\\" \\t\\u0026\\u0026 chown www-data:www-data \\\"$HTTPD_PREFIX\\\"\"]}}" - }, - { - "v1Compatibility": "{\"id\":\"7b27731a3363efcb6b0520962d544471745aae15664920dffe690b4fdb410d80\",\"parent\":\"57a0a421f1acbc1fe6b88b32d3d1c3c0388ff1958b97f95dd0e3a599b810499b\",\"created\":\"2016-09-23T19:16:41.037788416Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"]},\"throwaway\":true}" - }, - { - "v1Compatibility": "{\"id\":\"57a0a421f1acbc1fe6b88b32d3d1c3c0388ff1958b97f95dd0e3a599b810499b\",\"parent\":\"faeaf6fdfdcbb18d68c12db9683a02428bab83962a493de88b4c7b1ec941db8f\",\"created\":\"2016-09-23T19:16:40.725768956Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2\"]},\"throwaway\":true}" - }, - { - "v1Compatibility": "{\"id\":\"faeaf6fdfdcbb18d68c12db9683a02428bab83962a493de88b4c7b1ec941db8f\",\"parent\":\"d0c4f1eb7dc8f4dae2b45fe5c0cf4cfc70e5be85d933f5f5f4deb59f134fb520\",\"created\":\"2016-09-23T18:08:51.133779867Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"/bin/bash\\\"]\"]},\"throwaway\":true}" - }, - { - "v1Compatibility": "{\"id\":\"d0c4f1eb7dc8f4dae2b45fe5c0cf4cfc70e5be85d933f5f5f4deb59f134fb520\",\"created\":\"2016-09-23T18:08:50.537223822Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / \"]}}" - } - ], - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "6QVR:5NTY:VIHC:W6IU:XYIN:CTKT:OG5R:XEEG:Z6XJ:2623:YCBP:36MA", - "kty": "EC", - "x": "NAGHj6-IdNonuFoxlqJnNMjcrCCE1CBoq2r_1NDci68", - "y": "Kocqgj_Ey5J-wLXTjkuqLC-HjciAnWxsBEziAOTvSPc" - }, - "alg": "ES256" - }, - "signature": "2MN5k06i8xkJhD5ay4yxAFK7tsZk58UznAZONxDplvQ5lZwbRS162OeBDjCb0Hk0IDyrLXtAfBDlY2Gzf6jrpw", - "protected": "eyJmb3JtYXRMZW5ndGgiOjEwODk1LCJmb3JtYXRUYWlsIjoiQ24wIiwidGltZSI6IjIwMTYtMTAtMTRUMTY6MTI6MDlaIn0" - } - ] -} diff --git a/vendor/github.com/containers/image/image/fixtures/schema2.json b/vendor/github.com/containers/image/image/fixtures/schema2.json deleted file mode 100644 index 8df4c0daf8f1..000000000000 --- a/vendor/github.com/containers/image/image/fixtures/schema2.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "config": { - "mediaType": "application/octet-stream", - "size": 5940, - "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f" - }, - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 51354364, - "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 150, - "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 11739507, - "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 8841833, - "digest": 
"sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 291, - "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/containers/image/image/manifest.go b/vendor/github.com/containers/image/image/manifest.go deleted file mode 100644 index 75c9e7116417..000000000000 --- a/vendor/github.com/containers/image/image/manifest.go +++ /dev/null @@ -1,129 +0,0 @@ -package image - -import ( - "time" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/strslice" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -type config struct { - Cmd strslice.StrSlice - Labels map[string]string -} - -type v1Image struct { - ID string `json:"id,omitempty"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig *config `json:"container_config,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - Author string `json:"author,omitempty"` - // Config is the configuration of the container received from the client - Config *config `json:"config,omitempty"` - // Architecture is the hardware that the image is build and runs on - Architecture string `json:"architecture,omitempty"` - // OS is the operating system used to build and run the image - OS string `json:"os,omitempty"` -} - -type image struct { - v1Image - History []imageHistory `json:"history,omitempty"` - RootFS *rootFS `json:"rootfs,omitempty"` -} - -type imageHistory struct { - Created time.Time `json:"created"` - Author string `json:"author,omitempty"` - CreatedBy string `json:"created_by,omitempty"` - Comment string `json:"comment,omitempty"` - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -type rootFS struct { - Type string `json:"type"` - DiffIDs []digest.Digest `json:"diff_ids,omitempty"` - BaseLayer string `json:"base_layer,omitempty"` -} - -// genericManifest is an interface for parsing, modifying image manifests and related data. -// Note that the public methods are intended to be a subset of types.Image -// so that embedding a genericManifest into structs works. -// will support v1 one day... -type genericManifest interface { - serialize() ([]byte, error) - manifestMIMEType() string - // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. - // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. - ConfigInfo() types.BlobInfo - // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. - // The result is cached; it is OK to call this however often you need. - ConfigBlob() ([]byte, error) - // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about - // layers in the resulting configuration isn't guaranteed to be returned to due how - // old image manifests work (docker v2s1 especially). - OCIConfig() (*imgspecv1.Image, error) - // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided; Size may be -1. 
- // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfos() []types.BlobInfo - // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. - // It returns false if the manifest does not embed a Docker reference. - // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) - EmbeddedDockerReferenceConflicts(ref reference.Named) bool - imageInspectInfo() (*types.ImageInspectInfo, error) // To be called by inspectManifest - // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. - // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute - // (most importantly it forces us to download the full layers even if they are already present at the destination). - UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool - // UpdatedImage returns a types.Image modified according to options. - // This does not change the state of the original Image object. - UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) -} - -func manifestInstanceFromBlob(src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { - switch mt { - // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . - // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might - // need to happen within the ImageSource. - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json": - return manifestSchema1FromManifest(manblob) - case imgspecv1.MediaTypeImageManifest: - return manifestOCI1FromManifest(src, manblob) - case manifest.DockerV2Schema2MediaType: - return manifestSchema2FromManifest(src, manblob) - case manifest.DockerV2ListMediaType: - return manifestSchema2FromManifestList(src, manblob) - default: - // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time - // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 - // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 - // - // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. - // This makes no real sense, but it happens - // because requests for manifests are - // redirected to a content distribution - // network which is configured that way. 
See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 - return manifestSchema1FromManifest(manblob) - } -} - -// inspectManifest is an implementation of types.Image.Inspect -func inspectManifest(m genericManifest) (*types.ImageInspectInfo, error) { - info, err := m.imageInspectInfo() - if err != nil { - return nil, err - } - layers := m.LayerInfos() - info.Layers = make([]string, len(layers)) - for i, layer := range layers { - info.Layers[i] = layer.Digest.String() - } - return info, nil -} diff --git a/vendor/github.com/containers/image/image/memory.go b/vendor/github.com/containers/image/image/memory.go deleted file mode 100644 index 62995f6188f6..000000000000 --- a/vendor/github.com/containers/image/image/memory.go +++ /dev/null @@ -1,73 +0,0 @@ -package image - -import ( - "context" - - "github.com/pkg/errors" - - "github.com/containers/image/types" -) - -// memoryImage is a mostly-implementation of types.Image assembled from data -// created in memory, used primarily as a return value of types.Image.UpdatedImage -// as a way to carry various structured information in a type-safe and easy-to-use way. -// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone -// collection of all related information, e.g. there is no way to get layer blobs -// from a memoryImage. -type memoryImage struct { - genericManifest - serializedManifest []byte // A private cache for Manifest() -} - -func memoryImageFromManifest(m genericManifest) types.Image { - return &memoryImage{ - genericManifest: m, - serializedManifest: nil, - } -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (i *memoryImage) Reference() types.ImageReference { - // It would really be inappropriate to return the ImageReference of the image this was based on. - return nil -} - -// Close removes resources associated with an initialized UnparsedImage, if any. -func (i *memoryImage) Close() error { - return nil -} - -// Size returns the size of the image as stored, if known, or -1 if not. -func (i *memoryImage) Size() (int64, error) { - return -1, nil -} - -// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. -func (i *memoryImage) Manifest() ([]byte, string, error) { - if i.serializedManifest == nil { - m, err := i.genericManifest.serialize() - if err != nil { - return nil, "", err - } - i.serializedManifest = m - } - return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil -} - -// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. -func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) { - // Modifying an image invalidates signatures; a caller asking the updated image for signatures - // is probably confused. - return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory") -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (i *memoryImage) Inspect() (*types.ImageInspectInfo, error) { - return inspectManifest(i.genericManifest) -} - -// IsMultiImage returns true if the image's manifest is a list of images, false otherwise. 
-func (i *memoryImage) IsMultiImage() bool { - return false -} diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go deleted file mode 100644 index 048387ec3e69..000000000000 --- a/vendor/github.com/containers/image/image/oci.go +++ /dev/null @@ -1,196 +0,0 @@ -package image - -import ( - "encoding/json" - "io/ioutil" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type descriptorOCI1 struct { - descriptor - Annotations map[string]string `json:"annotations,omitempty"` -} - -type manifestOCI1 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of ConfigDescriptor. - SchemaVersion int `json:"schemaVersion"` - ConfigDescriptor descriptorOCI1 `json:"config"` - LayersDescriptors []descriptorOCI1 `json:"layers"` - Annotations map[string]string `json:"annotations,omitempty"` -} - -func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) { - oci := manifestOCI1{src: src} - if err := json.Unmarshal(manifest, &oci); err != nil { - return nil, err - } - return &oci, nil -} - -// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: -func manifestOCI1FromComponents(config descriptorOCI1, src types.ImageSource, configBlob []byte, layers []descriptorOCI1) genericManifest { - return &manifestOCI1{ - src: src, - configBlob: configBlob, - SchemaVersion: 2, - ConfigDescriptor: config, - LayersDescriptors: layers, - } -} - -func (m *manifestOCI1) serialize() ([]byte, error) { - return json.Marshal(*m) -} - -func (m *manifestOCI1) manifestMIMEType() string { - return imgspecv1.MediaTypeImageManifest -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. -func (m *manifestOCI1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size} -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. -func (m *manifestOCI1) ConfigBlob() ([]byte, error) { - if m.configBlob == nil { - if m.src == nil { - return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") - } - stream, _, err := m.src.GetBlob(types.BlobInfo{ - Digest: m.ConfigDescriptor.Digest, - Size: m.ConfigDescriptor.Size, - URLs: m.ConfigDescriptor.URLs, - }) - if err != nil { - return nil, err - } - defer stream.Close() - blob, err := ioutil.ReadAll(stream) - if err != nil { - return nil, err - } - computedDigest := digest.FromBytes(blob) - if computedDigest != m.ConfigDescriptor.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest) - } - m.configBlob = blob - } - return m.configBlob, nil -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about -// layers in the resulting configuration isn't guaranteed to be returned due to how -// old image manifests work (docker v2s1 especially).
-func (m *manifestOCI1) OCIConfig() (*imgspecv1.Image, error) { - cb, err := m.ConfigBlob() - if err != nil { - return nil, err - } - configOCI := &imgspecv1.Image{} - if err := json.Unmarshal(cb, configOCI); err != nil { - return nil, err - } - return configOCI, nil -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *manifestOCI1) LayerInfos() []types.BlobInfo { - blobs := []types.BlobInfo{} - for _, layer := range m.LayersDescriptors { - blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size}) - } - return blobs -} - -// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. -// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) -func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - return false -} - -func (m *manifestOCI1) imageInspectInfo() (*types.ImageInspectInfo, error) { - config, err := m.ConfigBlob() - if err != nil { - return nil, err - } - v1 := &v1Image{} - if err := json.Unmarshal(config, v1); err != nil { - return nil, err - } - return &types.ImageInspectInfo{ - DockerVersion: v1.DockerVersion, - Created: v1.Created, - Labels: v1.Config.Labels, - Architecture: v1.Architecture, - Os: v1.OS, - }, nil -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return false -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. -func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := *m // NOTE: This is not a deep copy, it still shares slices etc. - if options.LayerInfos != nil { - if len(copy.LayersDescriptors) != len(options.LayerInfos) { - return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos)) - } - copy.LayersDescriptors = make([]descriptorOCI1, len(options.LayerInfos)) - for i, info := range options.LayerInfos { - copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType - copy.LayersDescriptors[i].Digest = info.Digest - copy.LayersDescriptors[i].Size = info.Size - } - } - // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care. 
- - switch options.ManifestMIMEType { - case "": // No conversion, OK - case manifest.DockerV2Schema2MediaType: - return copy.convertToManifestSchema2() - default: - return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType) - } - - return memoryImageFromManifest(©), nil -} - -func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) { - // Create a copy of the descriptor. - config := m.ConfigDescriptor.descriptor - - // The only difference between OCI and DockerSchema2 is the mediatypes. The - // media type of the manifest is handled by manifestSchema2FromComponents. - config.MediaType = manifest.DockerV2Schema2ConfigMediaType - - layers := make([]descriptor, len(m.LayersDescriptors)) - for idx := range layers { - layers[idx] = m.LayersDescriptors[idx].descriptor - layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType - } - - // Rather than copying the ConfigBlob now, we just pass m.src to the - // translated manifest, since the only difference is the mediatype of - // descriptors there is no change to any blob stored in m.src. - m1 := manifestSchema2FromComponents(config, m.src, nil, layers) - return memoryImageFromManifest(m1), nil -} diff --git a/vendor/github.com/containers/image/image/oci_test.go b/vendor/github.com/containers/image/image/oci_test.go deleted file mode 100644 index 90fc5fbdeb9a..000000000000 --- a/vendor/github.com/containers/image/image/oci_test.go +++ /dev/null @@ -1,372 +0,0 @@ -package image - -import ( - "bytes" - "encoding/json" - "io" - "io/ioutil" - "path/filepath" - "testing" - "time" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func manifestOCI1FromFixture(t *testing.T, src types.ImageSource, fixture string) genericManifest { - manifest, err := ioutil.ReadFile(filepath.Join("fixtures", fixture)) - require.NoError(t, err) - - m, err := manifestOCI1FromManifest(src, manifest) - require.NoError(t, err) - return m -} - -func manifestOCI1FromComponentsLikeFixture(configBlob []byte) genericManifest { - return manifestOCI1FromComponents(descriptorOCI1{descriptor: descriptor{ - MediaType: imgspecv1.MediaTypeImageConfig, - Size: 5940, - Digest: "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f", - }}, nil, configBlob, []descriptorOCI1{ - {descriptor: descriptor{ - MediaType: imgspecv1.MediaTypeImageLayerGzip, - Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", - Size: 51354364, - }}, - {descriptor: descriptor{ - MediaType: imgspecv1.MediaTypeImageLayerGzip, - Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", - Size: 150, - }}, - {descriptor: descriptor{ - MediaType: imgspecv1.MediaTypeImageLayerGzip, - Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", - Size: 11739507, - }}, - {descriptor: descriptor{ - MediaType: imgspecv1.MediaTypeImageLayerGzip, - Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", - Size: 8841833, - }}, - {descriptor: descriptor{ - MediaType: imgspecv1.MediaTypeImageLayerGzip, - Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", - Size: 291, - }}, - }) -} - 
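The convertToManifestSchema2 path above is worth making concrete: digests and sizes carry over unchanged and only the media types are rewritten, which is why no blob ever has to be re-uploaded during the conversion. Below is a standalone sketch of that idea, reusing the config and first-layer descriptors from the oci1/schema2 fixtures; the types and constants are redeclared locally for illustration rather than imported from the vendored packages this diff removes.

package main

import (
	"encoding/json"
	"fmt"
)

// Media types, inlined from the constants the vendored manifest package defines.
const (
	ociConfigMediaType     = "application/vnd.oci.image.config.v1+json"
	ociLayerMediaType      = "application/vnd.oci.image.layer.v1.tar+gzip"
	schema2MediaType       = "application/vnd.docker.distribution.manifest.v2+json"
	schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
	schema2LayerMediaType  = "application/vnd.docker.image.rootfs.diff.tar.gzip"
)

type descriptor struct {
	MediaType string `json:"mediaType"`
	Size      int64  `json:"size"`
	Digest    string `json:"digest"`
}

type imageManifest struct {
	SchemaVersion int          `json:"schemaVersion"`
	MediaType     string       `json:"mediaType,omitempty"`
	Config        descriptor   `json:"config"`
	Layers        []descriptor `json:"layers"`
}

// toSchema2 rewrites only the media types; digests and sizes are untouched.
func toSchema2(oci imageManifest) imageManifest {
	out := imageManifest{SchemaVersion: 2, MediaType: schema2MediaType, Config: oci.Config}
	out.Config.MediaType = schema2ConfigMediaType
	out.Layers = append([]descriptor(nil), oci.Layers...)
	for i := range out.Layers {
		out.Layers[i].MediaType = schema2LayerMediaType
	}
	return out
}

func main() {
	oci := imageManifest{
		SchemaVersion: 2,
		Config: descriptor{
			MediaType: ociConfigMediaType,
			Size:      5940,
			Digest:    "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f",
		},
		Layers: []descriptor{{
			MediaType: ociLayerMediaType,
			Size:      51354364,
			Digest:    "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
		}},
	}
	converted, _ := json.MarshalIndent(toSchema2(oci), "", "  ")
	fmt.Println(string(converted))
}

The output is a schema2 manifest whose digest and size fields match the corresponding entries of the oci1-to-schema2.json fixture that TestConvertToManifestSchema2 below compares against (the real conversion handles all five fixture layers, not just the first).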
-func TestManifestOCI1FromManifest(t *testing.T) { - // This just tests that the JSON can be loaded; we test that the parsed - // values are correctly returned in tests for the individual getter methods. - _ = manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json") - - _, err := manifestOCI1FromManifest(nil, []byte{}) - assert.Error(t, err) -} - -func TestManifestOCI1FromComponents(t *testing.T) { - // This just smoke-tests that the manifest can be created; we test that the parsed - // values are correctly returned in tests for the individual getter methods. - _ = manifestOCI1FromComponentsLikeFixture(nil) -} - -func TestManifestOCI1Serialize(t *testing.T) { - for _, m := range []genericManifest{ - manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"), - manifestOCI1FromComponentsLikeFixture(nil), - } { - serialized, err := m.serialize() - require.NoError(t, err) - var contents map[string]interface{} - err = json.Unmarshal(serialized, &contents) - require.NoError(t, err) - - original, err := ioutil.ReadFile("fixtures/oci1.json") - require.NoError(t, err) - var originalContents map[string]interface{} - err = json.Unmarshal(original, &originalContents) - require.NoError(t, err) - - // We would ideally like to compare “serialized” with some transformation of - // “original”, but the ordering of fields in JSON maps is undefined, so this is - // easier. - assert.Equal(t, originalContents, contents) - } -} - -func TestManifestOCI1ManifestMIMEType(t *testing.T) { - for _, m := range []genericManifest{ - manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"), - manifestOCI1FromComponentsLikeFixture(nil), - } { - assert.Equal(t, imgspecv1.MediaTypeImageManifest, m.manifestMIMEType()) - } -} - -func TestManifestOCI1ConfigInfo(t *testing.T) { - for _, m := range []genericManifest{ - manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"), - manifestOCI1FromComponentsLikeFixture(nil), - } { - assert.Equal(t, types.BlobInfo{ - Size: 5940, - Digest: "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f", - }, m.ConfigInfo()) - } -} - -func TestManifestOCI1ConfigBlob(t *testing.T) { - realConfigJSON, err := ioutil.ReadFile("fixtures/oci1-config.json") - require.NoError(t, err) - - for _, c := range []struct { - cbISfn func(digest digest.Digest) (io.ReadCloser, int64, error) - blob []byte - }{ - // Success - {func(digest digest.Digest) (io.ReadCloser, int64, error) { - return ioutil.NopCloser(bytes.NewReader(realConfigJSON)), int64(len(realConfigJSON)), nil - }, realConfigJSON}, - // Various kinds of failures - {nil, nil}, - {func(digest digest.Digest) (io.ReadCloser, int64, error) { - return nil, -1, errors.New("Error returned from GetBlob") - }, nil}, - {func(digest digest.Digest) (io.ReadCloser, int64, error) { - reader, writer := io.Pipe() - writer.CloseWithError(errors.New("Expected error reading input in ConfigBlob")) - return reader, 1, nil - }, nil}, - {func(digest digest.Digest) (io.ReadCloser, int64, error) { - nonmatchingJSON := []byte("This does not match ConfigDescriptor.Digest") - return ioutil.NopCloser(bytes.NewReader(nonmatchingJSON)), int64(len(nonmatchingJSON)), nil - }, nil}, - } { - var src types.ImageSource - if c.cbISfn != nil { - src = configBlobImageSource{unusedImageSource{}, c.cbISfn} - } else { - src = nil - } - m := manifestOCI1FromFixture(t, src, "oci1.json") - blob, err := m.ConfigBlob() - if c.blob != nil { - assert.NoError(t, err) - assert.Equal(t, c.blob, blob) - } else { - assert.Error(t, err) - } - } - - // Generally configBlob
should match ConfigInfo; we don’t quite need it to, and this will - // guarantee that the returned object is returning the original contents instead - // of reading an object from elsewhere. - configBlob := []byte("config blob which does not match ConfigInfo") - // This just tests that the manifest can be created; we test that the parsed - // values are correctly returned in tests for the individual getter methods. - m := manifestOCI1FromComponentsLikeFixture(configBlob) - cb, err := m.ConfigBlob() - require.NoError(t, err) - assert.Equal(t, configBlob, cb) -} - -func TestManifestOCI1LayerInfo(t *testing.T) { - for _, m := range []genericManifest{ - manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"), - manifestOCI1FromComponentsLikeFixture(nil), - } { - assert.Equal(t, []types.BlobInfo{ - { - Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", - Size: 51354364, - }, - { - Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", - Size: 150, - }, - { - Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", - Size: 11739507, - }, - { - Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", - Size: 8841833, - }, - { - Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", - Size: 291, - }, - }, m.LayerInfos()) - } -} - -func TestManifestOCI1EmbeddedDockerReferenceConflicts(t *testing.T) { - for _, m := range []genericManifest{ - manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"), - manifestOCI1FromComponentsLikeFixture(nil), - } { - for _, name := range []string{"busybox", "example.com:5555/ns/repo:tag"} { - ref, err := reference.ParseNormalizedNamed(name) - require.NoError(t, err) - conflicts := m.EmbeddedDockerReferenceConflicts(ref) - assert.False(t, conflicts) - } - } -} - -func TestManifestOCI1ImageInspectInfo(t *testing.T) { - configJSON, err := ioutil.ReadFile("fixtures/oci1-config.json") - require.NoError(t, err) - - m := manifestOCI1FromComponentsLikeFixture(configJSON) - ii, err := m.imageInspectInfo() - require.NoError(t, err) - assert.Equal(t, types.ImageInspectInfo{ - Tag: "", - Created: time.Date(2016, 9, 23, 23, 20, 45, 789764590, time.UTC), - DockerVersion: "1.12.1", - Labels: map[string]string{}, - Architecture: "amd64", - Os: "linux", - Layers: nil, - }, *ii) - - // nil configBlob will trigger an error in m.ConfigBlob() - m = manifestOCI1FromComponentsLikeFixture(nil) - _, err = m.imageInspectInfo() - assert.Error(t, err) - - m = manifestOCI1FromComponentsLikeFixture([]byte("invalid JSON")) - _, err = m.imageInspectInfo() - assert.Error(t, err) -} - -func TestManifestOCI1UpdatedImageNeedsLayerDiffIDs(t *testing.T) { - for _, m := range []genericManifest{ - manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"), - manifestOCI1FromComponentsLikeFixture(nil), - } { - assert.False(t, m.UpdatedImageNeedsLayerDiffIDs(types.ManifestUpdateOptions{ - ManifestMIMEType: manifest.DockerV2Schema2MediaType, - })) - } -} - -// oci1ImageSource is plausible enough for schema conversions in manifestOCI1.UpdatedImage() to work. 
-type oci1ImageSource struct { - configBlobImageSource - ref reference.Named -} - -func (OCIis *oci1ImageSource) Reference() types.ImageReference { - return refImageReferenceMock{OCIis.ref} -} - -func newOCI1ImageSource(t *testing.T, dockerRef string) *oci1ImageSource { - realConfigJSON, err := ioutil.ReadFile("fixtures/oci1-config.json") - require.NoError(t, err) - - ref, err := reference.ParseNormalizedNamed(dockerRef) - require.NoError(t, err) - - return &oci1ImageSource{ - configBlobImageSource: configBlobImageSource{ - f: func(digest digest.Digest) (io.ReadCloser, int64, error) { - return ioutil.NopCloser(bytes.NewReader(realConfigJSON)), int64(len(realConfigJSON)), nil - }, - }, - ref: ref, - } -} - -func TestManifestOCI1UpdatedImage(t *testing.T) { - originalSrc := newOCI1ImageSource(t, "httpd:latest") - original := manifestOCI1FromFixture(t, originalSrc, "oci1.json") - - // LayerInfos: - layerInfos := append(original.LayerInfos()[1:], original.LayerInfos()[0]) - res, err := original.UpdatedImage(types.ManifestUpdateOptions{ - LayerInfos: layerInfos, - }) - require.NoError(t, err) - assert.Equal(t, layerInfos, res.LayerInfos()) - _, err = original.UpdatedImage(types.ManifestUpdateOptions{ - LayerInfos: append(layerInfos, layerInfos[0]), - }) - assert.Error(t, err) - - // EmbeddedDockerReference: - // … is ignored - embeddedRef, err := reference.ParseNormalizedNamed("busybox") - require.NoError(t, err) - res, err = original.UpdatedImage(types.ManifestUpdateOptions{ - EmbeddedDockerReference: embeddedRef, - }) - require.NoError(t, err) - nonEmbeddedRef, err := reference.ParseNormalizedNamed("notbusybox:notlatest") - require.NoError(t, err) - conflicts := res.EmbeddedDockerReferenceConflicts(nonEmbeddedRef) - assert.False(t, conflicts) - - // ManifestMIMEType: - // Only smoke-test the valid conversions, detailed tests are below. (This also verifies that “original” is not affected.) - for _, mime := range []string{ - manifest.DockerV2Schema2MediaType, - } { - _, err = original.UpdatedImage(types.ManifestUpdateOptions{ - ManifestMIMEType: mime, - InformationOnly: types.ManifestUpdateInformation{ - Destination: &memoryImageDest{ref: originalSrc.ref}, - }, - }) - assert.NoError(t, err, mime) - } - for _, mime := range []string{ - imgspecv1.MediaTypeImageManifest, // This indicates a confused caller, not a no-op. 
- "this is invalid", - } { - _, err = original.UpdatedImage(types.ManifestUpdateOptions{ - ManifestMIMEType: mime, - }) - assert.Error(t, err, mime) - } - - // m hasn’t been changed: - m2 := manifestOCI1FromFixture(t, originalSrc, "oci1.json") - typedOriginal, ok := original.(*manifestOCI1) - require.True(t, ok) - typedM2, ok := m2.(*manifestOCI1) - require.True(t, ok) - assert.Equal(t, *typedM2, *typedOriginal) -} - -func TestConvertToManifestSchema2(t *testing.T) { - originalSrc := newOCI1ImageSource(t, "httpd-copy:latest") - original := manifestOCI1FromFixture(t, originalSrc, "oci1.json") - res, err := original.UpdatedImage(types.ManifestUpdateOptions{ - ManifestMIMEType: manifest.DockerV2Schema2MediaType, - }) - require.NoError(t, err) - - convertedJSON, mt, err := res.Manifest() - require.NoError(t, err) - assert.Equal(t, manifest.DockerV2Schema2MediaType, mt) - - byHandJSON, err := ioutil.ReadFile("fixtures/oci1-to-schema2.json") - require.NoError(t, err) - var converted, byHand map[string]interface{} - err = json.Unmarshal(byHandJSON, &byHand) - require.NoError(t, err) - err = json.Unmarshal(convertedJSON, &converted) - require.NoError(t, err) - assert.Equal(t, byHand, converted) - - // FIXME? Test also the various failure cases, if only to see that we don't crash? -} diff --git a/vendor/github.com/containers/image/image/sourced.go b/vendor/github.com/containers/image/image/sourced.go deleted file mode 100644 index ef35b3c32aee..000000000000 --- a/vendor/github.com/containers/image/image/sourced.go +++ /dev/null @@ -1,90 +0,0 @@ -// Package image consolidates knowledge about various container image formats -// (as opposed to image storage mechanisms, which are handled by types.ImageSource) -// and exposes all of them using an unified interface. -package image - -import ( - "github.com/containers/image/manifest" - "github.com/containers/image/types" -) - -// FromSource returns a types.Image implementation for source. -// The caller must call .Close() on the returned Image. -// -// FromSource “takes ownership” of the input ImageSource and will call src.Close() -// when the image is closed. (This does not prevent callers from using both the -// Image and ImageSource objects simultaneously, but it means that they only need to -// the Image.) -// -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. -func FromSource(src types.ImageSource) (types.Image, error) { - return FromUnparsedImage(UnparsedFromSource(src)) -} - -// sourcedImage is a general set of utilities for working with container images, -// whatever is their underlying location (i.e. dockerImageSource-independent). -// Note the existence of skopeo/docker.Image: some instances of a `types.Image` -// may not be a `sourcedImage` directly. However, most users of `types.Image` -// do not care, and those who care about `skopeo/docker.Image` know they do. -type sourcedImage struct { - *UnparsedImage - manifestBlob []byte - manifestMIMEType string - // genericManifest contains data corresponding to manifestBlob. - // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest - // if you want to preserve the original manifest; use manifestBlob directly. - genericManifest -} - -// FromUnparsedImage returns a types.Image implementation for unparsed. 
-// The caller must call .Close() on the returned Image. -// -// FromUnparsedImage “takes ownership” of the input UnparsedImage and will call unparsed.Close() -// when the image is closed. (This does not prevent callers from using both the -// UnparsedImage and ImageSource objects simultaneously, but it means that they only need to -// keep a reference to the Image.) -func FromUnparsedImage(unparsed *UnparsedImage) (types.Image, error) { - // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: - // we want to be able to use unparsed.src. We could make that an explicit interface, but, well, - // this is the only UnparsedImage implementation around, anyway. - - // Also, we do not explicitly implement types.Image.Close; we let the implementation fall through to - // unparsed.Close. - - // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). - manifestBlob, manifestMIMEType, err := unparsed.Manifest() - if err != nil { - return nil, err - } - - parsedManifest, err := manifestInstanceFromBlob(unparsed.src, manifestBlob, manifestMIMEType) - if err != nil { - return nil, err - } - - return &sourcedImage{ - UnparsedImage: unparsed, - manifestBlob: manifestBlob, - manifestMIMEType: manifestMIMEType, - genericManifest: parsedManifest, - }, nil -} - -// Size returns the size of the image as stored, if it's known, or -1 if it isn't. -func (i *sourcedImage) Size() (int64, error) { - return -1, nil -} - -// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched. -func (i *sourcedImage) Manifest() ([]byte, string, error) { - return i.manifestBlob, i.manifestMIMEType, nil -} - -func (i *sourcedImage) Inspect() (*types.ImageInspectInfo, error) { - return inspectManifest(i.genericManifest) -} - -func (i *sourcedImage) IsMultiImage() bool { - return i.manifestMIMEType == manifest.DockerV2ListMediaType -} diff --git a/vendor/github.com/containers/image/image/unparsed.go b/vendor/github.com/containers/image/image/unparsed.go deleted file mode 100644 index 483cfd04f141..000000000000 --- a/vendor/github.com/containers/image/image/unparsed.go +++ /dev/null @@ -1,85 +0,0 @@ -package image - -import ( - "context" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// UnparsedImage implements types.UnparsedImage. -type UnparsedImage struct { - src types.ImageSource - cachedManifest []byte // A private cache for Manifest(); nil if not yet known. - // A private cache for Manifest(), may be the empty string if guessing failed. - // Valid iff cachedManifest is not nil. - cachedManifestMIMEType string - cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known. -} - -// UnparsedFromSource returns a types.UnparsedImage implementation for source. -// The caller must call .Close() on the returned UnparsedImage. -// -// UnparsedFromSource “takes ownership” of the input ImageSource and will call src.Close() -// when the image is closed. (This does not prevent callers from using both the -// UnparsedImage and ImageSource objects simultaneously, but it means that they only need to -// keep a reference to the UnparsedImage.)
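Taken together, sourced.go and unparsed.go implement the verification-friendly flow that the FromSource note above prescribes: build an UnparsedImage, fetch and check what you need, then parse. A minimal caller sketch, written against the vendored API this diff removes (so it will not compile once the vendor tree is gone; a real caller would verify the fetched signatures against a policy rather than merely fetch them):

import (
	"context"

	"github.com/containers/image/image"
	"github.com/containers/image/types"
)

// inspectVerified is a hypothetical helper, shown for orientation only.
func inspectVerified(ctx context.Context, src types.ImageSource) (*types.ImageInspectInfo, error) {
	unparsed := image.UnparsedFromSource(src) // takes ownership of src
	if _, err := unparsed.Signatures(ctx); err != nil {
		unparsed.Close()
		return nil, err
	}
	// ... signature verification against the caller's policy would happen here ...
	img, err := image.FromUnparsedImage(unparsed) // digest-checks the manifest, then parses it
	if err != nil {
		unparsed.Close()
		return nil, err
	}
	defer img.Close() // falls through to unparsed.Close(), which closes src
	return img.Inspect()
}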
-func UnparsedFromSource(src types.ImageSource) *UnparsedImage { - return &UnparsedImage{src: src} -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (i *UnparsedImage) Reference() types.ImageReference { - return i.src.Reference() -} - -// Close removes resources associated with an initialized UnparsedImage, if any. -func (i *UnparsedImage) Close() error { - return i.src.Close() -} - -// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. -func (i *UnparsedImage) Manifest() ([]byte, string, error) { - if i.cachedManifest == nil { - m, mt, err := i.src.GetManifest() - if err != nil { - return nil, "", err - } - - // ImageSource.GetManifest does not do digest verification, but we do; - // this immediately protects also any user of types.Image. - ref := i.Reference().DockerReference() - if ref != nil { - if canonical, ok := ref.(reference.Canonical); ok { - digest := digest.Digest(canonical.Digest()) - matches, err := manifest.MatchesDigest(m, digest) - if err != nil { - return nil, "", errors.Wrap(err, "Error computing manifest digest") - } - if !matches { - return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest) - } - } - } - - i.cachedManifest = m - i.cachedManifestMIMEType = mt - } - return i.cachedManifest, i.cachedManifestMIMEType, nil -} - -// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. -func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { - if i.cachedSignatures == nil { - sigs, err := i.src.GetSignatures(ctx) - if err != nil { - return nil, err - } - i.cachedSignatures = sigs - } - return i.cachedSignatures, nil -} diff --git a/vendor/github.com/containers/image/manifest/fixtures/non-json.manifest.json b/vendor/github.com/containers/image/manifest/fixtures/non-json.manifest.json deleted file mode 100644 index f89272127575..000000000000 Binary files a/vendor/github.com/containers/image/manifest/fixtures/non-json.manifest.json and /dev/null differ diff --git a/vendor/github.com/containers/image/manifest/fixtures/ociv1.image.index.json b/vendor/github.com/containers/image/manifest/fixtures/ociv1.image.index.json deleted file mode 100644 index 066f058db132..000000000000 --- a/vendor/github.com/containers/image/manifest/fixtures/ociv1.image.index.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "schemaVersion": 2, - "manifests": [ - { - "mediaType": "application/vnd.oci.image.manifest.v1+json", - "size": 7143, - "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - "platform": { - "architecture": "ppc64le", - "os": "linux" - } - }, - { - "mediaType": "application/vnd.oci.image.manifest.v1+json", - "size": 7682, - "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270", - "platform": { - "architecture": "amd64", - "os": "linux", - "os.features": [ - "sse4" - ] - } - } - ], - "annotations": { - "com.example.key1": "value1", - "com.example.key2": "value2" - } -} diff --git a/vendor/github.com/containers/image/manifest/fixtures/ociv1.manifest.json b/vendor/github.com/containers/image/manifest/fixtures/ociv1.manifest.json deleted file mode 100644 index 1e1047ca7f3f..000000000000 --- 
a/vendor/github.com/containers/image/manifest/fixtures/ociv1.manifest.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "schemaVersion": 2, - "config": { - "mediaType": "application/vnd.oci.image.config.v1+json", - "size": 7023, - "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7" - }, - "layers": [ - { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 32654, - "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f" - }, - { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 16724, - "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b" - }, - { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "size": 73109, - "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736" - } - ], - "annotations": { - "com.example.key1": "value1", - "com.example.key2": "value2" - } -} diff --git a/vendor/github.com/containers/image/manifest/fixtures/unknown-version.manifest.json b/vendor/github.com/containers/image/manifest/fixtures/unknown-version.manifest.json deleted file mode 100644 index b0f34b631c71..000000000000 --- a/vendor/github.com/containers/image/manifest/fixtures/unknown-version.manifest.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "schemaVersion": 99999, - "name": "mitr/noversion-nonsense", - "tag": "latest" -} diff --git a/vendor/github.com/containers/image/manifest/fixtures/v2list.manifest.json b/vendor/github.com/containers/image/manifest/fixtures/v2list.manifest.json deleted file mode 100644 index 1bf9896e04b8..000000000000 --- a/vendor/github.com/containers/image/manifest/fixtures/v2list.manifest.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", - "manifests": [ - { - "mediaType": "application/vnd.docker.distribution.manifest.v1+json", - "size": 2094, - "digest": "sha256:7820f9a86d4ad15a2c4f0c0e5479298df2aa7c2f6871288e2ef8546f3e7b6783", - "platform": { - "architecture": "ppc64le", - "os": "linux" - } - }, - { - "mediaType": "application/vnd.docker.distribution.manifest.v1+json", - "size": 1922, - "digest": "sha256:ae1b0e06e8ade3a11267564a26e750585ba2259c0ecab59ab165ad1af41d1bdd", - "platform": { - "architecture": "amd64", - "os": "linux", - "features": [ - "sse" - ] - } - }, - { - "mediaType": "application/vnd.docker.distribution.manifest.v1+json", - "size": 2084, - "digest": "sha256:e4c0df75810b953d6717b8f8f28298d73870e8aa2a0d5e77b8391f16fdfbbbe2", - "platform": { - "architecture": "s390x", - "os": "linux" - } - }, - { - "mediaType": "application/vnd.docker.distribution.manifest.v1+json", - "size": 2084, - "digest": "sha256:07ebe243465ef4a667b78154ae6c3ea46fdb1582936aac3ac899ea311a701b40", - "platform": { - "architecture": "arm", - "os": "linux", - "variant": "armv7" - } - }, - { - "mediaType": "application/vnd.docker.distribution.manifest.v1+json", - "size": 2090, - "digest": "sha256:fb2fc0707b86dafa9959fe3d29e66af8787aee4d9a23581714be65db4265ad8a", - "platform": { - "architecture": "arm64", - "os": "linux", - "variant": "armv8" - } - } - ] -} diff --git a/vendor/github.com/containers/image/manifest/fixtures/v2s1-invalid-signatures.manifest.json b/vendor/github.com/containers/image/manifest/fixtures/v2s1-invalid-signatures.manifest.json deleted file mode 100644 index 8dfefd4e1b71..000000000000 --- a/vendor/github.com/containers/image/manifest/fixtures/v2s1-invalid-signatures.manifest.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "schemaVersion": 1, - 
"name": "mitr/buxybox", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - ], - "history": [ - ], - "signatures": 1 -} diff --git a/vendor/github.com/containers/image/manifest/fixtures/v2s1-unsigned.manifest.json b/vendor/github.com/containers/image/manifest/fixtures/v2s1-unsigned.manifest.json deleted file mode 100644 index 77cd6674fd74..000000000000 --- a/vendor/github.com/containers/image/manifest/fixtures/v2s1-unsigned.manifest.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "schemaVersion": 1, - "name": "mitr/buxybox", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - }, - { - "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - }, - { - "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"f1b5eb0a1215f663765d509b6cdf3841bc2bcff0922346abb943d1342d469a97\",\"parent\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"created\":\"2016-03-03T11:29:44.222098366Z\",\"container\":\"c0924f5b281a1992127d0afc065e59548ded8880b08aea4debd56d4497acb17a\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) LABEL Checksum=4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\"],\"Image\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Checksum\":\"4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\",\"Name\":\"atomic-test-2\"}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Checksum\":\"4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\",\"Name\":\"atomic-test-2\"}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": "{\"id\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"parent\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"created\":\"2016-03-03T11:29:38.563048924Z\",\"container\":\"fd4cf54dcd239fbae9bdade9db48e41880b436d27cb5313f60952a46ab04deff\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) LABEL 
Name=atomic-test-2\"],\"Image\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Name\":\"atomic-test-2\"}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Name\":\"atomic-test-2\"}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": "{\"id\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"created\":\"2016-03-03T11:29:32.948089874Z\",\"container\":\"56f0fe1dfc95755dd6cda10f7215c9937a8d9c6348d079c581a261fd4c2f3a5f\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) MAINTAINER \\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/containers/image/manifest/fixtures/v2s1.manifest.json b/vendor/github.com/containers/image/manifest/fixtures/v2s1.manifest.json deleted file mode 100644 index d384e3e6e8a7..000000000000 --- a/vendor/github.com/containers/image/manifest/fixtures/v2s1.manifest.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "schemaVersion": 1, - "name": "mitr/buxybox", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - }, - { - "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - }, - { - "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - } - ], - "history": [ - { - "v1Compatibility": 
"{\"id\":\"f1b5eb0a1215f663765d509b6cdf3841bc2bcff0922346abb943d1342d469a97\",\"parent\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"created\":\"2016-03-03T11:29:44.222098366Z\",\"container\":\"c0924f5b281a1992127d0afc065e59548ded8880b08aea4debd56d4497acb17a\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) LABEL Checksum=4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\"],\"Image\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Checksum\":\"4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\",\"Name\":\"atomic-test-2\"}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Checksum\":\"4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\",\"Name\":\"atomic-test-2\"}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": "{\"id\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"parent\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"created\":\"2016-03-03T11:29:38.563048924Z\",\"container\":\"fd4cf54dcd239fbae9bdade9db48e41880b436d27cb5313f60952a46ab04deff\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) LABEL Name=atomic-test-2\"],\"Image\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Name\":\"atomic-test-2\"}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Name\":\"atomic-test-2\"}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": 
"{\"id\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"created\":\"2016-03-03T11:29:32.948089874Z\",\"container\":\"56f0fe1dfc95755dd6cda10f7215c9937a8d9c6348d079c581a261fd4c2f3a5f\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) MAINTAINER \\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - } - ], - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "OZ45:U3IG:TDOI:PMBD:NGP2:LDIW:II2U:PSBI:MMCZ:YZUP:TUUO:XPZT", - "kty": "EC", - "x": "ReC5c0J9tgXSdUL4_xzEt5RsD8kFt2wWSgJcpAcOQx8", - "y": "3sBGEqQ3ZMeqPKwQBAadN2toOUEASha18xa0WwsDF-M" - }, - "alg": "ES256" - }, - "signature": "dV1paJ3Ck1Ph4FcEhg_frjqxdlGdI6-ywRamk6CvMOcaOEUdCWCpCPQeBQpD2N6tGjkoG1BbstkFNflllfenCw", - "protected": "eyJmb3JtYXRMZW5ndGgiOjU0NzgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNi0wNC0xOFQyMDo1NDo0MloifQ" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/containers/image/manifest/fixtures/v2s2.manifest.json b/vendor/github.com/containers/image/manifest/fixtures/v2s2.manifest.json deleted file mode 100644 index 198da23f9264..000000000000 --- a/vendor/github.com/containers/image/manifest/fixtures/v2s2.manifest.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "config": { - "mediaType": "application/vnd.docker.container.image.v1+json", - "size": 7023, - "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7" - }, - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 32654, - "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 16724, - "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 73109, - "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/containers/image/manifest/fixtures/v2s2nomime.manifest.json b/vendor/github.com/containers/image/manifest/fixtures/v2s2nomime.manifest.json deleted file mode 100644 index a0b06c233b52..000000000000 --- a/vendor/github.com/containers/image/manifest/fixtures/v2s2nomime.manifest.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "schemaVersion": 2, - "config": { - "mediaType": 
"application/vnd.docker.container.image.v1+json", - "size": 7023, - "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7" - }, - "layers": [ - ] -} diff --git a/vendor/github.com/containers/image/manifest/fixtures_info_test.go b/vendor/github.com/containers/image/manifest/fixtures_info_test.go deleted file mode 100644 index 2607125be9c9..000000000000 --- a/vendor/github.com/containers/image/manifest/fixtures_info_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package manifest - -import "github.com/opencontainers/go-digest" - -const ( - // TestV2S2ManifestDigest is the Docker manifest digest of "v2s2.manifest.json" - TestDockerV2S2ManifestDigest = digest.Digest("sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55") - // TestV2S1ManifestDigest is the Docker manifest digest of "v2s1.manifest.json" - TestDockerV2S1ManifestDigest = digest.Digest("sha256:077594da70fc17ec2c93cfa4e6ed1fcc26992851fb2c71861338aaf4aa9e41b1") - // TestV2S1UnsignedManifestDigest is the Docker manifest digest of "v2s1unsigned.manifest.json" - TestDockerV2S1UnsignedManifestDigest = digest.Digest("sha256:077594da70fc17ec2c93cfa4e6ed1fcc26992851fb2c71861338aaf4aa9e41b1") -) diff --git a/vendor/github.com/containers/image/manifest/manifest.go b/vendor/github.com/containers/image/manifest/manifest.go deleted file mode 100644 index 605bab1db786..000000000000 --- a/vendor/github.com/containers/image/manifest/manifest.go +++ /dev/null @@ -1,144 +0,0 @@ -package manifest - -import ( - "encoding/json" - - "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// FIXME: Should we just use docker/distribution and docker/docker implementations directly? - -// FIXME(runcom, mitr): should we havea mediatype pkg?? -const ( - // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 - DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json" - // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature - DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" - // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2 - DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json" - // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs. - DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json" - // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers. - DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" - // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list - DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json" - // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers. - DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" -) - -// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource -// should request from the backend unless directed otherwise. -var DefaultRequestedManifestMIMETypes = []string{ - imgspecv1.MediaTypeImageManifest, - DockerV2Schema2MediaType, - DockerV2Schema1SignedMediaType, - DockerV2Schema1MediaType, - DockerV2ListMediaType, -} - -// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. -// FIXME? 
We should, in general, prefer an out-of-band MIME type instead of blindly parsing the manifest, -// but we may not have such metadata available (e.g. when the manifest is a local file). -func GuessMIMEType(manifest []byte) string { - // A subset of manifest fields; the rest is silently ignored by json.Unmarshal. - // Also docker/distribution/manifest.Versioned. - meta := struct { - MediaType string `json:"mediaType"` - SchemaVersion int `json:"schemaVersion"` - Signatures interface{} `json:"signatures"` - }{} - if err := json.Unmarshal(manifest, &meta); err != nil { - return "" - } - - switch meta.MediaType { - case DockerV2Schema2MediaType, DockerV2ListMediaType: // A recognized type. - return meta.MediaType - } - // this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest. - switch meta.SchemaVersion { - case 1: - if meta.Signatures != nil { - return DockerV2Schema1SignedMediaType - } - return DockerV2Schema1MediaType - case 2: - // best effort to understand if this is an OCI image since mediaType - // isn't in the manifest for OCI anymore - // for docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess. - ociMan := struct { - Config struct { - MediaType string `json:"mediaType"` - } `json:"config"` - Layers []imgspecv1.Descriptor `json:"layers"` - }{} - if err := json.Unmarshal(manifest, &ociMan); err != nil { - return "" - } - if ociMan.Config.MediaType == imgspecv1.MediaTypeImageConfig && len(ociMan.Layers) != 0 { - return imgspecv1.MediaTypeImageManifest - } - ociIndex := struct { - Manifests []imgspecv1.Descriptor `json:"manifests"` - }{} - if err := json.Unmarshal(manifest, &ociIndex); err != nil { - return "" - } - if len(ociIndex.Manifests) != 0 && ociIndex.Manifests[0].MediaType == imgspecv1.MediaTypeImageManifest { - return imgspecv1.MediaTypeImageIndex - } - return DockerV2Schema2MediaType - } - return "" -} - -// Digest returns the digest of a Docker manifest, with any necessary implied transformations like stripping v2s1 signatures. -func Digest(manifest []byte) (digest.Digest, error) { - if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType { - sig, err := libtrust.ParsePrettySignature(manifest, "signatures") - if err != nil { - return "", err - } - manifest, err = sig.Payload() - if err != nil { - // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string - // that libtrust itself has josebase64UrlEncode()d - return "", err - } - } - - return digest.FromBytes(manifest), nil -} - -// MatchesDigest returns true iff the manifest matches expectedDigest. -// Error may be set if this returns false. -// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified, -// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob. -func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) { - // This should eventually support various digest types. - actualDigest, err := Digest(manifest) - if err != nil { - return false, err - } - return expectedDigest == actualDigest, nil -} - -// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest. -// This is useful to make the manifest acceptable to a Docker Registry (even though nothing needs or wants the JWS signature). 
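For orientation while reviewing the removal: the two helpers above are normally used together, first classifying the manifest bytes and then computing the canonical digest. A minimal sketch under the pre-removal import path; the fixture path is a hypothetical example, not part of this change:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/image/manifest"
)

func main() {
	// Hypothetical local manifest file, for illustration only.
	raw, err := ioutil.ReadFile("fixtures/v2s1.manifest.json")
	if err != nil {
		panic(err)
	}
	mime := manifest.GuessMIMEType(raw) // "" if unrecognized
	dgst, err := manifest.Digest(raw)   // strips v2s1 JWS signatures before hashing
	if err != nil {
		panic(err)
	}
	fmt.Println(mime, dgst)
}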
-func AddDummyV2S1Signature(manifest []byte) ([]byte, error) { - key, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, err // Coverage: This can fail only if rand.Reader fails. - } - - js, err := libtrust.NewJSONSignature(manifest) - if err != nil { - return nil, err - } - if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails. - return nil, err - } - return js.PrettySignature("signatures") -} diff --git a/vendor/github.com/containers/image/manifest/manifest_test.go b/vendor/github.com/containers/image/manifest/manifest_test.go deleted file mode 100644 index 97febcc2d8f4..000000000000 --- a/vendor/github.com/containers/image/manifest/manifest_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package manifest - -import ( - "io/ioutil" - "path/filepath" - "testing" - - "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - digestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -) - -func TestGuessMIMEType(t *testing.T) { - cases := []struct { - path string - mimeType string - }{ - {"v2s2.manifest.json", DockerV2Schema2MediaType}, - {"v2list.manifest.json", DockerV2ListMediaType}, - {"v2s1.manifest.json", DockerV2Schema1SignedMediaType}, - {"v2s1-unsigned.manifest.json", DockerV2Schema1MediaType}, - {"v2s1-invalid-signatures.manifest.json", DockerV2Schema1SignedMediaType}, - {"v2s2nomime.manifest.json", DockerV2Schema2MediaType}, // It is unclear whether this one is legal, but we should guess v2s2 if anything at all. - {"unknown-version.manifest.json", ""}, - {"non-json.manifest.json", ""}, // Not a manifest (nor JSON) at all - {"ociv1.manifest.json", imgspecv1.MediaTypeImageManifest}, - {"ociv1.image.index.json", imgspecv1.MediaTypeImageIndex}, - } - - for _, c := range cases { - manifest, err := ioutil.ReadFile(filepath.Join("fixtures", c.path)) - require.NoError(t, err) - mimeType := GuessMIMEType(manifest) - assert.Equal(t, c.mimeType, mimeType, c.path) - } -} - -func TestDigest(t *testing.T) { - cases := []struct { - path string - expectedDigest digest.Digest - }{ - {"v2s2.manifest.json", TestDockerV2S2ManifestDigest}, - {"v2s1.manifest.json", TestDockerV2S1ManifestDigest}, - {"v2s1-unsigned.manifest.json", TestDockerV2S1UnsignedManifestDigest}, - } - for _, c := range cases { - manifest, err := ioutil.ReadFile(filepath.Join("fixtures", c.path)) - require.NoError(t, err) - actualDigest, err := Digest(manifest) - require.NoError(t, err) - assert.Equal(t, c.expectedDigest, actualDigest) - } - - manifest, err := ioutil.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json") - require.NoError(t, err) - actualDigest, err := Digest(manifest) - assert.Error(t, err) - - actualDigest, err = Digest([]byte{}) - require.NoError(t, err) - assert.Equal(t, digest.Digest(digestSha256EmptyTar), actualDigest) -} - -func TestMatchesDigest(t *testing.T) { - cases := []struct { - path string - expectedDigest digest.Digest - result bool - }{ - // Success - {"v2s2.manifest.json", TestDockerV2S2ManifestDigest, true}, - {"v2s1.manifest.json", TestDockerV2S1ManifestDigest, true}, - // No match (switched s1/s2) - {"v2s2.manifest.json", TestDockerV2S1ManifestDigest, false}, - {"v2s1.manifest.json", TestDockerV2S2ManifestDigest, false}, - // Unrecognized algorithm - {"v2s2.manifest.json", digest.Digest("md5:2872f31c5c1f62a694fbd20c1e85257c"), 
false}, - // Mangled format - {"v2s2.manifest.json", digest.Digest(TestDockerV2S2ManifestDigest.String() + "abc"), false}, - {"v2s2.manifest.json", digest.Digest(TestDockerV2S2ManifestDigest.String()[:20]), false}, - {"v2s2.manifest.json", digest.Digest(""), false}, - } - for _, c := range cases { - manifest, err := ioutil.ReadFile(filepath.Join("fixtures", c.path)) - require.NoError(t, err) - res, err := MatchesDigest(manifest, c.expectedDigest) - require.NoError(t, err) - assert.Equal(t, c.result, res) - } - - manifest, err := ioutil.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json") - require.NoError(t, err) - // Even a correct SHA256 hash is rejected if we can't strip the JSON signature. - res, err := MatchesDigest(manifest, digest.FromBytes(manifest)) - assert.False(t, res) - assert.Error(t, err) - - res, err = MatchesDigest([]byte{}, digest.Digest(digestSha256EmptyTar)) - assert.True(t, res) - assert.NoError(t, err) -} - -func TestAddDummyV2S1Signature(t *testing.T) { - manifest, err := ioutil.ReadFile("fixtures/v2s1-unsigned.manifest.json") - require.NoError(t, err) - - signedManifest, err := AddDummyV2S1Signature(manifest) - require.NoError(t, err) - - sig, err := libtrust.ParsePrettySignature(signedManifest, "signatures") - require.NoError(t, err) - signaturePayload, err := sig.Payload() - require.NoError(t, err) - assert.Equal(t, manifest, signaturePayload) - - _, err = AddDummyV2S1Signature([]byte("}this is invalid JSON")) - assert.Error(t, err) -} diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go deleted file mode 100644 index 0536730985b0..000000000000 --- a/vendor/github.com/containers/image/oci/layout/oci_dest.go +++ /dev/null @@ -1,226 +0,0 @@ -package layout - -import ( - "encoding/json" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - "github.com/pkg/errors" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - imgspec "github.com/opencontainers/image-spec/specs-go" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -type ociImageDestination struct { - ref ociReference - index imgspecv1.Index -} - -// newImageDestination returns an ImageDestination for writing to an existing directory. -func newImageDestination(ref ociReference) types.ImageDestination { - index := imgspecv1.Index{ - Versioned: imgspec.Versioned{ - SchemaVersion: 2, - }, - } - return &ociImageDestination{ref: ref, index: index} -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *ociImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *ociImageDestination) Close() error { - return nil -} - -func (d *ociImageDestination) SupportedManifestMIMETypes() []string { - return []string{ - imgspecv1.MediaTypeImageManifest, - } -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. 
-func (d *ociImageDestination) SupportsSignatures() error { - return errors.Errorf("Pushing signatures for OCI images is not supported") -} - -// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. -func (d *ociImageDestination) ShouldCompressLayers() bool { - return true -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *ociImageDestination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *ociImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { - if err := ensureDirectoryExists(d.ref.dir); err != nil { - return types.BlobInfo{}, err - } - blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob") - if err != nil { - return types.BlobInfo{}, err - } - succeeded := false - defer func() { - blobFile.Close() - if !succeeded { - os.Remove(blobFile.Name()) - } - }() - - digester := digest.Canonical.Digester() - tee := io.TeeReader(stream, digester.Hash()) - - size, err := io.Copy(blobFile, tee) - if err != nil { - return types.BlobInfo{}, err - } - computedDigest := digester.Digest() - if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) - } - if err := blobFile.Sync(); err != nil { - return types.BlobInfo{}, err - } - if err := blobFile.Chmod(0644); err != nil { - return types.BlobInfo{}, err - } - - blobPath, err := d.ref.blobPath(computedDigest) - if err != nil { - return types.BlobInfo{}, err - } - if err := ensureParentDirectoryExists(blobPath); err != nil { - return types.BlobInfo{}, err - } - if err := os.Rename(blobFile.Name(), blobPath); err != nil { - return types.BlobInfo{}, err - } - succeeded = true - return types.BlobInfo{Digest: computedDigest, Size: size}, nil -} - -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. -// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. -// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); -// it returns a non-nil error only on an unexpected failure. 
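PutBlob above uses a standard streaming-verification pattern: the upload is teed through a digester while being copied to disk, so the digest comes out of the same single pass. A self-contained sketch of just that pattern (not the method itself; the in-memory reader and Discard sink stand in for the real stream and blob file):

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/opencontainers/go-digest"
)

func main() {
	stream := bytes.NewReader([]byte("example blob contents"))

	// Every byte copied to the destination is also fed to the hash,
	// mirroring the digester/TeeReader combination in PutBlob above.
	digester := digest.Canonical.Digester()
	tee := io.TeeReader(stream, digester.Hash())

	size, err := io.Copy(ioutil.Discard, tee) // PutBlob copies to a temp file here
	if err != nil {
		panic(err)
	}
	fmt.Println(digester.Digest(), size)
}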
-func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { - if info.Digest == "" { - return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`) - } - blobPath, err := d.ref.blobPath(info.Digest) - if err != nil { - return false, -1, err - } - finfo, err := os.Stat(blobPath) - if err != nil && os.IsNotExist(err) { - return false, -1, nil - } - if err != nil { - return false, -1, err - } - return true, finfo.Size(), nil -} - -func (d *ociImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) { - return info, nil -} - -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. -func (d *ociImageDestination) PutManifest(m []byte) error { - digest, err := manifest.Digest(m) - if err != nil { - return err - } - desc := imgspecv1.Descriptor{} - desc.Digest = digest - // TODO(runcom): be aware and add support for OCI manifest list - desc.MediaType = imgspecv1.MediaTypeImageManifest - desc.Size = int64(len(m)) - - blobPath, err := d.ref.blobPath(digest) - if err != nil { - return err - } - if err := ensureParentDirectoryExists(blobPath); err != nil { - return err - } - if err := ioutil.WriteFile(blobPath, m, 0644); err != nil { - return err - } - - annotations := make(map[string]string) - annotations["org.opencontainers.image.ref.name"] = d.ref.tag - desc.Annotations = annotations - desc.Platform = &imgspecv1.Platform{ - Architecture: runtime.GOARCH, - OS: runtime.GOOS, - } - d.index.Manifests = append(d.index.Manifests, desc) - - return nil -} - -func ensureDirectoryExists(path string) error { - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - } - return nil -} - -// ensureParentDirectoryExists ensures the parent of the supplied path exists. -func ensureParentDirectoryExists(path string) error { - return ensureDirectoryExists(filepath.Dir(path)) -} - -func (d *ociImageDestination) PutSignatures(signatures [][]byte) error { - if len(signatures) != 0 { - return errors.Errorf("Pushing signatures for OCI images is not supported") - } - return nil -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed) -func (d *ociImageDestination) Commit() error { - if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil { - return err - } - indexJSON, err := json.Marshal(d.index) - if err != nil { - return err - } - return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644) -} diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest_test.go b/vendor/github.com/containers/image/oci/layout/oci_dest_test.go deleted file mode 100644 index 9767f94f3a28..000000000000 --- a/vendor/github.com/containers/image/oci/layout/oci_dest_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package layout - -import ( - "os" - "testing" - - "github.com/containers/image/types" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// readerFromFunc allows implementing Reader by any function, e.g. a closure. -type readerFromFunc func([]byte) (int, error) - -func (fn readerFromFunc) Read(p []byte) (int, error) { - return fn(p) -} - -// TestPutBlobDigestFailure simulates behavior on digest verification failure. -func TestPutBlobDigestFailure(t *testing.T) { - const digestErrorString = "Simulated digest error" - const blobDigest = "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f" - - ref, tmpDir := refToTempOCI(t) - defer os.RemoveAll(tmpDir) - dirRef, ok := ref.(ociReference) - require.True(t, ok) - blobPath, err := dirRef.blobPath(blobDigest) - assert.NoError(t, err) - - firstRead := true - reader := readerFromFunc(func(p []byte) (int, error) { - _, err := os.Lstat(blobPath) - require.Error(t, err) - require.True(t, os.IsNotExist(err)) - if firstRead { - if len(p) > 0 { - firstRead = false - } - for i := 0; i < len(p); i++ { - p[i] = 0xAA - } - return len(p), nil - } - return 0, errors.Errorf(digestErrorString) - }) - - dest, err := ref.NewImageDestination(nil) - require.NoError(t, err) - defer dest.Close() - _, err = dest.PutBlob(reader, types.BlobInfo{Digest: blobDigest, Size: -1}) - assert.Error(t, err) - assert.Contains(t, digestErrorString, err.Error()) - err = dest.Commit() - assert.NoError(t, err) - - _, err = os.Lstat(blobPath) - require.Error(t, err) - require.True(t, os.IsNotExist(err)) -} diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go deleted file mode 100644 index 99b9f2083acf..000000000000 --- a/vendor/github.com/containers/image/oci/layout/oci_src.go +++ /dev/null @@ -1,91 +0,0 @@ -package layout - -import ( - "context" - "io" - "io/ioutil" - "os" - - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -type ociImageSource struct { - ref ociReference - descriptor imgspecv1.Descriptor -} - -// newImageSource returns an ImageSource for reading from an existing directory. -func newImageSource(ref ociReference) (types.ImageSource, error) { - descriptor, err := ref.getManifestDescriptor() - if err != nil { - return nil, err - } - return &ociImageSource{ref: ref, descriptor: descriptor}, nil -} - -// Reference returns the reference used to set up this source. -func (s *ociImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. 
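The readerFromFunc type in the deleted test above is the usual Go idiom of letting a bare function satisfy a one-method interface. A small standalone sketch of the same shape (readerFunc and the payload are illustrative names, not part of the removed code):

package main

import (
	"fmt"
	"io"
)

// readerFunc adapts a plain function to io.Reader, as readerFromFunc does above.
type readerFunc func([]byte) (int, error)

func (fn readerFunc) Read(p []byte) (int, error) { return fn(p) }

func main() {
	done := false
	r := io.Reader(readerFunc(func(p []byte) (int, error) {
		if done {
			return 0, io.EOF
		}
		done = true
		return copy(p, "one-shot payload"), nil
	}))

	buf := make([]byte, 32)
	n, _ := r.Read(buf)
	fmt.Println(string(buf[:n]))
}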
-func (s *ociImageSource) Close() error { - return nil -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -func (s *ociImageSource) GetManifest() ([]byte, string, error) { - manifestPath, err := s.ref.blobPath(digest.Digest(s.descriptor.Digest)) - if err != nil { - return nil, "", err - } - m, err := ioutil.ReadFile(manifestPath) - if err != nil { - return nil, "", err - } - - return m, s.descriptor.MediaType, nil -} - -func (s *ociImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - manifestPath, err := s.ref.blobPath(digest) - if err != nil { - return nil, "", err - } - - m, err := ioutil.ReadFile(manifestPath) - if err != nil { - return nil, "", err - } - - // XXX: GetTargetManifest means that we don't have the context of what - // mediaType the manifest has. In OCI this means that we don't know - // what reference it came from, so we just *assume* that it's - // MediaTypeImageManifest. - return m, imgspecv1.MediaTypeImageManifest, nil -} - -// GetBlob returns a stream for the specified blob, and the blob's size. -func (s *ociImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { - path, err := s.ref.blobPath(info.Digest) - if err != nil { - return nil, 0, err - } - - r, err := os.Open(path) - if err != nil { - return nil, 0, err - } - fi, err := r.Stat() - if err != nil { - return nil, 0, err - } - return r, fi.Size(), nil -} - -func (s *ociImageSource) GetSignatures(context.Context) ([][]byte, error) { - return [][]byte{}, nil -} diff --git a/vendor/github.com/containers/image/oci/layout/oci_transport.go b/vendor/github.com/containers/image/oci/layout/oci_transport.go deleted file mode 100644 index f1fc1071399f..000000000000 --- a/vendor/github.com/containers/image/oci/layout/oci_transport.go +++ /dev/null @@ -1,254 +0,0 @@ -package layout - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "regexp" - "strings" - - "github.com/containers/image/directory/explicitfilepath" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for OCI directories. -var Transport = ociTransport{} - -type ociTransport struct{} - -func (t ociTransport) Name() string { - return "oci" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t ociTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -var refRegexp = regexp.MustCompile(`^([A-Za-z0-9._-]+)+$`) - -// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion. -// scope passed to this function will not be ""; that value is always allowed. 
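Both ValidatePolicyConfigurationScope below and ParseReference further down rely on the same convention: split on the last colon, treat the left part as the directory and the right part as the tag, defaulting to "latest". A hedged sketch of that convention in isolation (splitDirTag is an illustrative helper, not part of the removed code):

package main

import (
	"fmt"
	"strings"
)

// splitDirTag mirrors the last-colon convention of this transport.
func splitDirTag(reference string) (dir, tag string) {
	sep := strings.LastIndex(reference, ":")
	if sep == -1 {
		return reference, "latest"
	}
	return reference[:sep], reference[sep+1:]
}

func main() {
	for _, ref := range []string{"/tmp/layout", "/tmp/layout:v1"} {
		dir, tag := splitDirTag(ref)
		fmt.Printf("%q -> dir=%q tag=%q\n", ref, dir, tag)
	}
}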
-func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error { - var dir string - sep := strings.LastIndex(scope, ":") - if sep == -1 { - dir = scope - } else { - dir = scope[:sep] - tag := scope[sep+1:] - if !refRegexp.MatchString(tag) { - return errors.Errorf("Invalid tag %s", tag) - } - } - - if strings.Contains(dir, ":") { - return errors.Errorf("Invalid OCI reference %s: path contains a colon", scope) - } - - if !strings.HasPrefix(dir, "/") { - return errors.Errorf("Invalid scope %s: must be an absolute path", scope) - } - // Refuse also "/", otherwise "/" and "" would have the same semantics, - // and "" could be unexpectedly shadowed by the "/" entry. - // (Note: we do allow "/:sometag", a bit ridiculous but why refuse it?) - if scope == "/" { - return errors.New(`Invalid scope "/": Use the generic default scope ""`) - } - cleaned := filepath.Clean(dir) - if cleaned != dir { - return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) - } - return nil -} - -// ociReference is an ImageReference for OCI directory paths. -type ociReference struct { - // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! - // Either of the paths may point to a different, or no, inode over time. resolvedDir may contain symbolic links, and so on. - - // Generally we follow the intent of the user, and use the "dir" member for filesystem operations (e.g. the user can use a relative path to avoid - // being exposed to symlinks and renames in the parent directories to the working directory). - // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) - dir string // As specified by the user. May be relative, contain symlinks, etc. - resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. - tag string -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. -func ParseReference(reference string) (types.ImageReference, error) { - var dir, tag string - sep := strings.LastIndex(reference, ":") - if sep == -1 { - dir = reference - tag = "latest" - } else { - dir = reference[:sep] - tag = reference[sep+1:] - } - return NewReference(dir, tag) -} - -// NewReference returns an OCI reference for a directory and a tag. -// -// We do not expose an API supplying the resolvedDir; we could, but recomputing it -// is generally cheap enough that we prefer being confident about the properties of resolvedDir. -func NewReference(dir, tag string) (types.ImageReference, error) { - resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir) - if err != nil { - return nil, err - } - // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces - // from being ambiguous with values of PolicyConfigurationIdentity. 
- if strings.Contains(resolved, ":") { - return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", dir, tag, resolved) - } - if !refRegexp.MatchString(tag) { - return nil, errors.Errorf("Invalid tag %s", tag) - } - return ociReference{dir: dir, resolvedDir: resolved, tag: tag}, nil -} - -func (ref ociReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref ociReference) StringWithinTransport() string { - return fmt.Sprintf("%s:%s", ref.dir, ref.tag) -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref ociReference) DockerReference() reference.Named { - return nil -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. -func (ref ociReference) PolicyConfigurationIdentity() string { - return fmt.Sprintf("%s:%s", ref.resolvedDir, ref.tag) -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref ociReference) PolicyConfigurationNamespaces() []string { - res := []string{} - path := ref.resolvedDir - for { - lastSlash := strings.LastIndex(path, "/") - // Note that we do not include "/"; it is redundant with the default "" global default, - // and rejected by ociTransport.ValidatePolicyConfigurationScope above. - if lastSlash == -1 || path == "/" { - break - } - res = append(res, path) - path = path[:lastSlash] - } - return res -} - -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. 
-func (ref ociReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - src, err := newImageSource(ref) - if err != nil { - return nil, err - } - return image.FromSource(src) -} - -func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { - indexJSON, err := os.Open(ref.indexPath()) - if err != nil { - return imgspecv1.Descriptor{}, err - } - defer indexJSON.Close() - index := imgspecv1.Index{} - if err := json.NewDecoder(indexJSON).Decode(&index); err != nil { - return imgspecv1.Descriptor{}, err - } - var d *imgspecv1.Descriptor - for _, md := range index.Manifests { - if md.MediaType != imgspecv1.MediaTypeImageManifest { - continue - } - refName, ok := md.Annotations["org.opencontainers.image.ref.name"] - if !ok { - continue - } - if refName == ref.tag { - d = &md - break - } - } - if d == nil { - return imgspecv1.Descriptor{}, fmt.Errorf("no descriptor found for reference %q", ref.tag) - } - return *d, nil -} - -// NewImageSource returns a types.ImageSource for this reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. -// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. -// The caller must call .Close() on the returned ImageSource. -func (ref ociReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - return newImageSource(ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref ociReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ref), nil -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref ociReference) DeleteImage(ctx *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for oci: images") -} - -// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions. -func (ref ociReference) ociLayoutPath() string { - return filepath.Join(ref.dir, "oci-layout") -} - -// indexPath returns a path for the index.json within a directory using OCI conventions. -func (ref ociReference) indexPath() string { - return filepath.Join(ref.dir, "index.json") -} - -// blobPath returns a path for a blob within a directory using OCI image-layout conventions. 
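The blobPath method below maps a digest to the standard OCI image-layout location, blobs/&lt;algorithm&gt;/&lt;hex&gt;, under the reference's directory. A minimal illustration using go-digest directly (the /tmp/layout root is a hypothetical example):

package main

import (
	"fmt"
	"path/filepath"

	"github.com/opencontainers/go-digest"
)

func main() {
	d := digest.FromBytes([]byte("example blob contents"))
	// Same layout blobPath below produces under ref.dir.
	p := filepath.Join("/tmp/layout", "blobs", d.Algorithm().String(), d.Hex())
	fmt.Println(p) // /tmp/layout/blobs/sha256/<64 hex chars>
}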
-func (ref ociReference) blobPath(digest digest.Digest) (string, error) { - if err := digest.Validate(); err != nil { - return "", errors.Wrapf(err, "unexpected digest reference %s", digest) - } - return filepath.Join(ref.dir, "blobs", digest.Algorithm().String(), digest.Hex()), nil -} diff --git a/vendor/github.com/containers/image/oci/layout/oci_transport_test.go b/vendor/github.com/containers/image/oci/layout/oci_transport_test.go deleted file mode 100644 index 9c63addd5c34..000000000000 --- a/vendor/github.com/containers/image/oci/layout/oci_transport_test.go +++ /dev/null @@ -1,291 +0,0 @@ -package layout - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/containers/image/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestTransportName(t *testing.T) { - assert.Equal(t, "oci", Transport.Name()) -} - -func TestTransportParseReference(t *testing.T) { - testParseReference(t, Transport.ParseReference) -} - -func TestTransportValidatePolicyConfigurationScope(t *testing.T) { - for _, scope := range []string{ - "/etc", - "/etc:notlatest", - "/this/does/not/exist", - "/this/does/not/exist:notlatest", - "/:strangecornercase", - } { - err := Transport.ValidatePolicyConfigurationScope(scope) - assert.NoError(t, err, scope) - } - - for _, scope := range []string{ - "relative/path", - "/", - "/double//slashes", - "/has/./dot", - "/has/dot/../dot", - "/trailing/slash/", - "/etc:invalid'tag!value@", - "/path:with/colons", - "/path:with/colons/and:tag", - } { - err := Transport.ValidatePolicyConfigurationScope(scope) - assert.Error(t, err, scope) - } -} - -func TestParseReference(t *testing.T) { - testParseReference(t, ParseReference) -} - -// testParseReference is a test shared for Transport.ParseReference and ParseReference. -func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) { - tmpDir, err := ioutil.TempDir("", "oci-transport-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - for _, path := range []string{ - "/", - "/etc", - tmpDir, - "relativepath", - tmpDir + "/thisdoesnotexist", - } { - for _, tag := range []struct{ suffix, tag string }{ - {":notlatest", "notlatest"}, - {"", "latest"}, - } { - input := path + tag.suffix - ref, err := fn(input) - require.NoError(t, err, input) - ociRef, ok := ref.(ociReference) - require.True(t, ok) - assert.Equal(t, path, ociRef.dir, input) - assert.Equal(t, tag.tag, ociRef.tag, input) - } - } - - _, err = fn(tmpDir + "/with:multiple:colons:and:tag") - assert.Error(t, err) - - _, err = fn(tmpDir + ":invalid'tag!value@") - assert.Error(t, err) -} - -func TestNewReference(t *testing.T) { - const tagValue = "tagValue" - - tmpDir, err := ioutil.TempDir("", "oci-transport-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - ref, err := NewReference(tmpDir, tagValue) - require.NoError(t, err) - ociRef, ok := ref.(ociReference) - require.True(t, ok) - assert.Equal(t, tmpDir, ociRef.dir) - assert.Equal(t, tagValue, ociRef.tag) - - _, err = NewReference(tmpDir+"/thisparentdoesnotexist/something", tagValue) - assert.Error(t, err) - - _, err = NewReference(tmpDir+"/has:colon", tagValue) - assert.Error(t, err) - - _, err = NewReference(tmpDir, "invalid'tag!value@") - assert.Error(t, err) -} - -// refToTempOCI creates a temporary directory and returns a reference to it. 
-// The caller should -// defer os.RemoveAll(tmpDir) -func refToTempOCI(t *testing.T) (ref types.ImageReference, tmpDir string) { - tmpDir, err := ioutil.TempDir("", "oci-transport-test") - require.NoError(t, err) - m := `{ - "schemaVersion": 2, - "manifests": [ - { - "mediaType": "application/vnd.oci.image.manifest.v1+json", - "size": 7143, - "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - "platform": { - "architecture": "ppc64le", - "os": "linux" - }, - "annotations": { - "org.opencontainers.image.ref.name": "tagValue" - } - } - ] - } -` - ioutil.WriteFile(filepath.Join(tmpDir, "index.json"), []byte(m), 0644) - ref, err = NewReference(tmpDir, "tagValue") - require.NoError(t, err) - return ref, tmpDir -} - -func TestReferenceTransport(t *testing.T) { - ref, tmpDir := refToTempOCI(t) - defer os.RemoveAll(tmpDir) - assert.Equal(t, Transport, ref.Transport()) -} - -func TestReferenceStringWithinTransport(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "oci-transport-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - for _, c := range []struct{ input, result string }{ - {"/dir1:notlatest", "/dir1:notlatest"}, // Explicit tag - {"/dir2", "/dir2:latest"}, // Default tag - } { - ref, err := ParseReference(tmpDir + c.input) - require.NoError(t, err, c.input) - stringRef := ref.StringWithinTransport() - assert.Equal(t, tmpDir+c.result, stringRef, c.input) - // Do one more round to verify that the output can be parsed, to an equal value. - ref2, err := Transport.ParseReference(stringRef) - require.NoError(t, err, c.input) - stringRef2 := ref2.StringWithinTransport() - assert.Equal(t, stringRef, stringRef2, c.input) - } -} - -func TestReferenceDockerReference(t *testing.T) { - ref, tmpDir := refToTempOCI(t) - defer os.RemoveAll(tmpDir) - assert.Nil(t, ref.DockerReference()) -} - -func TestReferencePolicyConfigurationIdentity(t *testing.T) { - ref, tmpDir := refToTempOCI(t) - defer os.RemoveAll(tmpDir) - - assert.Equal(t, tmpDir+":tagValue", ref.PolicyConfigurationIdentity()) - // A non-canonical path. Test just one, the various other cases are - // tested in explicitfilepath.ResolvePathToFullyExplicit. - ref, err := NewReference(tmpDir+"/.", "tag2") - require.NoError(t, err) - assert.Equal(t, tmpDir+":tag2", ref.PolicyConfigurationIdentity()) - - // "/" as a corner case. - ref, err = NewReference("/", "tag3") - require.NoError(t, err) - assert.Equal(t, "/:tag3", ref.PolicyConfigurationIdentity()) -} - -func TestReferencePolicyConfigurationNamespaces(t *testing.T) { - ref, tmpDir := refToTempOCI(t) - defer os.RemoveAll(tmpDir) - // We don't really know enough to make a full equality test here. - ns := ref.PolicyConfigurationNamespaces() - require.NotNil(t, ns) - assert.True(t, len(ns) >= 2) - assert.Equal(t, tmpDir, ns[0]) - assert.Equal(t, filepath.Dir(tmpDir), ns[1]) - - // Test with a known path which should exist. Test just one non-canonical - // path, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit. - // - // It would be nice to test a deeper hierarchy, but it is not obvious what - // deeper path is always available in the various distros, AND is not likely - // to contain a symbolic link. 
- for _, path := range []string{"/etc/skel", "/etc/skel/./."} { - _, err := os.Lstat(path) - require.NoError(t, err) - ref, err := NewReference(path, "sometag") - require.NoError(t, err) - ns := ref.PolicyConfigurationNamespaces() - require.NotNil(t, ns) - assert.Equal(t, []string{"/etc/skel", "/etc"}, ns) - } - - // "/" as a corner case. - ref, err := NewReference("/", "tag3") - require.NoError(t, err) - assert.Equal(t, []string{}, ref.PolicyConfigurationNamespaces()) -} - -func TestReferenceNewImage(t *testing.T) { - ref, tmpDir := refToTempOCI(t) - defer os.RemoveAll(tmpDir) - _, err := ref.NewImage(nil) - assert.Error(t, err) -} - -func TestReferenceNewImageSource(t *testing.T) { - ref, tmpDir := refToTempOCI(t) - defer os.RemoveAll(tmpDir) - _, err := ref.NewImageSource(nil, nil) - assert.NoError(t, err) -} - -func TestReferenceNewImageDestination(t *testing.T) { - ref, tmpDir := refToTempOCI(t) - defer os.RemoveAll(tmpDir) - dest, err := ref.NewImageDestination(nil) - assert.NoError(t, err) - defer dest.Close() -} - -func TestReferenceDeleteImage(t *testing.T) { - ref, tmpDir := refToTempOCI(t) - defer os.RemoveAll(tmpDir) - err := ref.DeleteImage(nil) - assert.Error(t, err) -} - -func TestReferenceOCILayoutPath(t *testing.T) { - ref, tmpDir := refToTempOCI(t) - defer os.RemoveAll(tmpDir) - ociRef, ok := ref.(ociReference) - require.True(t, ok) - assert.Equal(t, tmpDir+"/oci-layout", ociRef.ociLayoutPath()) -} - -func TestReferenceIndexPath(t *testing.T) { - ref, tmpDir := refToTempOCI(t) - defer os.RemoveAll(tmpDir) - ociRef, ok := ref.(ociReference) - require.True(t, ok) - assert.Equal(t, tmpDir+"/index.json", ociRef.indexPath()) -} - -func TestReferenceBlobPath(t *testing.T) { - const hex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - - ref, tmpDir := refToTempOCI(t) - defer os.RemoveAll(tmpDir) - ociRef, ok := ref.(ociReference) - require.True(t, ok) - bp, err := ociRef.blobPath("sha256:" + hex) - assert.NoError(t, err) - assert.Equal(t, tmpDir+"/blobs/sha256/"+hex, bp) -} - -func TestReferenceBlobPathInvalid(t *testing.T) { - const hex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - - ref, tmpDir := refToTempOCI(t) - defer os.RemoveAll(tmpDir) - ociRef, ok := ref.(ociReference) - require.True(t, ok) - _, err := ociRef.blobPath(hex) - assert.Error(t, err) - assert.Contains(t, err.Error(), "unexpected digest reference "+hex) -} diff --git a/vendor/github.com/containers/image/oci/oci.go b/vendor/github.com/containers/image/oci/oci.go deleted file mode 100644 index 03607d3288b9..000000000000 --- a/vendor/github.com/containers/image/oci/oci.go +++ /dev/null @@ -1 +0,0 @@ -package oci diff --git a/vendor/github.com/containers/image/openshift/openshift-copies.go b/vendor/github.com/containers/image/openshift/openshift-copies.go deleted file mode 100644 index 01fe71a243eb..000000000000 --- a/vendor/github.com/containers/image/openshift/openshift-copies.go +++ /dev/null @@ -1,1174 +0,0 @@ -package openshift - -import ( - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "reflect" - "strings" - "time" - - "github.com/ghodss/yaml" - "github.com/imdario/mergo" - "github.com/pkg/errors" - "golang.org/x/net/http2" - "k8s.io/client-go/util/homedir" -) - -// restTLSClientConfig is a modified copy of k8s.io/kubernets/pkg/client/restclient.TLSClientConfig. 
-// restTLSClientConfig contains settings to enable transport layer security -type restTLSClientConfig struct { - // Server requires TLS client certificate authentication - CertFile string - // Server requires TLS client certificate authentication - KeyFile string - // Trusted root certificates for server - CAFile string - - // CertData holds PEM-encoded bytes (typically read from a client certificate file). - // CertData takes precedence over CertFile - CertData []byte - // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). - // KeyData takes precedence over KeyFile - KeyData []byte - // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). - // CAData takes precedence over CAFile - CAData []byte -} - -// restConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.Config. -// Config holds the common attributes that can be passed to a Kubernetes client on -// initialization. -type restConfig struct { - // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. - // If a URL is given then the (optional) Path of that URL represents a prefix that must - // be appended to all request URIs used to access the apiserver. This allows a frontend - // proxy to easily relocate all of the apiserver endpoints. - Host string - - // Server requires Basic authentication - Username string - Password string - - // Server requires Bearer authentication. This client will not attempt to use - // refresh tokens for an OAuth2 flow. - // TODO: demonstrate an OAuth2 compatible client. - BearerToken string - - // TLSClientConfig contains settings to enable transport layer security - restTLSClientConfig - - // Server should be accessed without verifying the TLS - // certificate. For testing only. - Insecure bool -} - -// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfig. -// ClientConfig is used to make it easy to get an api server client -type clientConfig interface { - // ClientConfig returns a complete client config - ClientConfig() (*restConfig, error) -} - -// defaultClientConfig is a modified copy of openshift/origin/pkg/cmd/util/clientcmd.DefaultClientConfig. -func defaultClientConfig() clientConfig { - loadingRules := newOpenShiftClientConfigLoadingRules() - // REMOVED: Allowing command-line overriding of loadingRules - // REMOVED: clientcmd.ConfigOverrides - - clientConfig := newNonInteractiveDeferredLoadingClientConfig(loadingRules) - - return clientConfig -} - -var recommendedHomeFile = path.Join(homedir.HomeDir(), ".kube/config") - -// newOpenShiftClientConfigLoadingRules is a modified copy of openshift/origin/pkg/cmd/cli/config.NewOpenShiftClientConfigLoadingRules. -// NewOpenShiftClientConfigLoadingRules returns file priority loading rules for OpenShift. -// 1. --config value -// 2. if KUBECONFIG env var has a value, use it. Otherwise, ~/.kube/config file -func newOpenShiftClientConfigLoadingRules() *clientConfigLoadingRules { - chain := []string{} - - envVarFile := os.Getenv("KUBECONFIG") - if len(envVarFile) != 0 { - chain = append(chain, filepath.SplitList(envVarFile)...) - } else { - chain = append(chain, recommendedHomeFile) - } - - return &clientConfigLoadingRules{ - Precedence: chain, - // REMOVED: Migration support; run (oc login) to trigger migration - } -} - -// deferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig. 
-// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a set of loading rules. -// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that -// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before -// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid -// passing extraneous information down a call stack -type deferredLoadingClientConfig struct { - loadingRules *clientConfigLoadingRules - - clientConfig clientConfig -} - -// NewNonInteractiveDeferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveDeferredLoadingClientConfig. -// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name -func newNonInteractiveDeferredLoadingClientConfig(loadingRules *clientConfigLoadingRules) clientConfig { - return &deferredLoadingClientConfig{loadingRules: loadingRules} -} - -func (config *deferredLoadingClientConfig) createClientConfig() (clientConfig, error) { - if config.clientConfig == nil { - // REMOVED: Support for concurrent use in multiple threads. - mergedConfig, err := config.loadingRules.Load() - if err != nil { - return nil, err - } - - var mergedClientConfig clientConfig - // REMOVED: Interactive fallback support. - mergedClientConfig = newNonInteractiveClientConfig(*mergedConfig) - - config.clientConfig = mergedClientConfig - } - - return config.clientConfig, nil -} - -// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.ClientConfig. -// ClientConfig implements ClientConfig -func (config *deferredLoadingClientConfig) ClientConfig() (*restConfig, error) { - mergedClientConfig, err := config.createClientConfig() - if err != nil { - return nil, err - } - mergedConfig, err := mergedClientConfig.ClientConfig() - if err != nil { - return nil, err - } - // REMOVED: In-cluster service account configuration use. - - return mergedConfig, nil -} - -var ( - // DefaultCluster is the cluster config used when no other config is specified - // TODO: eventually apiserver should start on 443 and be secure by default - defaultCluster = clientcmdCluster{Server: "http://localhost:8080"} - - // EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name - envVarCluster = clientcmdCluster{Server: os.Getenv("KUBERNETES_MASTER")} -) - -// directClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig. -// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information -type directClientConfig struct { - config clientcmdConfig -} - -// newNonInteractiveClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveClientConfig. -// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information -func newNonInteractiveClientConfig(config clientcmdConfig) clientConfig { - return &directClientConfig{config} -} - -// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ClientConfig. 
-// ClientConfig implements ClientConfig -func (config *directClientConfig) ClientConfig() (*restConfig, error) { - if err := config.ConfirmUsable(); err != nil { - return nil, err - } - - configAuthInfo := config.getAuthInfo() - configClusterInfo := config.getCluster() - - clientConfig := &restConfig{} - clientConfig.Host = configClusterInfo.Server - if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 { - u.RawQuery = "" - u.Fragment = "" - clientConfig.Host = u.String() - } - - // only try to read the auth information if we are secure - if isConfigTransportTLS(*clientConfig) { - var err error - - // mergo is first-write-wins for map values and last-write-wins for interface values - // NOTE: This behavior changed with https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a. - // Our mergo.Merge version is older than this change. - // REMOVED: Support for interactive fallback. - userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo) - if err != nil { - return nil, err - } - mergo.Merge(clientConfig, userAuthPartialConfig) - - serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo) - if err != nil { - return nil, err - } - mergo.Merge(clientConfig, serverAuthPartialConfig) - } - - return clientConfig, nil -} - -// getServerIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getServerIdentificationPartialConfig. -// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for -// both, so we have to split the objects and merge them separately -// we want this order of precedence for the server identification -// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files) -// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) -// 3. load the ~/.kubernetes_auth file as a default -func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) { - mergedConfig := &restConfig{} - - // configClusterInfo holds the information identifying the server provided by .kubeconfig - configClientConfig := &restConfig{} - configClientConfig.CAFile = configClusterInfo.CertificateAuthority - configClientConfig.CAData = configClusterInfo.CertificateAuthorityData - configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify - mergo.Merge(mergedConfig, configClientConfig) - - return mergedConfig, nil -} - -// getUserIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getUserIdentificationPartialConfig. -// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for -// both, so we have to split the objects and merge them separately -// we want this order of precedence for user identification -// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files) -// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) -// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file -// 4. 
-func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
-	mergedConfig := &restConfig{}
-
-	// blindly overwrite existing values based on precedence
-	if len(configAuthInfo.Token) > 0 {
-		mergedConfig.BearerToken = configAuthInfo.Token
-	}
-	if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
-		mergedConfig.CertFile = configAuthInfo.ClientCertificate
-		mergedConfig.CertData = configAuthInfo.ClientCertificateData
-		mergedConfig.KeyFile = configAuthInfo.ClientKey
-		mergedConfig.KeyData = configAuthInfo.ClientKeyData
-	}
-	if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
-		mergedConfig.Username = configAuthInfo.Username
-		mergedConfig.Password = configAuthInfo.Password
-	}
-
-	// REMOVED: prompting for missing information.
-	return mergedConfig, nil
-}
-
-// canIdentifyUser is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.canIdentifyUser
-func canIdentifyUser(config restConfig) bool {
-	return len(config.Username) > 0 ||
-		(len(config.CertFile) > 0 || len(config.CertData) > 0) ||
-		len(config.BearerToken) > 0
-}
-
-// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
-// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
-// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
-func (config *directClientConfig) ConfirmUsable() error {
-	var validationErrors []error
-	validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...)
-	validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...)
-	// when direct client config is specified, and our only error is that no server is defined, we should
-	// return a standard "no config" error
-	if len(validationErrors) == 1 && validationErrors[0] == errEmptyCluster {
-		return newErrConfigurationInvalid([]error{errEmptyConfig})
-	}
-	return newErrConfigurationInvalid(validationErrors)
-}
-
-// getContextName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContextName.
-func (config *directClientConfig) getContextName() string {
-	// REMOVED: overrides support
-	return config.config.CurrentContext
-}
-
-// getAuthInfoName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfoName.
-func (config *directClientConfig) getAuthInfoName() string {
-	// REMOVED: overrides support
-	return config.getContext().AuthInfo
-}
-
-// getClusterName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getClusterName.
-func (config *directClientConfig) getClusterName() string {
-	// REMOVED: overrides support
-	return config.getContext().Cluster
-}
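
// A small sketch of what the getters below resolve to for an empty config:
// because getCluster merges in defaultCluster first, and the old mergo.Merge
// never overwrites an already-set field, even an empty kubeconfig yields a
// server address (first write wins).
func exampleDefaultCluster() string {
	config := &directClientConfig{config: *clientcmdNewConfig()}
	return config.getCluster().Server // "http://localhost:8080"
}
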
-// getContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContext.
-func (config *directClientConfig) getContext() clientcmdContext {
-	contexts := config.config.Contexts
-	contextName := config.getContextName()
-
-	var mergedContext clientcmdContext
-	if configContext, exists := contexts[contextName]; exists {
-		mergo.Merge(&mergedContext, configContext)
-	}
-	// REMOVED: overrides support
-
-	return mergedContext
-}
-
-var (
-	errEmptyConfig = errors.New("no configuration has been provided")
-	// message is for consistency with old behavior
-	errEmptyCluster = errors.New("cluster has no server defined")
-)
-
-// validateClusterInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateClusterInfo.
-// validateClusterInfo looks for conflicts and errors in the cluster info
-func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []error {
-	var validationErrors []error
-
-	if reflect.DeepEqual(clientcmdCluster{}, clusterInfo) {
-		return []error{errEmptyCluster}
-	}
-
-	if len(clusterInfo.Server) == 0 {
-		if len(clusterName) == 0 {
-			validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined"))
-		} else {
-			validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName))
-		}
-	}
-	// Make sure CA data and CA file aren't both specified
-	if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
-		validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
-	}
-	if len(clusterInfo.CertificateAuthority) != 0 {
-		clientCertCA, err := os.Open(clusterInfo.CertificateAuthority)
-		if err != nil {
-			validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
-		} else {
-			// Close only if the file was successfully opened.
-			clientCertCA.Close()
-		}
-	}
-
-	return validationErrors
-}
-
-// validateAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateAuthInfo.
-// validateAuthInfo looks for conflicts and errors in the auth info
-func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error {
-	var validationErrors []error
-
-	usingAuthPath := false
-	methods := make([]string, 0, 3)
-	if len(authInfo.Token) != 0 {
-		methods = append(methods, "token")
-	}
-	if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 {
-		methods = append(methods, "basicAuth")
-	}
-
-	if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 {
-		// Make sure cert data and file aren't both specified
-		if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 {
-			validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName))
client-cert-data will override", authInfoName)) - } - // Make sure key data and file aren't both specified - if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { - validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) - } - // Make sure a key is specified - if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { - validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName)) - } - - if len(authInfo.ClientCertificate) != 0 { - clientCertFile, err := os.Open(authInfo.ClientCertificate) - defer clientCertFile.Close() - if err != nil { - validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) - } - } - if len(authInfo.ClientKey) != 0 { - clientKeyFile, err := os.Open(authInfo.ClientKey) - defer clientKeyFile.Close() - if err != nil { - validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) - } - } - } - - // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case - if (len(methods) > 1) && (!usingAuthPath) { - validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) - } - - return validationErrors -} - -// getAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfo. -func (config *directClientConfig) getAuthInfo() clientcmdAuthInfo { - authInfos := config.config.AuthInfos - authInfoName := config.getAuthInfoName() - - var mergedAuthInfo clientcmdAuthInfo - if configAuthInfo, exists := authInfos[authInfoName]; exists { - mergo.Merge(&mergedAuthInfo, configAuthInfo) - } - // REMOVED: overrides support - - return mergedAuthInfo -} - -// getCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getCluster. -func (config *directClientConfig) getCluster() clientcmdCluster { - clusterInfos := config.config.Clusters - clusterInfoName := config.getClusterName() - - var mergedClusterInfo clientcmdCluster - mergo.Merge(&mergedClusterInfo, defaultCluster) - mergo.Merge(&mergedClusterInfo, envVarCluster) - if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { - mergo.Merge(&mergedClusterInfo, configClusterInfo) - } - // REMOVED: overrides support - - return mergedClusterInfo -} - -// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate. -// This helper implements the error and Errors interfaces. Keeping it private -// prevents people from making an aggregate of 0 errors, which is not -// an error, but does satisfy the error interface. -type aggregateErr []error - -// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate. -// NewAggregate converts a slice of errors into an Aggregate interface, which -// is itself an implementation of the error interface. If the slice is empty, -// this returns nil. -// It will check if any of the element of input error list is nil, to avoid -// nil pointer panic when call Error(). 
-// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate.
-// NewAggregate converts a slice of errors into an Aggregate interface, which
-// is itself an implementation of the error interface. If the slice is empty,
-// this returns nil.
-// It will check if any element of the input error list is nil, to avoid
-// a nil pointer panic when calling Error().
-func newAggregate(errlist []error) error {
-	if len(errlist) == 0 {
-		return nil
-	}
-	// In case the input error list contains nil entries
-	var errs []error
-	for _, e := range errlist {
-		if e != nil {
-			errs = append(errs, e)
-		}
-	}
-	if len(errs) == 0 {
-		return nil
-	}
-	return aggregateErr(errs)
-}
-
-// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error.
-// Error is part of the error interface.
-func (agg aggregateErr) Error() string {
-	if len(agg) == 0 {
-		// This should never happen, really.
-		return ""
-	}
-	if len(agg) == 1 {
-		return agg[0].Error()
-	}
-	result := fmt.Sprintf("[%s", agg[0].Error())
-	for i := 1; i < len(agg); i++ {
-		result += fmt.Sprintf(", %s", agg[i].Error())
-	}
-	result += "]"
-	return result
-}
-
-// REMOVED: aggregateErr.Errors
-
-// errConfigurationInvalid is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid.
-// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
-type errConfigurationInvalid []error
-
-var _ error = errConfigurationInvalid{}
-
-// REMOVED: utilerrors.Aggregate implementation for errConfigurationInvalid.
-
-// newErrConfigurationInvalid is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.newErrConfigurationInvalid.
-func newErrConfigurationInvalid(errs []error) error {
-	switch len(errs) {
-	case 0:
-		return nil
-	default:
-		return errConfigurationInvalid(errs)
-	}
-}
-
-// Error implements the error interface
-func (e errConfigurationInvalid) Error() string {
-	return fmt.Sprintf("invalid configuration: %v", newAggregate(e).Error())
-}
-
-// clientConfigLoadingRules is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules
-// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config
-// Callers can put the chain together however they want, but we'd recommend:
-// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath
-// ExplicitPath is special, because if a user specifically requests a certain file be used, an error is reported if the file is not present
-type clientConfigLoadingRules struct {
-	Precedence []string
-}
-
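// A sketch of the precedence the two-pass merge in Load (below) produces with
// the old, non-overwriting mergo: entries set by the first file in Precedence
// shadow the same entries in later files. Both paths are hypothetical.
func exampleLoadPrecedence() (*clientcmdConfig, error) {
	rules := &clientConfigLoadingRules{
		Precedence: []string{"/root/.kube/config", "/etc/kubernetes/kubeconfig"},
	}
	return rules.Load() // the first file's entries win over the second's
}
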
-// Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load
-// Load starts by running the MigrationRules and then
-// takes the loading rules and returns a Config object based on following rules.
-// if ExplicitPath is set, return the unmerged explicit file
-// Otherwise, return a merged config based on the Precedence slice
-// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
-// Read errors or files with non-deserializable content produce errors.
-// The first file to set a particular map key wins and map key's value is never changed.
-// BUT, if you set a struct value that is NOT contained inside of map, the value WILL be changed.
-// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two.
-// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even
-// non-conflicting entries from the second file's "red-user" are discarded.
-// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
-// and only absolute file paths are returned.
-func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
-	errlist := []error{}
-
-	kubeConfigFiles := []string{}
-
-	// REMOVED: explicit path support
-	kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...)
-
-	kubeconfigs := []*clientcmdConfig{}
-	// read and cache the config files so that we only look at them once
-	for _, filename := range kubeConfigFiles {
-		if len(filename) == 0 {
-			// no work to do
-			continue
-		}
-
-		config, err := loadFromFile(filename)
-		if os.IsNotExist(err) {
-			// skip missing files
-			continue
-		}
-		if err != nil {
-			errlist = append(errlist, errors.Wrapf(err, "Error loading config file \"%s\"", filename))
-			continue
-		}
-
-		kubeconfigs = append(kubeconfigs, config)
-	}
-
-	// first merge all of our maps
-	mapConfig := clientcmdNewConfig()
-	for _, kubeconfig := range kubeconfigs {
-		mergo.Merge(mapConfig, kubeconfig)
-	}
-
-	// merge all of the struct values in the reverse order so that priority is given correctly
-	// errors are not added to the list the second time
-	nonMapConfig := clientcmdNewConfig()
-	for i := len(kubeconfigs) - 1; i >= 0; i-- {
-		kubeconfig := kubeconfigs[i]
-		mergo.Merge(nonMapConfig, kubeconfig)
-	}
-
-	// since values are overwritten, but map values are not, we can merge the non-map config on top of the map config and
-	// get the values we expect.
-	config := clientcmdNewConfig()
-	mergo.Merge(config, mapConfig)
-	mergo.Merge(config, nonMapConfig)
-
-	// REMOVED: Possibility to skip this.
-	if err := resolveLocalPaths(config); err != nil {
-		errlist = append(errlist, err)
-	}
-
-	return config, newAggregate(errlist)
-}
-
-// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile
-// LoadFromFile takes a filename and deserializes the contents into a Config object
-func loadFromFile(filename string) (*clientcmdConfig, error) {
-	kubeconfigBytes, err := ioutil.ReadFile(filename)
-	if err != nil {
-		return nil, err
-	}
-	config, err := load(kubeconfigBytes)
-	if err != nil {
-		return nil, err
-	}
-
-	// set LocationOfOrigin on every Cluster, User, and Context
-	for key, obj := range config.AuthInfos {
-		obj.LocationOfOrigin = filename
-		config.AuthInfos[key] = obj
-	}
-	for key, obj := range config.Clusters {
-		obj.LocationOfOrigin = filename
-		config.Clusters[key] = obj
-	}
-	for key, obj := range config.Contexts {
-		obj.LocationOfOrigin = filename
-		config.Contexts[key] = obj
-	}
-
-	if config.AuthInfos == nil {
-		config.AuthInfos = map[string]*clientcmdAuthInfo{}
-	}
-	if config.Clusters == nil {
-		config.Clusters = map[string]*clientcmdCluster{}
-	}
-	if config.Contexts == nil {
-		config.Contexts = map[string]*clientcmdContext{}
-	}
-
-	return config, nil
-}
-
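// A sketch of feeding load (below) YAML directly: YAMLToJSON converts the
// document, and the custom UnmarshalJSON methods turn the v1 named lists into
// maps. The kubeconfig snippet is hypothetical.
func exampleLoadYAML() (*clientcmdConfig, error) {
	kubeconfig := []byte("clusters:\n- name: dev\n  cluster:\n    server: https://localhost:8443\n")
	return load(kubeconfig) // result.Clusters["dev"].Server == "https://localhost:8443"
}
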
-// load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.Load
-// Load takes a byte slice and deserializes the contents into a Config object.
-// Encapsulates deserialization without assuming the source is a file.
-func load(data []byte) (*clientcmdConfig, error) {
-	config := clientcmdNewConfig()
-	// if there's no data in a file, return the default object instead of failing (DecodeInto rejects empty input)
-	if len(data) == 0 {
-		return config, nil
-	}
-	// Note: This does absolutely no kind/version checking or conversions.
-	data, err := yaml.YAMLToJSON(data)
-	if err != nil {
-		return nil, err
-	}
-	if err := json.Unmarshal(data, config); err != nil {
-		return nil, err
-	}
-	return config, nil
-}
-
-// resolveLocalPaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolveLocalPaths.
-// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin
-// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without
-// modification of its contents.
-func resolveLocalPaths(config *clientcmdConfig) error {
-	for _, cluster := range config.Clusters {
-		if len(cluster.LocationOfOrigin) == 0 {
-			continue
-		}
-		base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
-		if err != nil {
-			return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin)
-		}
-
-		if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil {
-			return err
-		}
-	}
-	for _, authInfo := range config.AuthInfos {
-		if len(authInfo.LocationOfOrigin) == 0 {
-			continue
-		}
-		base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
-		if err != nil {
-			return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin)
-		}
-
-		if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// getClusterFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetClusterFileReferences.
-func getClusterFileReferences(cluster *clientcmdCluster) []*string {
-	return []*string{&cluster.CertificateAuthority}
-}
-
-// getAuthInfoFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetAuthInfoFileReferences.
-func getAuthInfoFileReferences(authInfo *clientcmdAuthInfo) []*string {
-	return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey}
-}
-
-// resolvePaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolvePaths.
-// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory
-func resolvePaths(refs []*string, base string) error {
-	for _, ref := range refs {
-		// Don't resolve empty paths
-		if len(*ref) > 0 {
-			// Don't resolve absolute paths
-			if !filepath.IsAbs(*ref) {
-				*ref = filepath.Join(base, *ref)
-			}
-		}
-	}
-	return nil
-}
-
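// A sketch of resolvePaths (above): relative references are joined onto the
// kubeconfig's directory, absolute ones are left alone. Paths are hypothetical.
func exampleResolvePaths() string {
	ca := "certs/ca.crt"
	_ = resolvePaths([]*string{&ca}, "/etc/kubernetes")
	return ca // "/etc/kubernetes/certs/ca.crt"
}
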
-// restClientFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.RESTClientFor.
-// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config
-// object. Note that a RESTClient may require fields that are optional when initializing a Client.
-// A RESTClient created by this method is generic - it expects to operate on an API that follows
-// the Kubernetes conventions, but may not be the Kubernetes API.
-func restClientFor(config *restConfig) (*url.URL, *http.Client, error) {
-	// REMOVED: Configurable GroupVersion, Codec
-	// REMOVED: Configurable versionedAPIPath
-	baseURL, err := defaultServerURLFor(config)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	transport, err := transportFor(config)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var httpClient *http.Client
-	if transport != http.DefaultTransport {
-		httpClient = &http.Client{Transport: transport}
-	}
-
-	// REMOVED: Configurable QPS, Burst, ContentConfig
-	// REMOVED: Actually returning a RESTClient object.
-	return baseURL, httpClient, nil
-}
-
-// defaultServerURL is a modified copy of k8s.io/kubernetes/pkg/client/restclient.DefaultServerURL.
-// DefaultServerURL converts a host, host:port, or URL string to the default base server API path
-// to use with a Client at a given API version following the standard conventions for a
-// Kubernetes API.
-func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
-	if host == "" {
-		return nil, errors.Errorf("host must be a URL or a host:port pair")
-	}
-	base := host
-	hostURL, err := url.Parse(base)
-	if err != nil {
-		return nil, err
-	}
-	if hostURL.Scheme == "" {
-		scheme := "http://"
-		if defaultTLS {
-			scheme = "https://"
-		}
-		hostURL, err = url.Parse(scheme + base)
-		if err != nil {
-			return nil, err
-		}
-		if hostURL.Path != "" && hostURL.Path != "/" {
-			return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base)
-		}
-	}
-
-	// REMOVED: versionedAPIPath computation.
-	return hostURL, nil
-}
-
-// defaultServerURLFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.defaultServerURLFor.
-// defaultServerURLFor is shared between IsConfigTransportTLS and RESTClientFor. It
-// requires Host and Version to be set prior to being called.
-func defaultServerURLFor(config *restConfig) (*url.URL, error) {
-	// TODO: move the default to secure when the apiserver supports TLS by default
-	// config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA."
-	hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0
-	hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0
-	defaultTLS := hasCA || hasCert || config.Insecure
-	host := config.Host
-	if host == "" {
-		host = "localhost"
-	}
-
-	// REMOVED: Configurable APIPath, GroupVersion
-	return defaultServerURL(host, defaultTLS)
-}
-
-// transportFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.transportFor.
-// TransportFor returns an http.RoundTripper that will provide the authentication
-// or transport level security defined by the provided Config. Will return the
-// default http.DefaultTransport if no special case behavior is needed.
-func transportFor(config *restConfig) (http.RoundTripper, error) {
-	// REMOVED: separation between restclient.Config and transport.Config, Transport, WrapTransport support
-	return transportNew(config)
-}
-
-// isConfigTransportTLS is a modified copy of k8s.io/kubernetes/pkg/client/restclient.IsConfigTransportTLS.
-// IsConfigTransportTLS returns true if and only if the provided
-// config will result in a protected connection to the server when it
-// is passed to restclient.RESTClientFor(). Use to determine when to
-// send credentials over the wire.
-//
-// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are
-// still possible.
-func isConfigTransportTLS(config restConfig) bool { - baseURL, err := defaultServerURLFor(&config) - if err != nil { - return false - } - return baseURL.Scheme == "https" -} - -// transportNew is a modified copy of k8s.io/kubernetes/pkg/client/transport.New. -// New returns an http.RoundTripper that will provide the authentication -// or transport level security defined by the provided Config. -func transportNew(config *restConfig) (http.RoundTripper, error) { - // REMOVED: custom config.Transport support. - // Set transport level security - - var ( - rt http.RoundTripper - err error - ) - - rt, err = tlsCacheGet(config) - if err != nil { - return nil, err - } - - // REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains. - if len(config.Username) != 0 && len(config.BearerToken) != 0 { - return nil, errors.Errorf("username/password or bearer token may be set, but not both") - } - - return rt, nil -} - -// newProxierWithNoProxyCIDR is a modified copy of k8s.io/apimachinery/pkg/util/net.NewProxierWithNoProxyCIDR. -// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if -// no matching CIDRs are found -func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { - // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it - noProxyEnv := os.Getenv("NO_PROXY") - noProxyRules := strings.Split(noProxyEnv, ",") - - cidrs := []*net.IPNet{} - for _, noProxyRule := range noProxyRules { - _, cidr, _ := net.ParseCIDR(noProxyRule) - if cidr != nil { - cidrs = append(cidrs, cidr) - } - } - - if len(cidrs) == 0 { - return delegate - } - - return func(req *http.Request) (*url.URL, error) { - host := req.URL.Host - // for some urls, the Host is already the host, not the host:port - if net.ParseIP(host) == nil { - var err error - host, _, err = net.SplitHostPort(req.URL.Host) - if err != nil { - return delegate(req) - } - } - - ip := net.ParseIP(host) - if ip == nil { - return delegate(req) - } - - for _, cidr := range cidrs { - if cidr.Contains(ip) { - return nil, nil - } - } - - return delegate(req) - } -} - -// tlsCacheGet is a modified copy of k8s.io/kubernetes/pkg/client/transport.tlsTransportCache.get. -func tlsCacheGet(config *restConfig) (http.RoundTripper, error) { - // REMOVED: any actual caching - - // Get the TLS options for this client config - tlsConfig, err := tlsConfigFor(config) - if err != nil { - return nil, err - } - // The options didn't require a custom TLS config - if tlsConfig == nil { - return http.DefaultTransport, nil - } - - // REMOVED: Call to k8s.io/apimachinery/pkg/util/net.SetTransportDefaults; instead of the generic machinery and conditionals, hard-coded the result here. - t := &http.Transport{ - // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings - // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY - Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment), - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - } - // Allow clients to disable http2 if needed. 
- if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 { - _ = http2.ConfigureTransport(t) - } - return t, nil -} - -// tlsConfigFor is a modified copy of k8s.io/kubernetes/pkg/client/transport.TLSConfigFor. -// TLSConfigFor returns a tls.Config that will provide the transport level security defined -// by the provided Config. Will return nil if no transport level security is requested. -func tlsConfigFor(c *restConfig) (*tls.Config, error) { - if !(c.HasCA() || c.HasCertAuth() || c.Insecure) { - return nil, nil - } - if c.HasCA() && c.Insecure { - return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed") - } - if err := loadTLSFiles(c); err != nil { - return nil, err - } - - tlsConfig := &tls.Config{ - // Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability) - MinVersion: tls.VersionTLS10, - InsecureSkipVerify: c.Insecure, - } - - if c.HasCA() { - tlsConfig.RootCAs = rootCertPool(c.CAData) - } - - if c.HasCertAuth() { - cert, err := tls.X509KeyPair(c.CertData, c.KeyData) - if err != nil { - return nil, err - } - tlsConfig.Certificates = []tls.Certificate{cert} - } - - return tlsConfig, nil -} - -// loadTLSFiles is a modified copy of k8s.io/kubernetes/pkg/client/transport.loadTLSFiles. -// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, -// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are -// either populated or were empty to start. -func loadTLSFiles(c *restConfig) error { - var err error - c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile) - if err != nil { - return err - } - - c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile) - if err != nil { - return err - } - - c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile) - if err != nil { - return err - } - return nil -} - -// dataFromSliceOrFile is a modified copy of k8s.io/kubernetes/pkg/client/transport.dataFromSliceOrFile. -// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, -// or an error if an error occurred reading the file -func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { - if len(data) > 0 { - return data, nil - } - if len(file) > 0 { - fileData, err := ioutil.ReadFile(file) - if err != nil { - return []byte{}, err - } - return fileData, nil - } - return nil, nil -} - -// rootCertPool is a modified copy of k8s.io/kubernetes/pkg/client/transport.rootCertPool. -// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs". -// When caData is not empty, it will be the ONLY information used in the CertPool. -func rootCertPool(caData []byte) *x509.CertPool { - // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go - // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values - // It doesn't allow trusting either/or, but hopefully that won't be an issue - if len(caData) == 0 { - return nil - } - - // if we have caData, use it - certPool := x509.NewCertPool() - certPool.AppendCertsFromPEM(caData) - return certPool -} - -// HasCA is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCA. -// HasCA returns whether the configuration has a certificate authority or not. 
-func (c *restConfig) HasCA() bool {
-	return len(c.CAData) > 0 || len(c.CAFile) > 0
-}
-
-// HasCertAuth is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCertAuth.
-// HasCertAuth returns whether the configuration has certificate authentication or not.
-func (c *restConfig) HasCertAuth() bool {
-	return len(c.CertData) != 0 || len(c.CertFile) != 0
-}
-
-// clientcmdConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Config.
-// Config holds the information needed to connect to remote kubernetes clusters as a given user
-// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
-type clientcmdConfig struct {
-	// Clusters is a map of referenceable names to cluster configs
-	Clusters clustersMap `json:"clusters"`
-	// AuthInfos is a map of referenceable names to user configs
-	AuthInfos authInfosMap `json:"users"`
-	// Contexts is a map of referenceable names to context configs
-	Contexts contextsMap `json:"contexts"`
-	// CurrentContext is the name of the context that you would like to use by default
-	CurrentContext string `json:"current-context"`
-}
-
-type clustersMap map[string]*clientcmdCluster
-
-func (m *clustersMap) UnmarshalJSON(data []byte) error {
-	var a []v1NamedCluster
-	if err := json.Unmarshal(data, &a); err != nil {
-		return err
-	}
-	for _, e := range a {
-		cluster := e.Cluster // Allocates a new instance in each iteration
-		(*m)[e.Name] = &cluster
-	}
-	return nil
-}
-
-type authInfosMap map[string]*clientcmdAuthInfo
-
-func (m *authInfosMap) UnmarshalJSON(data []byte) error {
-	var a []v1NamedAuthInfo
-	if err := json.Unmarshal(data, &a); err != nil {
-		return err
-	}
-	for _, e := range a {
-		authInfo := e.AuthInfo // Allocates a new instance in each iteration
-		(*m)[e.Name] = &authInfo
-	}
-	return nil
-}
-
-type contextsMap map[string]*clientcmdContext
-
-func (m *contextsMap) UnmarshalJSON(data []byte) error {
-	var a []v1NamedContext
-	if err := json.Unmarshal(data, &a); err != nil {
-		return err
-	}
-	for _, e := range a {
-		context := e.Context // Allocates a new instance in each iteration
-		(*m)[e.Name] = &context
-	}
-	return nil
-}
-
-// clientcmdNewConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.NewConfig.
-// NewConfig is a convenience function that returns a new Config object with non-nil maps
-func clientcmdNewConfig() *clientcmdConfig {
-	return &clientcmdConfig{
-		Clusters:  make(map[string]*clientcmdCluster),
-		AuthInfos: make(map[string]*clientcmdAuthInfo),
-		Contexts:  make(map[string]*clientcmdContext),
-	}
-}
-
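// A sketch of the list-to-map decoding above: kubeconfig's v1 wire format
// stores named lists, which the UnmarshalJSON helpers key by name. The input
// is hypothetical.
func exampleClustersMap() (string, error) {
	raw := []byte(`[{"name":"dev","cluster":{"server":"https://localhost:8443"}}]`)
	m := clustersMap{}
	if err := m.UnmarshalJSON(raw); err != nil {
		return "", err
	}
	return m["dev"].Server, nil // "https://localhost:8443"
}
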
-// clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster.
-// Cluster contains information about how to communicate with a kubernetes cluster
-type clientcmdCluster struct {
-	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
-	LocationOfOrigin string
-	// Server is the address of the kubernetes cluster (https://hostname:port).
-	Server string `json:"server"`
-	// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
-	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
-	// CertificateAuthority is the path to a cert file for the certificate authority.
-	CertificateAuthority string `json:"certificate-authority,omitempty"`
-	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
-	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
-}
-
-// clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo.
-// AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are.
-type clientcmdAuthInfo struct {
-	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
-	LocationOfOrigin string
-	// ClientCertificate is the path to a client cert file for TLS.
-	ClientCertificate string `json:"client-certificate,omitempty"`
-	// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
-	ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
-	// ClientKey is the path to a client key file for TLS.
-	ClientKey string `json:"client-key,omitempty"`
-	// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
-	ClientKeyData []byte `json:"client-key-data,omitempty"`
-	// Token is the bearer token for authentication to the kubernetes cluster.
-	Token string `json:"token,omitempty"`
-	// Username is the username for basic authentication to the kubernetes cluster.
-	Username string `json:"username,omitempty"`
-	// Password is the password for basic authentication to the kubernetes cluster.
-	Password string `json:"password,omitempty"`
-}
-
-// clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context.
-// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
-type clientcmdContext struct {
-	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
-	LocationOfOrigin string
-	// Cluster is the name of the cluster for this context
-	Cluster string `json:"cluster"`
-	// AuthInfo is the name of the authInfo for this context
-	AuthInfo string `json:"user"`
-	// Namespace is the default namespace to use on unspecified requests
-	Namespace string `json:"namespace,omitempty"`
-}
-
-// v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster.
-// NamedCluster relates nicknames to cluster information
-type v1NamedCluster struct {
-	// Name is the nickname for this Cluster
-	Name string `json:"name"`
-	// Cluster holds the cluster information
-	Cluster clientcmdCluster `json:"cluster"`
-}
-
-// v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext.
-// NamedContext relates nicknames to context information
-type v1NamedContext struct {
-	// Name is the nickname for this Context
-	Name string `json:"name"`
-	// Context holds the context information
-	Context clientcmdContext `json:"context"`
-}
-
-// v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo.
-// NamedAuthInfo relates nicknames to auth information -type v1NamedAuthInfo struct { - // Name is the nickname for this AuthInfo - Name string `json:"name"` - // AuthInfo holds the auth information - AuthInfo clientcmdAuthInfo `json:"user"` -} diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go deleted file mode 100644 index f494420d1a8b..000000000000 --- a/vendor/github.com/containers/image/openshift/openshift.go +++ /dev/null @@ -1,539 +0,0 @@ -package openshift - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/containers/image/docker" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/containers/image/version" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// openshiftClient is configuration for dealing with a single image stream, for reading or writing. -type openshiftClient struct { - ref openshiftReference - baseURL *url.URL - // Values from Kubernetes configuration - httpClient *http.Client - bearerToken string // "" if not used - username string // "" if not used - password string // if username != "" -} - -// newOpenshiftClient creates a new openshiftClient for the specified reference. -func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) { - // We have already done this parsing in ParseReference, but thrown away - // httpClient. So, parse again. - // (We could also rework/split restClientFor to "get base URL" to be done - // in ParseReference, and "get httpClient" to be done here. But until/unless - // we support non-default clusters, this is good enough.) - - // Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client. - cmdConfig := defaultClientConfig() - logrus.Debugf("cmdConfig: %#v", cmdConfig) - restConfig, err := cmdConfig.ClientConfig() - if err != nil { - return nil, err - } - // REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.) - logrus.Debugf("restConfig: %#v", restConfig) - baseURL, httpClient, err := restClientFor(restConfig) - if err != nil { - return nil, err - } - logrus.Debugf("URL: %#v", *baseURL) - - if httpClient == nil { - httpClient = http.DefaultClient - } - - return &openshiftClient{ - ref: ref, - baseURL: baseURL, - httpClient: httpClient, - bearerToken: restConfig.BearerToken, - username: restConfig.Username, - password: restConfig.Password, - }, nil -} - -// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object. 
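// A typical call, with a hypothetical namespace and stream:
//
//	body, err := c.doRequest(ctx, "GET",
//		"/apis/image.openshift.io/v1/namespaces/myns/imagestreams/mystream", nil)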
-func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) { - url := *c.baseURL - url.Path = path - var requestBodyReader io.Reader - if requestBody != nil { - logrus.Debugf("Will send body: %s", requestBody) - requestBodyReader = bytes.NewReader(requestBody) - } - req, err := http.NewRequest(method, url.String(), requestBodyReader) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - - if len(c.bearerToken) != 0 { - req.Header.Set("Authorization", "Bearer "+c.bearerToken) - } else if len(c.username) != 0 { - req.SetBasicAuth(c.username, c.password) - } - req.Header.Set("Accept", "application/json, */*") - req.Header.Set("User-Agent", fmt.Sprintf("skopeo/%s", version.Version)) - if requestBody != nil { - req.Header.Set("Content-Type", "application/json") - } - - logrus.Debugf("%s %s", method, url) - res, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer res.Body.Close() - body, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - logrus.Debugf("Got body: %s", body) - // FIXME: Just throwing this useful information away only to try to guess later... - logrus.Debugf("Got content-type: %s", res.Header.Get("Content-Type")) - - var status status - statusValid := false - if err := json.Unmarshal(body, &status); err == nil && len(status.Status) > 0 { - statusValid = true - } - - switch { - case res.StatusCode == http.StatusSwitchingProtocols: // FIXME?! No idea why this weird case exists in k8s.io/kubernetes/pkg/client/restclient. - if statusValid && status.Status != "Success" { - return nil, errors.New(status.Message) - } - case res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusPartialContent: - // OK. - default: - if statusValid { - return nil, errors.New(status.Message) - } - return nil, errors.Errorf("HTTP error: status code: %d, body: %s", res.StatusCode, string(body)) - } - - return body, nil -} - -// getImage loads the specified image object. -func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) { - // FIXME: validate components per validation.IsValidPathSegmentName? - path := fmt.Sprintf("/apis/image.openshift.io/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName) - body, err := c.doRequest(ctx, "GET", path, nil) - if err != nil { - return nil, err - } - // Note: This does absolutely no kind/version checking or conversions. - var isi imageStreamImage - if err := json.Unmarshal(body, &isi); err != nil { - return nil, err - } - return &isi.Image, nil -} - -// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use; -// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside. 
-func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) { - parts := strings.SplitN(ref, "/", 2) - if len(parts) != 2 { - return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref) - } - return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil -} - -type openshiftImageSource struct { - client *openshiftClient - // Values specific to this image - ctx *types.SystemContext - requestedManifestMIMETypes []string - // State - docker types.ImageSource // The Docker Registry endpoint, or nil if not resolved yet - imageStreamImageName string // Resolved image identifier, or "" if not known yet -} - -// newImageSource creates a new ImageSource for the specified reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. -// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. -// The caller must call .Close() on the returned ImageSource. -func newImageSource(ctx *types.SystemContext, ref openshiftReference, requestedManifestMIMETypes []string) (types.ImageSource, error) { - client, err := newOpenshiftClient(ref) - if err != nil { - return nil, err - } - - return &openshiftImageSource{ - client: client, - ctx: ctx, - requestedManifestMIMETypes: requestedManifestMIMETypes, - }, nil -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (s *openshiftImageSource) Reference() types.ImageReference { - return s.client.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *openshiftImageSource) Close() error { - if s.docker != nil { - err := s.docker.Close() - s.docker = nil - - return err - } - - return nil -} - -func (s *openshiftImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - if err := s.ensureImageIsResolved(context.TODO()); err != nil { - return nil, "", err - } - return s.docker.GetTargetManifest(digest) -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -func (s *openshiftImageSource) GetManifest() ([]byte, string, error) { - if err := s.ensureImageIsResolved(context.TODO()); err != nil { - return nil, "", err - } - return s.docker.GetManifest() -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -func (s *openshiftImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { - if err := s.ensureImageIsResolved(context.TODO()); err != nil { - return nil, 0, err - } - return s.docker.GetBlob(info) -} - -func (s *openshiftImageSource) GetSignatures(ctx context.Context) ([][]byte, error) { - if err := s.ensureImageIsResolved(ctx); err != nil { - return nil, err - } - - image, err := s.client.getImage(ctx, s.imageStreamImageName) - if err != nil { - return nil, err - } - var sigs [][]byte - for _, sig := range image.Signatures { - if sig.Type == imageSignatureTypeAtomic { - sigs = append(sigs, sig.Content) - } - } - return sigs, nil -} - -// ensureImageIsResolved sets up s.docker and s.imageStreamImageName -func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { - if s.docker != nil { - return nil - } - - // FIXME: validate components per validation.IsValidPathSegmentName? 
- path := fmt.Sprintf("/apis/image.openshift.io/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream) - body, err := s.client.doRequest(ctx, "GET", path, nil) - if err != nil { - return err - } - // Note: This does absolutely no kind/version checking or conversions. - var is imageStream - if err := json.Unmarshal(body, &is); err != nil { - return err - } - var te *tagEvent - for _, tag := range is.Status.Tags { - if tag.Tag != s.client.ref.dockerReference.Tag() { - continue - } - if len(tag.Items) > 0 { - te = &tag.Items[0] - break - } - } - if te == nil { - return errors.Errorf("No matching tag found") - } - logrus.Debugf("tag event %#v", te) - dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference) - if err != nil { - return err - } - logrus.Debugf("Resolved reference %#v", dockerRefString) - dockerRef, err := docker.ParseReference("//" + dockerRefString) - if err != nil { - return err - } - d, err := dockerRef.NewImageSource(s.ctx, s.requestedManifestMIMETypes) - if err != nil { - return err - } - s.docker = d - s.imageStreamImageName = te.Image - return nil -} - -type openshiftImageDestination struct { - client *openshiftClient - docker types.ImageDestination // The Docker Registry endpoint - // State - imageStreamImageName string // "" if not yet known -} - -// newImageDestination creates a new ImageDestination for the specified reference. -func newImageDestination(ctx *types.SystemContext, ref openshiftReference) (types.ImageDestination, error) { - client, err := newOpenshiftClient(ref) - if err != nil { - return nil, err - } - - // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match, - // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know - // the manifest digest at this point. - dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag()) - dockerRef, err := docker.ParseReference(dockerRefString) - if err != nil { - return nil, err - } - docker, err := dockerRef.NewImageDestination(ctx) - if err != nil { - return nil, err - } - - return &openshiftImageDestination{ - client: client, - docker: docker, - }, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *openshiftImageDestination) Reference() types.ImageReference { - return d.client.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *openshiftImageDestination) Close() error { - return d.docker.Close() -} - -func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string { - return d.docker.SupportedManifestMIMETypes() -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *openshiftImageDestination) SupportsSignatures() error { - return nil -} - -// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. 
-func (d *openshiftImageDestination) ShouldCompressLayers() bool { - return true -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool { - return true -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *openshiftImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *openshiftImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { - return d.docker.PutBlob(stream, inputInfo) -} - -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. -// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. -// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); -// it returns a non-nil error only on an unexpected failure. -func (d *openshiftImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { - return d.docker.HasBlob(info) -} - -func (d *openshiftImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) { - return d.docker.ReapplyBlob(info) -} - -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *openshiftImageDestination) PutManifest(m []byte) error { - manifestDigest, err := manifest.Digest(m) - if err != nil { - return err - } - d.imageStreamImageName = manifestDigest.String() - - return d.docker.PutManifest(m) -} - -func (d *openshiftImageDestination) PutSignatures(signatures [][]byte) error { - if d.imageStreamImageName == "" { - return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures") - } - // Because image signatures are a shared resource in Atomic Registry, the default upload - // always adds signatures. Eventually we should also allow removing signatures. - - if len(signatures) == 0 { - return nil // No need to even read the old state. 
-	}
-
-	image, err := d.client.getImage(context.TODO(), d.imageStreamImageName)
-	if err != nil {
-		return err
-	}
-	existingSigNames := map[string]struct{}{}
-	for _, sig := range image.Signatures {
-		existingSigNames[sig.objectMeta.Name] = struct{}{}
-	}
-
-sigExists:
-	for _, newSig := range signatures {
-		for _, existingSig := range image.Signatures {
-			if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
-				continue sigExists
-			}
-		}
-
-		// The API expects us to invent a new unique name. This is racy, but hopefully good enough.
-		var signatureName string
-		for {
-			randBytes := make([]byte, 16)
-			n, err := rand.Read(randBytes)
-			// crypto/rand.Read returns n == len(randBytes) exactly when err == nil.
-			if err != nil || n != 16 {
-				return errors.Wrapf(err, "Error generating random signature len %d", n)
-			}
-			signatureName = fmt.Sprintf("%s@%032x", d.imageStreamImageName, randBytes)
-			if _, ok := existingSigNames[signatureName]; !ok {
-				break
-			}
-		}
-		// Note: This does absolutely no kind/version checking or conversions.
-		sig := imageSignature{
-			typeMeta: typeMeta{
-				Kind:       "ImageSignature",
-				APIVersion: "v1",
-			},
-			objectMeta: objectMeta{Name: signatureName},
-			Type:       imageSignatureTypeAtomic,
-			Content:    newSig,
-		}
-		body, err := json.Marshal(sig)
-		if err != nil {
-			return err
-		}
-		_, err = d.client.doRequest(context.TODO(), "POST", "/apis/image.openshift.io/v1/imagesignatures", body)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *openshiftImageDestination) Commit() error {
-	return d.docker.Commit()
-}
-
-// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies.
-type imageStream struct { - Status imageStreamStatus `json:"status,omitempty"` -} -type imageStreamStatus struct { - DockerImageRepository string `json:"dockerImageRepository"` - Tags []namedTagEventList `json:"tags,omitempty"` -} -type namedTagEventList struct { - Tag string `json:"tag"` - Items []tagEvent `json:"items"` -} -type tagEvent struct { - DockerImageReference string `json:"dockerImageReference"` - Image string `json:"image"` -} -type imageStreamImage struct { - Image image `json:"image"` -} -type image struct { - objectMeta `json:"metadata,omitempty"` - DockerImageReference string `json:"dockerImageReference,omitempty"` - // DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty"` - DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"` - DockerImageManifest string `json:"dockerImageManifest,omitempty"` - // DockerImageLayers []ImageLayer `json:"dockerImageLayers"` - Signatures []imageSignature `json:"signatures,omitempty"` -} - -const imageSignatureTypeAtomic string = "atomic" - -type imageSignature struct { - typeMeta `json:",inline"` - objectMeta `json:"metadata,omitempty"` - Type string `json:"type"` - Content []byte `json:"content"` - // Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` - // ImageIdentity string `json:"imageIdentity,omitempty"` - // SignedClaims map[string]string `json:"signedClaims,omitempty"` - // Created *unversioned.Time `json:"created,omitempty"` - // IssuedBy SignatureIssuer `json:"issuedBy,omitempty"` - // IssuedTo SignatureSubject `json:"issuedTo,omitempty"` -} -type typeMeta struct { - Kind string `json:"kind,omitempty"` - APIVersion string `json:"apiVersion,omitempty"` -} -type objectMeta struct { - Name string `json:"name,omitempty"` - GenerateName string `json:"generateName,omitempty"` - Namespace string `json:"namespace,omitempty"` - SelfLink string `json:"selfLink,omitempty"` - ResourceVersion string `json:"resourceVersion,omitempty"` - Generation int64 `json:"generation,omitempty"` - DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - Annotations map[string]string `json:"annotations,omitempty"` -} - -// A subset of k8s.io/kubernetes/pkg/api/unversioned/Status -type status struct { - Status string `json:"status,omitempty"` - Message string `json:"message,omitempty"` - // Reason StatusReason `json:"reason,omitempty"` - // Details *StatusDetails `json:"details,omitempty"` - Code int32 `json:"code,omitempty"` -} diff --git a/vendor/github.com/containers/image/openshift/openshift_transport.go b/vendor/github.com/containers/image/openshift/openshift_transport.go deleted file mode 100644 index 108e11023276..000000000000 --- a/vendor/github.com/containers/image/openshift/openshift_transport.go +++ /dev/null @@ -1,157 +0,0 @@ -package openshift - -import ( - "fmt" - "regexp" - "strings" - - "github.com/containers/image/docker/policyconfiguration" - "github.com/containers/image/docker/reference" - genericImage "github.com/containers/image/image" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for OpenShift registry-hosted images. 
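// A hypothetical image reference in this transport's syntax:
//
//	atomic:registry.example.com/myns/mystream:latest
//
// ParseReference below receives only the part after the "atomic:" prefix.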
-var Transport = openshiftTransport{} - -type openshiftTransport struct{} - -func (t openshiftTransport) Name() string { - return "atomic" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t openshiftTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// Note that imageNameRegexp is namespace/stream:tag, this -// is HOSTNAME/namespace/stream:tag or parent prefixes. -// Keep this in sync with imageNameRegexp! -var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$") - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error { - if scopeRegexp.FindStringIndex(scope) == nil { - return errors.Errorf("Invalid scope name %s", scope) - } - return nil -} - -// openshiftReference is an ImageReference for OpenShift images. -type openshiftReference struct { - dockerReference reference.NamedTagged - namespace string // Computed from dockerReference in advance. - stream string // Computed from dockerReference in advance. -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference. -func ParseReference(ref string) (types.ImageReference, error) { - r, err := reference.ParseNormalizedNamed(ref) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse image reference %q", ref) - } - tagged, ok := r.(reference.NamedTagged) - if !ok { - return nil, errors.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref) - } - return NewReference(tagged) -} - -// NewReference returns an OpenShift reference for a reference.NamedTagged -func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) { - r := strings.SplitN(reference.Path(dockerRef), "/", 3) - if len(r) != 2 { - return nil, errors.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'", - reference.FamiliarString(dockerRef)) - } - return openshiftReference{ - namespace: r[0], - stream: r[1], - dockerReference: dockerRef, - }, nil -} - -func (ref openshiftReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref openshiftReference) StringWithinTransport() string { - return reference.FamiliarString(ref.dockerReference) -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. 
after redirect or alias processing), or nil if unknown/not applicable. -func (ref openshiftReference) DockerReference() reference.Named { - return ref.dockerReference -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. -func (ref openshiftReference) PolicyConfigurationIdentity() string { - res, err := policyconfiguration.DockerReferenceIdentity(ref.dockerReference) - if res == "" || err != nil { // Coverage: Should never happen, NewReference constructs a valid tagged reference. - panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) - } - return res -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref openshiftReference) PolicyConfigurationNamespaces() []string { - return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference) -} - -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref openshiftReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - src, err := newImageSource(ctx, ref, nil) - if err != nil { - return nil, err - } - return genericImage.FromSource(src) -} - -// NewImageSource returns a types.ImageSource for this reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. -// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. -// The caller must call .Close() on the returned ImageSource. -func (ref openshiftReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - return newImageSource(ctx, ref, requestedManifestMIMETypes) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref openshiftReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ctx, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. 
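// Aside — an illustrative sketch, not from the vendored sources: the anchored
// regexp ValidatePolicyConfigurationScope applies above. It accepts HOSTNAME,
// HOSTNAME/namespace and HOSTNAME/namespace/stream, with an optional :tag on
// the final component only. The DeleteImage implementation the comment above
// describes follows below.
package main

import (
	"fmt"
	"regexp"
)

// Copied from the transport above; kept in sync with its imageNameRegexp.
var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")

func main() {
	for _, scope := range []string{
		"registry.example.com",                     // accepted
		"registry.example.com/ns/stream:notlatest", // accepted
		"registry.example.com/too/deep/hierarchy",  // rejected: too many components
		"registry.example.com/ns/stream:tag1:tag2", // rejected: colon inside the tag
	} {
		fmt.Println(scope, scopeRegexp.MatchString(scope))
	}
}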
-func (ref openshiftReference) DeleteImage(ctx *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for atomic: images") -} diff --git a/vendor/github.com/containers/image/openshift/openshift_transport_test.go b/vendor/github.com/containers/image/openshift/openshift_transport_test.go deleted file mode 100644 index 5c589c192323..000000000000 --- a/vendor/github.com/containers/image/openshift/openshift_transport_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package openshift - -import ( - "testing" - - "github.com/containers/image/docker/reference" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - sha256digest = "@sha256:" + sha256digestHex -) - -func TestTransportName(t *testing.T) { - assert.Equal(t, "atomic", Transport.Name()) -} - -func TestTransportValidatePolicyConfigurationScope(t *testing.T) { - for _, scope := range []string{ - "registry.example.com/ns/stream" + sha256digest, - "registry.example.com/ns/stream:notlatest", - "registry.example.com/ns/stream", - "registry.example.com/ns", - "registry.example.com", - } { - err := Transport.ValidatePolicyConfigurationScope(scope) - assert.NoError(t, err, scope) - } - - for _, scope := range []string{ - "registry.example.com/too/deep/hierarchy", - "registry.example.com/ns/stream:tag1:tag2", - } { - err := Transport.ValidatePolicyConfigurationScope(scope) - assert.Error(t, err, scope) - } -} - -func TestNewReference(t *testing.T) { - // too many ns - r, err := reference.ParseNormalizedNamed("registry.example.com/ns1/ns2/ns3/stream:tag") - require.NoError(t, err) - tagged, ok := r.(reference.NamedTagged) - require.True(t, ok) - _, err = NewReference(tagged) - assert.Error(t, err) - - r, err = reference.ParseNormalizedNamed("registry.example.com/ns/stream:tag") - require.NoError(t, err) - tagged, ok = r.(reference.NamedTagged) - require.True(t, ok) - _, err = NewReference(tagged) - assert.NoError(t, err) -} - -func TestParseReference(t *testing.T) { - // Success - ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest") - require.NoError(t, err) - osRef, ok := ref.(openshiftReference) - require.True(t, ok) - assert.Equal(t, "ns", osRef.namespace) - assert.Equal(t, "stream", osRef.stream) - assert.Equal(t, "notlatest", osRef.dockerReference.Tag()) - assert.Equal(t, "registry.example.com:8443", reference.Domain(osRef.dockerReference)) - - // Components creating an invalid Docker Reference name - _, err = ParseReference("registry.example.com/ns/UPPERCASEISINVALID:notlatest") - assert.Error(t, err) - - _, err = ParseReference("registry.example.com/ns/stream:invalid!tag@value=") - assert.Error(t, err) -} - -func TestReferenceDockerReference(t *testing.T) { - ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest") - require.NoError(t, err) - dockerRef := ref.DockerReference() - require.NotNil(t, dockerRef) - assert.Equal(t, "registry.example.com:8443/ns/stream:notlatest", dockerRef.String()) -} - -func TestReferenceTransport(t *testing.T) { - ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest") - require.NoError(t, err) - assert.Equal(t, Transport, ref.Transport()) -} - -func TestReferenceStringWithinTransport(t *testing.T) { - ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest") - require.NoError(t, err) - assert.Equal(t, "registry.example.com:8443/ns/stream:notlatest", ref.StringWithinTransport()) - // We 
should do one more round to verify that the output can be parsed, to an equal value, - // but that is untested because it depends on per-user configuration. -} - -func TestReferencePolicyConfigurationIdentity(t *testing.T) { - // Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference. - ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest") - require.NoError(t, err) - assert.Equal(t, "registry.example.com:8443/ns/stream:notlatest", ref.PolicyConfigurationIdentity()) -} - -func TestReferencePolicyConfigurationNamespaces(t *testing.T) { - // Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference. - ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest") - require.NoError(t, err) - assert.Equal(t, []string{ - "registry.example.com:8443/ns/stream", - "registry.example.com:8443/ns", - "registry.example.com:8443", - }, ref.PolicyConfigurationNamespaces()) -} - -// openshiftReference.NewImage, openshiftReference.NewImageSource, openshiftReference.NewImageDestination untested because they depend -// on per-user configuration when initializing httpClient. - -func TestReferenceDeleteImage(t *testing.T) { - ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest") - require.NoError(t, err) - err = ref.DeleteImage(nil) - assert.Error(t, err) -} diff --git a/vendor/github.com/containers/image/origin.sha b/vendor/github.com/containers/image/origin.sha deleted file mode 100644 index 484e13fdaf46..000000000000 --- a/vendor/github.com/containers/image/origin.sha +++ /dev/null @@ -1 +0,0 @@ -49fb41961e01e7c08a46129bda531148288a9df2 diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go deleted file mode 100644 index c056b83bb7c0..000000000000 --- a/vendor/github.com/containers/image/ostree/ostree_dest.go +++ /dev/null @@ -1,323 +0,0 @@ -package ostree - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/containers/storage/pkg/archive" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - - "github.com/ostreedev/ostree-go/pkg/otbuiltin" -) - -type blobToImport struct { - Size int64 - Digest digest.Digest - BlobPath string -} - -type descriptor struct { - Size int64 `json:"size"` - Digest digest.Digest `json:"digest"` -} - -type manifestSchema struct { - ConfigDescriptor descriptor `json:"config"` - LayersDescriptors []descriptor `json:"layers"` -} - -type ostreeImageDestination struct { - ref ostreeReference - manifest string - schema manifestSchema - tmpDirPath string - blobs map[string]*blobToImport -} - -// newImageDestination returns an ImageDestination for writing to an existing ostree. -func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDestination, error) { - tmpDirPath = filepath.Join(tmpDirPath, ref.branchName) - if err := ensureDirectoryExists(tmpDirPath); err != nil { - return nil, err - } - return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}}, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. 
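// Aside — a minimal sketch, not from the vendored sources: the path split
// behind NewReference that the tests above exercise. The path portion of
// HOSTNAME/namespace/stream:tag must have exactly two components; the real
// code derives the path from a parsed reference.NamedTagged rather than a
// raw string. The Reference method the comment above documents follows below.
package main

import (
	"fmt"
	"strings"
)

func splitNamespaceStream(path string) (namespace, stream string, err error) {
	parts := strings.SplitN(path, "/", 3)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("invalid path %q, expected 'namespace/stream'", path)
	}
	return parts[0], parts[1], nil
}

func main() {
	ns, stream, err := splitNamespaceStream("ns/stream")
	fmt.Println(ns, stream, err) // ns stream <nil>
	_, _, err = splitNamespaceStream("ns1/ns2/ns3/stream")
	fmt.Println(err) // rejected: too many namespace components
}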
-func (d *ostreeImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *ostreeImageDestination) Close() error { - return os.RemoveAll(d.tmpDirPath) -} - -func (d *ostreeImageDestination) SupportedManifestMIMETypes() []string { - return []string{ - manifest.DockerV2Schema2MediaType, - } -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *ostreeImageDestination) SupportsSignatures() error { - return nil -} - -// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. -func (d *ostreeImageDestination) ShouldCompressLayers() bool { - return false -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *ostreeImageDestination) MustMatchRuntimeOS() bool { - return true -} - -func (d *ostreeImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { - tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob") - if err != nil { - return types.BlobInfo{}, err - } - - blobPath := filepath.Join(tmpDir, "content") - blobFile, err := os.Create(blobPath) - if err != nil { - return types.BlobInfo{}, err - } - defer blobFile.Close() - - digester := digest.Canonical.Digester() - tee := io.TeeReader(stream, digester.Hash()) - - size, err := io.Copy(blobFile, tee) - if err != nil { - return types.BlobInfo{}, err - } - computedDigest := digester.Digest() - if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) - } - if err := blobFile.Sync(); err != nil { - return types.BlobInfo{}, err - } - - hash := computedDigest.Hex() - d.blobs[hash] = &blobToImport{Size: size, Digest: computedDigest, BlobPath: blobPath} - return types.BlobInfo{Digest: computedDigest, Size: size}, nil -} - -func fixFiles(dir string, usermode bool) error { - entries, err := ioutil.ReadDir(dir) - if err != nil { - return err - } - - for _, info := range entries { - fullpath := filepath.Join(dir, info.Name()) - if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { - if err := os.Remove(fullpath); err != nil { - return err - } - continue - } - if info.IsDir() { - if usermode { - if err := os.Chmod(fullpath, info.Mode()|0700); err != nil { - return err - } - } - err = fixFiles(fullpath, usermode) - if err != nil { - return err - } - } else if usermode && (info.Mode().IsRegular() || (info.Mode()&os.ModeSymlink) != 0) { - if err := os.Chmod(fullpath, info.Mode()|0600); err != nil { - return err - } - } - } - - return nil -} - -func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error { - opts := otbuiltin.NewCommitOptions() - opts.AddMetadataString = metadata - opts.Timestamp = time.Now() - // OCI layers have no parent OSTree commit - opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000" - _, err := repo.Commit(root, branch, opts) - 
return err -} - -func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToImport) error { - ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) - destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root") - if err := ensureDirectoryExists(destinationPath); err != nil { - return err - } - defer func() { - os.Remove(blob.BlobPath) - os.RemoveAll(destinationPath) - }() - - if os.Getuid() == 0 { - if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil { - return err - } - if err := fixFiles(destinationPath, false); err != nil { - return err - } - } else { - os.MkdirAll(destinationPath, 0755) - if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil { - return err - } - - if err := fixFiles(destinationPath, true); err != nil { - return err - } - } - return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)}) -} - -func (d *ostreeImageDestination) importConfig(blob *blobToImport) error { - ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) - - return exec.Command("ostree", "commit", - "--repo", d.ref.repo, - fmt.Sprintf("--add-metadata-string=docker.size=%d", blob.Size), - "--branch", ostreeBranch, filepath.Dir(blob.BlobPath)).Run() -} - -func (d *ostreeImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { - branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex()) - output, err := exec.Command("ostree", "show", "--repo", d.ref.repo, "--print-metadata-key=docker.size", branch).CombinedOutput() - if err != nil { - if bytes.Index(output, []byte("not found")) >= 0 || bytes.Index(output, []byte("No such")) >= 0 { - return false, -1, nil - } - return false, -1, err - } - size, err := strconv.ParseInt(strings.Trim(string(output), "'\n"), 10, 64) - if err != nil { - return false, -1, err - } - - return true, size, nil -} - -func (d *ostreeImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) { - return info, nil -} - -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. 
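// Aside — an illustrative sketch, not from the vendored sources: the PutBlob
// pattern above, which tees the incoming stream through a digester while
// copying it to disk, so the blob is written and hashed in a single pass.
// The PutManifest implementation the comment above documents follows below.
package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/opencontainers/go-digest"
)

func writeAndDigest(dst *os.File, src io.Reader) (digest.Digest, int64, error) {
	digester := digest.Canonical.Digester()
	tee := io.TeeReader(src, digester.Hash())
	size, err := io.Copy(dst, tee)
	if err != nil {
		return "", -1, err
	}
	return digester.Digest(), size, nil
}

func main() {
	blobFile, err := os.CreateTemp("", "blob")
	if err != nil {
		panic(err)
	}
	defer os.Remove(blobFile.Name())
	defer blobFile.Close()

	d, size, err := writeAndDigest(blobFile, strings.NewReader("hello"))
	fmt.Println(d, size, err) // sha256:2cf24d... 5 <nil>
}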
-func (d *ostreeImageDestination) PutManifest(manifest []byte) error { - d.manifest = string(manifest) - - if err := json.Unmarshal(manifest, &d.schema); err != nil { - return err - } - - manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath()) - if err := ensureParentDirectoryExists(manifestPath); err != nil { - return err - } - - return ioutil.WriteFile(manifestPath, manifest, 0644) -} - -func (d *ostreeImageDestination) PutSignatures(signatures [][]byte) error { - path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0)) - if err := ensureParentDirectoryExists(path); err != nil { - return err - } - - for i, sig := range signatures { - signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i)) - if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil { - return err - } - } - return nil -} - -func (d *ostreeImageDestination) Commit() error { - repo, err := otbuiltin.OpenRepo(d.ref.repo) - if err != nil { - return err - } - - _, err = repo.PrepareTransaction() - if err != nil { - return err - } - - for _, layer := range d.schema.LayersDescriptors { - hash := layer.Digest.Hex() - blob := d.blobs[hash] - // if the blob is not present in d.blobs then it is already stored in OSTree, - // and we don't need to import it. - if blob == nil { - continue - } - err := d.importBlob(repo, blob) - if err != nil { - return err - } - } - - hash := d.schema.ConfigDescriptor.Digest.Hex() - blob := d.blobs[hash] - if blob != nil { - err := d.importConfig(blob) - if err != nil { - return err - } - } - - manifestPath := filepath.Join(d.tmpDirPath, "manifest") - - metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest))} - err = d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata) - - _, err = repo.CommitTransaction() - return err -} - -func ensureDirectoryExists(path string) error { - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - } - return nil -} - -func ensureParentDirectoryExists(path string) error { - return ensureDirectoryExists(filepath.Dir(path)) -} diff --git a/vendor/github.com/containers/image/ostree/ostree_transport.go b/vendor/github.com/containers/image/ostree/ostree_transport.go deleted file mode 100644 index f165b13f0202..000000000000 --- a/vendor/github.com/containers/image/ostree/ostree_transport.go +++ /dev/null @@ -1,235 +0,0 @@ -package ostree - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "regexp" - "strings" - - "github.com/pkg/errors" - - "github.com/containers/image/directory/explicitfilepath" - "github.com/containers/image/docker/reference" - "github.com/containers/image/transports" - "github.com/containers/image/types" -) - -const defaultOSTreeRepo = "/ostree/repo" - -// Transport is an ImageTransport for ostree paths. -var Transport = ostreeTransport{} - -type ostreeTransport struct{} - -func (t ostreeTransport) Name() string { - return "ostree" -} - -func init() { - transports.Register(Transport) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. 
-func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error { - sep := strings.Index(scope, ":") - if sep < 0 { - return errors.Errorf("Invalid ostree: scope %s: Must include a repo", scope) - } - repo := scope[:sep] - - if !strings.HasPrefix(repo, "/") { - return errors.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope) - } - cleaned := filepath.Clean(repo) - if cleaned != repo { - return errors.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) - } - - // FIXME? In the namespaces within a repo, - // we could be verifying the various character set and length restrictions - // from docker/distribution/reference.regexp.go, but other than that there - // are few semantically invalid strings. - return nil -} - -// ostreeReference is an ImageReference for ostree paths. -type ostreeReference struct { - image string - branchName string - repo string -} - -func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) { - var repo = "" - var image = "" - s := strings.SplitN(ref, "@/", 2) - if len(s) == 1 { - image, repo = s[0], defaultOSTreeRepo - } else { - image, repo = s[0], "/"+s[1] - } - - return NewReference(image, repo) -} - -// NewReference returns an OSTree reference for a specified repo and image. -func NewReference(image string, repo string) (types.ImageReference, error) { - // image is not _really_ in a containers/image/docker/reference format; - // as far as the libOSTree ociimage/* namespace is concerned, it is more or - // less an arbitrary string with an implied tag. - // We use the reference.* parsers basically for the default tag name in - // reference.TagNameOnly, and incidentally for some character set and length - // restrictions. - var ostreeImage reference.Named - s := strings.SplitN(image, ":", 2) - - named, err := reference.WithName(s[0]) - if err != nil { - return nil, err - } - - if len(s) == 1 { - ostreeImage = reference.TagNameOnly(named) - } else { - ostreeImage, err = reference.WithTag(named, s[1]) - if err != nil { - return nil, err - } - } - - resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo) - if err != nil { - // With os.IsNotExist(err), the parent directory of repo is also not existent; - // that should ordinarily not happen, but it would be a bit weird to reject - // references which do not specify a repo just because the implicit defaultOSTreeRepo - // does not exist. - if os.IsNotExist(err) && repo == defaultOSTreeRepo { - resolved = repo - } else { - return nil, err - } - } - // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces - // from being ambiguous with values of PolicyConfigurationIdentity. - if strings.Contains(resolved, ":") { - return nil, errors.Errorf("Invalid OSTreeCI reference %s@%s: path %s contains a colon", image, repo, resolved) - } - - return ostreeReference{ - image: ostreeImage.String(), - branchName: encodeOStreeRef(ostreeImage.String()), - repo: resolved, - }, nil -} - -func (ref ostreeReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. 
-// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref ostreeReference) StringWithinTransport() string { - return fmt.Sprintf("%s@%s", ref.image, ref.repo) -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref ostreeReference) DockerReference() reference.Named { - return nil -} - -func (ref ostreeReference) PolicyConfigurationIdentity() string { - return fmt.Sprintf("%s:%s", ref.repo, ref.image) -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref ostreeReference) PolicyConfigurationNamespaces() []string { - s := strings.SplitN(ref.image, ":", 2) - if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag. - panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image)) - } - name := s[0] - res := []string{} - for { - res = append(res, fmt.Sprintf("%s:%s", ref.repo, name)) - - lastSlash := strings.LastIndex(name, "/") - if lastSlash == -1 { - break - } - name = name[:lastSlash] - } - return res -} - -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref ostreeReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - return nil, errors.New("Reading ostree: images is currently not supported") -} - -// NewImageSource returns a types.ImageSource for this reference, -// asking the backend to use a manifest from requestedManifestMIMETypes if possible. -// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. -// The caller must call .Close() on the returned ImageSource. -func (ref ostreeReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - return nil, errors.New("Reading ostree: images is currently not supported") -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref ostreeReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - var tmpDir string - if ctx == nil || ctx.OSTreeTmpDirPath == "" { - tmpDir = os.TempDir() - } else { - tmpDir = ctx.OSTreeTmpDirPath - } - return newImageDestination(ref, tmpDir) -} - -// DeleteImage deletes the named image from the registry, if supported. 
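// Aside — a minimal sketch, not from the vendored sources: the prefix walk in
// PolicyConfigurationNamespaces above. The tag is stripped first; the
// repo-qualified name is then emitted repeatedly with the last "/" component
// removed each round. The DeleteImage implementation the comment above
// describes follows below.
package main

import (
	"fmt"
	"strings"
)

func namespaces(repo, name string) []string {
	res := []string{}
	for {
		res = append(res, fmt.Sprintf("%s:%s", repo, name))
		lastSlash := strings.LastIndex(name, "/")
		if lastSlash == -1 {
			break
		}
		name = name[:lastSlash]
	}
	return res
}

func main() {
	// Output: [/ostree/repo:example.com/ns/repo /ostree/repo:example.com/ns /ostree/repo:example.com]
	fmt.Println(namespaces("/ostree/repo", "example.com/ns/repo"))
}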
-func (ref ostreeReference) DeleteImage(ctx *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for ostree: images") -} - -var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`) - -func encodeOStreeRef(in string) string { - var buffer bytes.Buffer - for i := range in { - sub := in[i : i+1] - if ostreeRefRegexp.MatchString(sub) { - buffer.WriteString(sub) - } else { - buffer.WriteString(fmt.Sprintf("_%02X", sub[0])) - } - - } - return buffer.String() -} - -// manifestPath returns a path for the manifest within a ostree using our conventions. -func (ref ostreeReference) manifestPath() string { - return filepath.Join("manifest", "manifest.json") -} - -// signaturePath returns a path for a signature within a ostree using our conventions. -func (ref ostreeReference) signaturePath(index int) string { - return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1)) -} diff --git a/vendor/github.com/containers/image/ostree/ostree_transport_test.go b/vendor/github.com/containers/image/ostree/ostree_transport_test.go deleted file mode 100644 index 2fae742a4116..000000000000 --- a/vendor/github.com/containers/image/ostree/ostree_transport_test.go +++ /dev/null @@ -1,316 +0,0 @@ -package ostree - -import ( - "fmt" - "io/ioutil" - "os" - "strings" - "testing" - - "path/filepath" - - "github.com/containers/image/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - sha256digest = "@sha256:" + sha256digestHex -) - -func TestTransportName(t *testing.T) { - assert.Equal(t, "ostree", Transport.Name()) -} - -// A helper to replace $TMP in a repo path with a real temporary directory -func withTmpDir(repo string, tmpDir string) string { - return strings.Replace(repo, "$TMP", tmpDir, -1) -} - -// A common list of repo suffixes to test for the various ImageReference methods. -var repoSuffixes = []struct{ repoSuffix, resolvedRepo string }{ - {"", "/ostree/repo"}, - {"@/ostree/repo", "/ostree/repo"}, // /ostree/repo is accepted even if neither /ostree/repo nor /ostree exists, as a special case. - {"@$TMP/at@sign@repo", "$TMP/at@sign@repo"}, - // Rejected as ambiguous: /repo:with:colons could either be an (/repo, with:colons) policy configuration identity, or a (/repo:with, colons) policy configuration namespace. 
- {"@$TMP/repo:with:colons", ""}, -} - -// A common list of cases for image name parsing and normalization -var imageNameTestcases = []struct{ input, normalized, branchName string }{ - {"busybox:notlatest", "busybox:notlatest", "busybox_3Anotlatest"}, // Explicit tag - {"busybox", "busybox:latest", "busybox_3Alatest"}, // Default tag - {"docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "docker.io_2Flibrary_2Fbusybox_3Alatest"}, // A hierarchical name - {"UPPERCASEISINVALID", "", ""}, // Invalid input - {"busybox" + sha256digest, "", ""}, // Digested references are not supported (parsed as invalid repository name) - {"busybox:invalid+tag", "", ""}, // Invalid tag value - {"busybox:tag:with:colons", "", ""}, // Multiple colons - treated as a tag which contains a colon, which is invalid - {"", "", ""}, // Empty input is rejected (invalid repository.Named) -} - -func TestTransportParseReference(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "ostreeParseReference") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - for _, c := range imageNameTestcases { - for _, suffix := range repoSuffixes { - fullInput := c.input + withTmpDir(suffix.repoSuffix, tmpDir) - ref, err := Transport.ParseReference(fullInput) - if c.normalized == "" || suffix.resolvedRepo == "" { - assert.Error(t, err, fullInput) - } else { - require.NoError(t, err, fullInput) - ostreeRef, ok := ref.(ostreeReference) - require.True(t, ok, fullInput) - assert.Equal(t, c.normalized, ostreeRef.image, fullInput) - assert.Equal(t, c.branchName, ostreeRef.branchName, fullInput) - assert.Equal(t, withTmpDir(suffix.resolvedRepo, tmpDir), ostreeRef.repo, fullInput) - } - } - } -} - -func TestTransportValidatePolicyConfigurationScope(t *testing.T) { - for _, scope := range []string{ - "/etc:docker.io/library/busybox:notlatest", // This also demonstrates that two colons are interpreted as repo:name:tag. 
- "/etc:docker.io/library/busybox", - "/etc:docker.io/library", - "/etc:docker.io", - "/etc:repo", - "/this/does/not/exist:notlatest", - } { - err := Transport.ValidatePolicyConfigurationScope(scope) - assert.NoError(t, err, scope) - } - - for _, scope := range []string{ - "/colon missing as a path-reference delimiter", - "relative/path:busybox", - "/double//slashes:busybox", - "/has/./dot:busybox", - "/has/dot/../dot:busybox", - "/trailing/slash/:busybox", - } { - err := Transport.ValidatePolicyConfigurationScope(scope) - assert.Error(t, err, scope) - } -} - -func TestNewReference(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "ostreeNewReference") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - for _, c := range imageNameTestcases { - for _, suffix := range repoSuffixes { - if suffix.repoSuffix == "" { - continue - } - caseName := c.input + suffix.repoSuffix - ref, err := NewReference(c.input, withTmpDir(strings.TrimPrefix(suffix.repoSuffix, "@"), tmpDir)) - if c.normalized == "" || suffix.resolvedRepo == "" { - assert.Error(t, err, caseName) - } else { - require.NoError(t, err, caseName) - ostreeRef, ok := ref.(ostreeReference) - require.True(t, ok, caseName) - assert.Equal(t, c.normalized, ostreeRef.image, caseName) - assert.Equal(t, c.branchName, ostreeRef.branchName, caseName) - assert.Equal(t, withTmpDir(suffix.resolvedRepo, tmpDir), ostreeRef.repo, caseName) - } - } - } - - for _, path := range []string{ - "/", - "/etc", - tmpDir, - "relativepath", - tmpDir + "/thisdoesnotexist", - } { - _, err := NewReference("busybox", path) - require.NoError(t, err, path) - } - - _, err = NewReference("busybox", tmpDir+"/thisparentdoesnotexist/something") - assert.Error(t, err) -} - -// A common list of reference formats to test for the various ImageReference methods. -var validReferenceTestCases = []struct{ input, stringWithinTransport, policyConfigurationIdentity string }{ - {"busybox", "busybox:latest@/ostree/repo", "/ostree/repo:busybox:latest"}, // Everything implied - {"busybox:latest@/ostree/repo", "busybox:latest@/ostree/repo", "/ostree/repo:busybox:latest"}, // All implied values explicitly specified - {"example.com/ns/foo:bar@$TMP/non-DEFAULT", "example.com/ns/foo:bar@$TMP/non-DEFAULT", "$TMP/non-DEFAULT:example.com/ns/foo:bar"}, // All values explicitly specified, a hierarchical name - // A non-canonical path. Testing just one, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit. - {"busybox@$TMP/.", "busybox:latest@$TMP", "$TMP:busybox:latest"}, - // "/" as a corner case - {"busybox@/", "busybox:latest@/", "/:busybox:latest"}, -} - -func TestReferenceTransport(t *testing.T) { - ref, err := Transport.ParseReference("busybox") - require.NoError(t, err) - assert.Equal(t, Transport, ref.Transport()) -} - -func TestReferenceStringWithinTransport(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "ostreeStringWithinTransport") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - for _, c := range validReferenceTestCases { - ref, err := Transport.ParseReference(withTmpDir(c.input, tmpDir)) - require.NoError(t, err, c.input) - stringRef := ref.StringWithinTransport() - assert.Equal(t, withTmpDir(c.stringWithinTransport, tmpDir), stringRef, c.input) - // Do one more round to verify that the output can be parsed, to an equal value. 
- ref2, err := Transport.ParseReference(stringRef) - require.NoError(t, err, c.input) - stringRef2 := ref2.StringWithinTransport() - assert.Equal(t, stringRef, stringRef2, c.input) - } -} - -func TestReferenceDockerReference(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "ostreeDockerReference") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - for _, c := range validReferenceTestCases { - ref, err := Transport.ParseReference(withTmpDir(c.input, tmpDir)) - require.NoError(t, err, c.input) - dockerRef := ref.DockerReference() - assert.Nil(t, dockerRef, c.input) - } -} - -func TestReferencePolicyConfigurationIdentity(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "ostreePolicyConfigurationIdentity") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - for _, c := range validReferenceTestCases { - ref, err := Transport.ParseReference(withTmpDir(c.input, tmpDir)) - require.NoError(t, err, c.input) - assert.Equal(t, withTmpDir(c.policyConfigurationIdentity, tmpDir), ref.PolicyConfigurationIdentity(), c.input) - } -} - -func TestReferencePolicyConfigurationNamespaces(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "ostreePolicyConfigurationNamespaces") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - // Test both that DockerReferenceIdentity returns the expected value (fullName+suffix), - // and that DockerReferenceNamespaces starts with the expected value (fullName), i.e. that the two functions are - // consistent. - for inputName, expectedNS := range map[string][]string{ - "example.com/ns/repo": {"example.com/ns/repo", "example.com/ns", "example.com"}, - "example.com/repo": {"example.com/repo", "example.com"}, - "localhost/ns/repo": {"localhost/ns/repo", "localhost/ns", "localhost"}, - "localhost/repo": {"localhost/repo", "localhost"}, - "ns/repo": {"ns/repo", "ns"}, - "repo": {"repo"}, - } { - // Test with a known path which should exist. Test just one non-canonical - // path, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit. 
- for _, repoInput := range []string{tmpDir, tmpDir + "/./."} { - fullName := inputName + ":notlatest" - ref, err := NewReference(fullName, repoInput) - require.NoError(t, err, fullName) - - identity := ref.PolicyConfigurationIdentity() - assert.Equal(t, tmpDir+":"+expectedNS[0]+":notlatest", identity, fullName) - - ns := ref.PolicyConfigurationNamespaces() - require.NotNil(t, ns, fullName) - require.Len(t, ns, len(expectedNS), fullName) - moreSpecific := identity - for i := range expectedNS { - assert.Equal(t, tmpDir+":"+expectedNS[i], ns[i], fmt.Sprintf("%s item %d", fullName, i)) - assert.True(t, strings.HasPrefix(moreSpecific, ns[i])) - moreSpecific = ns[i] - } - } - } -} - -func TestReferenceNewImage(t *testing.T) { - ref, err := Transport.ParseReference("busybox") - require.NoError(t, err) - _, err = ref.NewImage(nil) - assert.Error(t, err) -} - -func TestReferenceNewImageSource(t *testing.T) { - ref, err := Transport.ParseReference("busybox") - require.NoError(t, err) - _, err = ref.NewImageSource(nil, nil) - assert.Error(t, err) -} - -func TestReferenceNewImageDestination(t *testing.T) { - otherTmpDir, err := ioutil.TempDir("", "ostree-transport-test") - require.NoError(t, err) - defer os.RemoveAll(otherTmpDir) - - for _, c := range []struct { - ctx *types.SystemContext - tmpDir string - }{ - {nil, os.TempDir()}, - {&types.SystemContext{}, os.TempDir()}, - {&types.SystemContext{OSTreeTmpDirPath: otherTmpDir}, otherTmpDir}, - } { - ref, err := Transport.ParseReference("busybox") - require.NoError(t, err) - dest, err := ref.NewImageDestination(c.ctx) - require.NoError(t, err) - ostreeDest, ok := dest.(*ostreeImageDestination) - require.True(t, ok) - assert.Equal(t, c.tmpDir+"/busybox_3Alatest", ostreeDest.tmpDirPath) - defer dest.Close() - } -} - -func TestReferenceDeleteImage(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "ostreeDeleteImage") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - ref, err := Transport.ParseReference(withTmpDir("busybox@$TMP/this-repo-does-not-exist", tmpDir)) - require.NoError(t, err) - err = ref.DeleteImage(nil) - assert.Error(t, err) -} - -func TestEncodeOSTreeRef(t *testing.T) { - // Just a smoke test - assert.Equal(t, "busybox_3Alatest", encodeOStreeRef("busybox:latest")) -} - -func TestReferenceManifestPath(t *testing.T) { - ref, err := Transport.ParseReference("busybox") - require.NoError(t, err) - ostreeRef, ok := ref.(ostreeReference) - require.True(t, ok) - assert.Equal(t, fmt.Sprintf("manifest%cmanifest.json", filepath.Separator), ostreeRef.manifestPath()) -} - -func TestReferenceSignaturePath(t *testing.T) { - ref, err := Transport.ParseReference("busybox") - require.NoError(t, err) - ostreeRef, ok := ref.(ostreeReference) - require.True(t, ok) - for _, c := range []struct { - input int - suffix string - }{ - {0, "-1"}, - {42, "-43"}, - } { - assert.Equal(t, fmt.Sprintf("manifest%csignature%s", filepath.Separator, c.suffix), ostreeRef.signaturePath(c.input), string(c.input)) - } -} diff --git a/vendor/github.com/containers/image/pkg/compression/compression.go b/vendor/github.com/containers/image/pkg/compression/compression.go deleted file mode 100644 index c19d962ee543..000000000000 --- a/vendor/github.com/containers/image/pkg/compression/compression.go +++ /dev/null @@ -1,67 +0,0 @@ -package compression - -import ( - "bytes" - "compress/bzip2" - "compress/gzip" - "io" - - "github.com/pkg/errors" - - "github.com/sirupsen/logrus" -) - -// DecompressorFunc returns the decompressed stream, given a compressed stream. 
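// Aside — an illustrative sketch, not from the vendored sources: the
// magic-byte detection DetectCompression performs below, reduced to the gzip
// case. The peeked bytes are replayed via io.MultiReader so callers can keep
// reading from the start of the stream. The DecompressorFunc type the comment
// above introduces follows below.
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

var gzipMagic = []byte{0x1F, 0x8B, 0x08}

func isGzip(input io.Reader) (bool, io.Reader, error) {
	buf := make([]byte, len(gzipMagic))
	n, err := io.ReadAtLeast(input, buf, len(buf))
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return false, nil, err // a real read error, not just a short stream
	}
	rest := io.MultiReader(bytes.NewReader(buf[:n]), input)
	return bytes.HasPrefix(buf[:n], gzipMagic), rest, nil
}

func main() {
	var compressed bytes.Buffer
	w := gzip.NewWriter(&compressed)
	io.Copy(w, strings.NewReader("Hello"))
	w.Close()

	ok, rest, _ := isGzip(&compressed)
	fmt.Println(ok) // true
	r, _ := gzip.NewReader(rest)
	out, _ := io.ReadAll(r)
	fmt.Println(string(out)) // Hello
}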
-type DecompressorFunc func(io.Reader) (io.Reader, error) - -// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm. -func GzipDecompressor(r io.Reader) (io.Reader, error) { - return gzip.NewReader(r) -} - -// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. -func Bzip2Decompressor(r io.Reader) (io.Reader, error) { - return bzip2.NewReader(r), nil -} - -// XzDecompressor is a DecompressorFunc for the xz compression algorithm. -func XzDecompressor(r io.Reader) (io.Reader, error) { - return nil, errors.New("Decompressing xz streams is not supported") -} - -// compressionAlgos is an internal implementation detail of DetectCompression -var compressionAlgos = map[string]struct { - prefix []byte - decompressor DecompressorFunc -}{ - "gzip": {[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor}, // gzip (RFC 1952) - "bzip2": {[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor}, // bzip2 (decompress.c:BZ2_decompress) - "xz": {[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt) -} - -// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise. -// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. -func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) { - buffer := [8]byte{} - - n, err := io.ReadAtLeast(input, buffer[:], len(buffer)) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - // This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again. - // Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later. - return nil, nil, err - } - - var decompressor DecompressorFunc - for name, algo := range compressionAlgos { - if bytes.HasPrefix(buffer[:n], algo.prefix) { - logrus.Debugf("Detected compression format %s", name) - decompressor = algo.decompressor - break - } - } - if decompressor == nil { - logrus.Debugf("No compression detected") - } - - return decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil -} diff --git a/vendor/github.com/containers/image/pkg/compression/compression_test.go b/vendor/github.com/containers/image/pkg/compression/compression_test.go deleted file mode 100644 index 2dd429317a9f..000000000000 --- a/vendor/github.com/containers/image/pkg/compression/compression_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package compression - -import ( - "bytes" - "io" - "io/ioutil" - "os" - "testing" - - "github.com/pkg/errors" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestDetectCompression(t *testing.T) { - cases := []struct { - filename string - unimplemented bool - }{ - {"fixtures/Hello.uncompressed", false}, - {"fixtures/Hello.gz", false}, - {"fixtures/Hello.bz2", false}, - {"fixtures/Hello.xz", true}, - } - - // The original stream is preserved. 
- for _, c := range cases { - originalContents, err := ioutil.ReadFile(c.filename) - require.NoError(t, err, c.filename) - - stream, err := os.Open(c.filename) - require.NoError(t, err, c.filename) - defer stream.Close() - - _, updatedStream, err := DetectCompression(stream) - require.NoError(t, err, c.filename) - - updatedContents, err := ioutil.ReadAll(updatedStream) - require.NoError(t, err, c.filename) - assert.Equal(t, originalContents, updatedContents, c.filename) - } - - // The correct decompressor is chosen, and the result is as expected. - for _, c := range cases { - stream, err := os.Open(c.filename) - require.NoError(t, err, c.filename) - defer stream.Close() - - decompressor, updatedStream, err := DetectCompression(stream) - require.NoError(t, err, c.filename) - - var uncompressedStream io.Reader - switch { - case decompressor == nil: - uncompressedStream = updatedStream - case c.unimplemented: - _, err := decompressor(updatedStream) - assert.Error(t, err) - continue - default: - s, err := decompressor(updatedStream) - require.NoError(t, err) - uncompressedStream = s - } - - uncompressedContents, err := ioutil.ReadAll(uncompressedStream) - require.NoError(t, err, c.filename) - assert.Equal(t, []byte("Hello"), uncompressedContents, c.filename) - } - - // Empty input is handled reasonably. - decompressor, updatedStream, err := DetectCompression(bytes.NewReader([]byte{})) - require.NoError(t, err) - assert.Nil(t, decompressor) - updatedContents, err := ioutil.ReadAll(updatedStream) - require.NoError(t, err) - assert.Equal(t, []byte{}, updatedContents) - - // Error reading input - reader, writer := io.Pipe() - defer reader.Close() - writer.CloseWithError(errors.New("Expected error reading input in DetectCompression")) - _, _, err = DetectCompression(reader) - assert.Error(t, err) -} diff --git a/vendor/github.com/containers/image/pkg/compression/fixtures/Hello.bz2 b/vendor/github.com/containers/image/pkg/compression/fixtures/Hello.bz2 deleted file mode 100644 index e822f5e5e9e1..000000000000 Binary files a/vendor/github.com/containers/image/pkg/compression/fixtures/Hello.bz2 and /dev/null differ diff --git a/vendor/github.com/containers/image/pkg/compression/fixtures/Hello.gz b/vendor/github.com/containers/image/pkg/compression/fixtures/Hello.gz deleted file mode 100644 index 22c895b7d178..000000000000 Binary files a/vendor/github.com/containers/image/pkg/compression/fixtures/Hello.gz and /dev/null differ diff --git a/vendor/github.com/containers/image/pkg/compression/fixtures/Hello.uncompressed b/vendor/github.com/containers/image/pkg/compression/fixtures/Hello.uncompressed deleted file mode 100644 index 5ab2f8a4323a..000000000000 --- a/vendor/github.com/containers/image/pkg/compression/fixtures/Hello.uncompressed +++ /dev/null @@ -1 +0,0 @@ -Hello \ No newline at end of file diff --git a/vendor/github.com/containers/image/pkg/compression/fixtures/Hello.xz b/vendor/github.com/containers/image/pkg/compression/fixtures/Hello.xz deleted file mode 100644 index 6e9b0b6648fb..000000000000 Binary files a/vendor/github.com/containers/image/pkg/compression/fixtures/Hello.xz and /dev/null differ diff --git a/vendor/github.com/containers/image/pkg/strslice/README.md b/vendor/github.com/containers/image/pkg/strslice/README.md deleted file mode 100644 index ae6097e82edf..000000000000 --- a/vendor/github.com/containers/image/pkg/strslice/README.md +++ /dev/null @@ -1 +0,0 @@ -This package was replicated from [github.com/docker/docker 
v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice). diff --git a/vendor/github.com/containers/image/pkg/strslice/strslice.go b/vendor/github.com/containers/image/pkg/strslice/strslice.go deleted file mode 100644 index bad493fb89fd..000000000000 --- a/vendor/github.com/containers/image/pkg/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/containers/image/pkg/strslice/strslice_test.go b/vendor/github.com/containers/image/pkg/strslice/strslice_test.go deleted file mode 100644 index 1163b3652c98..000000000000 --- a/vendor/github.com/containers/image/pkg/strslice/strslice_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package strslice - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestStrSliceMarshalJSON(t *testing.T) { - for _, testcase := range []struct { - input StrSlice - expected string - }{ - // MADNESS(stevvooe): No clue why nil would be "" but empty would be - // "null". Had to make a change here that may affect compatibility. 
- {input: nil, expected: "null"}, - {StrSlice{}, "[]"}, - {StrSlice{"/bin/sh", "-c", "echo"}, `["/bin/sh","-c","echo"]`}, - } { - data, err := json.Marshal(testcase.input) - if err != nil { - t.Fatal(err) - } - if string(data) != testcase.expected { - t.Fatalf("%#v: expected %v, got %v", testcase.input, testcase.expected, string(data)) - } - } -} - -func TestStrSliceUnmarshalJSON(t *testing.T) { - parts := map[string][]string{ - "": {"default", "values"}, - "[]": {}, - `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"}, - } - for json, expectedParts := range parts { - strs := StrSlice{"default", "values"} - if err := strs.UnmarshalJSON([]byte(json)); err != nil { - t.Fatal(err) - } - - actualParts := []string(strs) - if !reflect.DeepEqual(actualParts, expectedParts) { - t.Fatalf("%#v: expected %v, got %v", json, expectedParts, actualParts) - } - - } -} - -func TestStrSliceUnmarshalString(t *testing.T) { - var e StrSlice - echo, err := json.Marshal("echo") - if err != nil { - t.Fatal(err) - } - if err := json.Unmarshal(echo, &e); err != nil { - t.Fatal(err) - } - - if len(e) != 1 { - t.Fatalf("expected 1 element after unmarshal: %q", e) - } - - if e[0] != "echo" { - t.Fatalf("expected `echo`, got: %q", e[0]) - } -} - -func TestStrSliceUnmarshalSlice(t *testing.T) { - var e StrSlice - echo, err := json.Marshal([]string{"echo"}) - if err != nil { - t.Fatal(err) - } - if err := json.Unmarshal(echo, &e); err != nil { - t.Fatal(err) - } - - if len(e) != 1 { - t.Fatalf("expected 1 element after unmarshal: %q", e) - } - - if e[0] != "echo" { - t.Fatalf("expected `echo`, got: %q", e[0]) - } -} diff --git a/vendor/github.com/containers/image/signature/docker.go b/vendor/github.com/containers/image/signature/docker.go deleted file mode 100644 index 16eb3f799376..000000000000 --- a/vendor/github.com/containers/image/signature/docker.go +++ /dev/null @@ -1,65 +0,0 @@ -// Note: Consider the API unstable until the code supports at least three different image formats or transports. - -package signature - -import ( - "fmt" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/opencontainers/go-digest" -) - -// SignDockerManifest returns a signature for manifest as the specified dockerReference, -// using mech and keyIdentity. -func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) { - manifestDigest, err := manifest.Digest(m) - if err != nil { - return nil, err - } - sig := newUntrustedSignature(manifestDigest, dockerReference) - return sig.sign(mech, keyIdentity) -} - -// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference, -// using mech. 
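// Aside — a self-contained copy of the StrSlice decoder deleted earlier in
// this diff, runnable on its own to show the string-or-array behaviour its
// tests assert. The VerifyDockerManifestSignature implementation the comment
// above documents follows below.
package main

import (
	"encoding/json"
	"fmt"
)

// StrSlice represents a string or an array of strings.
type StrSlice []string

// UnmarshalJSON accepts either a JSON array of strings or a bare string.
func (e *StrSlice) UnmarshalJSON(b []byte) error {
	if len(b) == 0 {
		return nil // no input: keep the existing (default) value
	}
	p := make([]string, 0, 1)
	if err := json.Unmarshal(b, &p); err != nil {
		var s string
		if err := json.Unmarshal(b, &s); err != nil {
			return err
		}
		p = append(p, s)
	}
	*e = p
	return nil
}

func main() {
	var a, b StrSlice
	json.Unmarshal([]byte(`"echo"`), &a)
	json.Unmarshal([]byte(`["/bin/sh","-c","echo"]`), &b)
	fmt.Println(a, b) // [echo] [/bin/sh -c echo]
}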
-func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte, - expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) { - expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference) - if err != nil { - return nil, err - } - sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{ - validateKeyIdentity: func(keyIdentity string) error { - if keyIdentity != expectedKeyIdentity { - return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)} - } - return nil - }, - validateSignedDockerReference: func(signedDockerReference string) error { - signedRef, err := reference.ParseNormalizedNamed(signedDockerReference) - if err != nil { - return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)} - } - if signedRef.String() != expectedRef.String() { - return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s", - signedDockerReference, expectedDockerReference)} - } - return nil - }, - validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error { - matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest) - if err != nil { - return err - } - if !matches { - return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)} - } - return nil - }, - }) - if err != nil { - return nil, err - } - return sig, nil -} diff --git a/vendor/github.com/containers/image/signature/docker_test.go b/vendor/github.com/containers/image/signature/docker_test.go deleted file mode 100644 index 3776c8f386a0..000000000000 --- a/vendor/github.com/containers/image/signature/docker_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package signature - -import ( - "io/ioutil" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestSignDockerManifest(t *testing.T) { - mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory) - require.NoError(t, err) - defer mech.Close() - - if err := mech.SupportsSigning(); err != nil { - t.Skipf("Signing not supported: %v", err) - } - - manifest, err := ioutil.ReadFile("fixtures/image.manifest.json") - require.NoError(t, err) - - // Successful signing - signature, err := SignDockerManifest(manifest, TestImageSignatureReference, mech, TestKeyFingerprint) - require.NoError(t, err) - - verified, err := VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint) - assert.NoError(t, err) - assert.Equal(t, TestImageSignatureReference, verified.DockerReference) - assert.Equal(t, TestImageManifestDigest, verified.DockerManifestDigest) - - // Error computing Docker manifest - invalidManifest, err := ioutil.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json") - require.NoError(t, err) - _, err = SignDockerManifest(invalidManifest, TestImageSignatureReference, mech, TestKeyFingerprint) - assert.Error(t, err) - - // Error creating blob to sign - _, err = SignDockerManifest(manifest, "", mech, TestKeyFingerprint) - assert.Error(t, err) - - // Error signing - _, err = SignDockerManifest(manifest, TestImageSignatureReference, mech, "this fingerprint doesn't exist") - assert.Error(t, err) -} - -func TestVerifyDockerManifestSignature(t *testing.T) { - mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory) - 
require.NoError(t, err) - defer mech.Close() - manifest, err := ioutil.ReadFile("fixtures/image.manifest.json") - require.NoError(t, err) - signature, err := ioutil.ReadFile("fixtures/image.signature") - require.NoError(t, err) - - // Successful verification - sig, err := VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint) - require.NoError(t, err) - assert.Equal(t, TestImageSignatureReference, sig.DockerReference) - assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest) - - // Verification using a different canonicalization of TestImageSignatureReference - sig, err = VerifyDockerManifestSignature(signature, manifest, "docker.io/"+TestImageSignatureReference, mech, TestKeyFingerprint) - require.NoError(t, err) - assert.Equal(t, TestImageSignatureReference, sig.DockerReference) - assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest) - - // For extra paranoia, test that we return nil data on error. - - // Invalid docker reference on input - sig, err = VerifyDockerManifestSignature(signature, manifest, "UPPERCASEISINVALID", mech, TestKeyFingerprint) - assert.Error(t, err) - assert.Nil(t, sig) - - // Error computing Docker manifest - invalidManifest, err := ioutil.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json") - require.NoError(t, err) - sig, err = VerifyDockerManifestSignature(signature, invalidManifest, TestImageSignatureReference, mech, TestKeyFingerprint) - assert.Error(t, err) - assert.Nil(t, sig) - - // Error verifying signature - corruptSignature, err := ioutil.ReadFile("fixtures/corrupt.signature") - sig, err = VerifyDockerManifestSignature(corruptSignature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint) - assert.Error(t, err) - assert.Nil(t, sig) - - // Key fingerprint mismatch - sig, err = VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, "unexpected fingerprint") - assert.Error(t, err) - assert.Nil(t, sig) - - // Invalid reference in the signature - invalidReferenceSignature, err := ioutil.ReadFile("fixtures/invalid-reference.signature") - sig, err = VerifyDockerManifestSignature(invalidReferenceSignature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint) - assert.Error(t, err) - assert.Nil(t, sig) - - // Docker reference mismatch - sig, err = VerifyDockerManifestSignature(signature, manifest, "example.com/doesnt/match", mech, TestKeyFingerprint) - assert.Error(t, err) - assert.Nil(t, sig) - - // Docker manifest digest mismatch - sig, err = VerifyDockerManifestSignature(signature, []byte("unexpected manifest"), TestImageSignatureReference, mech, TestKeyFingerprint) - assert.Error(t, err) - assert.Nil(t, sig) -} diff --git a/vendor/github.com/containers/image/signature/fixtures/.gitignore b/vendor/github.com/containers/image/signature/fixtures/.gitignore deleted file mode 100644 index 2772b97f7dfb..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -/*.gpg~ -/.gpg-v21-migrated -/private-keys-v1.d -/random_seed -/gnupg_spawn_agent_sentinel.lock -/.#* diff --git a/vendor/github.com/containers/image/signature/fixtures/corrupt.signature b/vendor/github.com/containers/image/signature/fixtures/corrupt.signature deleted file mode 100644 index 95c29087125f..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/corrupt.signature and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/corrupt.signature-v3 
b/vendor/github.com/containers/image/signature/fixtures/corrupt.signature-v3 deleted file mode 100644 index 51f986b961d2..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/corrupt.signature-v3 and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-manifest-digest-error/manifest.json b/vendor/github.com/containers/image/signature/fixtures/dir-img-manifest-digest-error/manifest.json deleted file mode 120000 index 3dee14b4a8d1..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-manifest-digest-error/manifest.json +++ /dev/null @@ -1 +0,0 @@ -../v2s1-invalid-signatures.manifest.json \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-manifest-digest-error/signature-1 b/vendor/github.com/containers/image/signature/fixtures/dir-img-manifest-digest-error/signature-1 deleted file mode 120000 index f010fd4c41e6..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-manifest-digest-error/signature-1 +++ /dev/null @@ -1 +0,0 @@ -../dir-img-valid/signature-1 \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/manifest.json b/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/manifest.json deleted file mode 120000 index ff7d2ffadff4..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/manifest.json +++ /dev/null @@ -1 +0,0 @@ -../dir-img-valid/manifest.json \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/signature-1 b/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/signature-1 deleted file mode 120000 index b27cdc458530..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/signature-1 +++ /dev/null @@ -1 +0,0 @@ -../invalid-blob.signature \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/signature-2 b/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/signature-2 deleted file mode 120000 index f010fd4c41e6..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/signature-2 +++ /dev/null @@ -1 +0,0 @@ -../dir-img-valid/signature-1 \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-modified-manifest/manifest.json b/vendor/github.com/containers/image/signature/fixtures/dir-img-modified-manifest/manifest.json deleted file mode 100644 index 82fde3811eac..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-modified-manifest/manifest.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "config": { - "mediaType": "application/vnd.docker.container.image.v1+json", - "size": 7023, - "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7" - }, - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 32654, - "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 16724, - "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 73109, - "digest": 
"sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736" - } - ], - "extra": "this manifest has been modified" -} diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-modified-manifest/signature-1 b/vendor/github.com/containers/image/signature/fixtures/dir-img-modified-manifest/signature-1 deleted file mode 120000 index f010fd4c41e6..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-modified-manifest/signature-1 +++ /dev/null @@ -1 +0,0 @@ -../dir-img-valid/signature-1 \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-no-manifest/signature-1 b/vendor/github.com/containers/image/signature/fixtures/dir-img-no-manifest/signature-1 deleted file mode 120000 index f010fd4c41e6..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-no-manifest/signature-1 +++ /dev/null @@ -1 +0,0 @@ -../dir-img-valid/signature-1 \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-unsigned/manifest.json b/vendor/github.com/containers/image/signature/fixtures/dir-img-unsigned/manifest.json deleted file mode 120000 index ff7d2ffadff4..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-unsigned/manifest.json +++ /dev/null @@ -1 +0,0 @@ -../dir-img-valid/manifest.json \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-valid-2/manifest.json b/vendor/github.com/containers/image/signature/fixtures/dir-img-valid-2/manifest.json deleted file mode 120000 index ff7d2ffadff4..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-valid-2/manifest.json +++ /dev/null @@ -1 +0,0 @@ -../dir-img-valid/manifest.json \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-valid-2/signature-1 b/vendor/github.com/containers/image/signature/fixtures/dir-img-valid-2/signature-1 deleted file mode 120000 index f010fd4c41e6..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-valid-2/signature-1 +++ /dev/null @@ -1 +0,0 @@ -../dir-img-valid/signature-1 \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-valid-2/signature-2 b/vendor/github.com/containers/image/signature/fixtures/dir-img-valid-2/signature-2 deleted file mode 100644 index dbba8f422811..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/dir-img-valid-2/signature-2 and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-valid/manifest.json b/vendor/github.com/containers/image/signature/fixtures/dir-img-valid/manifest.json deleted file mode 120000 index c5bd25431f85..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-valid/manifest.json +++ /dev/null @@ -1 +0,0 @@ -../image.manifest.json \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-valid/signature-1 b/vendor/github.com/containers/image/signature/fixtures/dir-img-valid/signature-1 deleted file mode 100644 index d0e18720d982..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/dir-img-valid/signature-1 and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/double.signature b/vendor/github.com/containers/image/signature/fixtures/double.signature deleted file mode 100644 index 
76b17e2a1b88..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/double.signature and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/expired.signature b/vendor/github.com/containers/image/signature/fixtures/expired.signature deleted file mode 100644 index c609c37927a2..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/expired.signature and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/image.manifest.json b/vendor/github.com/containers/image/signature/fixtures/image.manifest.json deleted file mode 100644 index 198da23f9264..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/image.manifest.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "config": { - "mediaType": "application/vnd.docker.container.image.v1+json", - "size": 7023, - "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7" - }, - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 32654, - "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 16724, - "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b" - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 73109, - "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/image.signature b/vendor/github.com/containers/image/signature/fixtures/image.signature deleted file mode 100644 index f89272127575..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/image.signature and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/invalid-blob.signature b/vendor/github.com/containers/image/signature/fixtures/invalid-blob.signature deleted file mode 100644 index c8db18cac28c..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/invalid-blob.signature and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/invalid-blob.signature-v3 b/vendor/github.com/containers/image/signature/fixtures/invalid-blob.signature-v3 deleted file mode 100644 index 246bdd9a8e5b..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/invalid-blob.signature-v3 and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/invalid-reference.signature b/vendor/github.com/containers/image/signature/fixtures/invalid-reference.signature deleted file mode 100644 index 0236598301ed..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/invalid-reference.signature and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/no-optional-fields.signature b/vendor/github.com/containers/image/signature/fixtures/no-optional-fields.signature deleted file mode 100644 index 482ae3acf0d8..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/no-optional-fields.signature and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/policy.json b/vendor/github.com/containers/image/signature/fixtures/policy.json deleted file mode 100644 
index 8e39e5c62670..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/policy.json +++ /dev/null @@ -1,96 +0,0 @@ -{ - "default": [ - { - "type": "reject" - } - ], - "transports": { - "dir": { - "": [ - { - "type": "insecureAcceptAnything" - } - ] - }, - "docker": { - "example.com/playground": [ - { - "type": "insecureAcceptAnything" - } - ], - "example.com/production": [ - { - "type": "signedBy", - "keyType": "GPGKeys", - "keyPath": "/keys/employee-gpg-keyring" - } - ], - "example.com/hardened": [ - { - "type": "signedBy", - "keyType": "GPGKeys", - "keyPath": "/keys/employee-gpg-keyring", - "signedIdentity": { - "type": "matchRepository" - } - }, - { - "type": "signedBy", - "keyType": "signedByGPGKeys", - "keyPath": "/keys/public-key-signing-gpg-keyring", - "signedIdentity": { - "type": "matchExact" - } - }, - { - "type": "signedBaseLayer", - "baseLayerIdentity": { - "type": "exactRepository", - "dockerRepository": "registry.access.redhat.com/rhel7/rhel" - } - } - ], - "example.com/hardened-x509": [ - { - "type": "signedBy", - "keyType": "X509Certificates", - "keyPath": "/keys/employee-cert-file", - "signedIdentity": { - "type": "matchRepository" - } - }, - { - "type": "signedBy", - "keyType": "signedByX509CAs", - "keyPath": "/keys/public-key-signing-ca-file" - } - ], - "registry.access.redhat.com": [ - { - "type": "signedBy", - "keyType": "signedByGPGKeys", - "keyPath": "/keys/RH-key-signing-key-gpg-keyring", - "signedIdentity": { - "type": "matchRepoDigestOrExact" - } - } - ], - "bogus/key-data-example": [ - { - "type": "signedBy", - "keyType": "signedByGPGKeys", - "keyData": "bm9uc2Vuc2U=" - } - ], - "bogus/signed-identity-example": [ - { - "type": "signedBaseLayer", - "baseLayerIdentity": { - "type": "exactReference", - "dockerReference": "registry.access.redhat.com/rhel7/rhel:latest" - } - } - ] - } - } -} \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/public-key.gpg b/vendor/github.com/containers/image/signature/fixtures/public-key.gpg deleted file mode 100644 index 46901d58dbb2..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/public-key.gpg +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mI0EVurzqQEEAL3qkFq4K2URtSWVDYnQUNA9HdM9sqS2eAWfqUFMrkD5f+oN+LBL -tPyaE5GNLA0vXY7nHAM2TeM8ijZ/eMP17Raj64JL8GhCymL3wn2jNvb9XaF0R0s6 -H0IaRPPu45A3SnxLwm4Orc/9Z7/UxtYjKSg9xOaTiVPzJgaf5Vm4J4ApABEBAAG0 -EnNrb3BlbyB0ZXN0aW5nIGtleYi4BBMBAgAiBQJW6vOpAhsDBgsJCAcDAgYVCAIJ -CgsEFgIDAQIeAQIXgAAKCRDbcvIYi7RsyBbOBACgJFiKDlQ1UyvsNmGqJ7D0OpbS -1OppJlradKgZXyfahFswhFI+7ZREvELLHbinq3dBy5cLXRWzQKdJZNHknSN5Tjf2 -0ipVBQuqpcBo+dnKiG4zH6fhTri7yeTZksIDfsqlI6FXDOdKLUSnahagEBn4yU+x -jHPvZk5SuuZv56A45biNBFbq86kBBADIC/9CsAlOmRALuYUmkhcqEjuFwn3wKz2d -IBjzgvro7zcVNNCgxQfMEjcUsvEh5cx13G3QQHcwOKy3M6Bv6VMhfZjd+1P1el4P -0fJS8GFmhWRBknMN8jFsgyohQeouQ798RFFv94KszfStNnr/ae8oao5URmoUXSCa -/MdUxn0YKwARAQABiJ8EGAECAAkFAlbq86kCGwwACgkQ23LyGIu0bMjUywQAq0dn -lUpDNSoLTcpNWuVvHQ7c/qmnE4TyiSLiRiAywdEWA6gMiyhUUucuGsEhMFP1WX1k -UNwArZ6UG7BDOUsvngP7jKGNqyUOQrq1s/r8D+0MrJGOWErGLlfttO2WeoijECkI -5qm8cXzAra3Xf/Z3VjxYTKSnNu37LtZkakdTdYE= -=tJAt ------END PGP PUBLIC KEY BLOCK----- diff --git a/vendor/github.com/containers/image/signature/fixtures/pubring.gpg b/vendor/github.com/containers/image/signature/fixtures/pubring.gpg deleted file mode 100644 index 2d922b42d882..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/pubring.gpg and /dev/null differ diff --git 
a/vendor/github.com/containers/image/signature/fixtures/secring.gpg b/vendor/github.com/containers/image/signature/fixtures/secring.gpg deleted file mode 100644 index 36cf0f7db274..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/secring.gpg and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/trustdb.gpg b/vendor/github.com/containers/image/signature/fixtures/trustdb.gpg deleted file mode 100644 index 5449174f87d1..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/trustdb.gpg and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/unknown-key.signature b/vendor/github.com/containers/image/signature/fixtures/unknown-key.signature deleted file mode 100644 index 393ace4a92bc..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/unknown-key.signature and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/unknown-key.signature-v3 b/vendor/github.com/containers/image/signature/fixtures/unknown-key.signature-v3 deleted file mode 100644 index 67f429b0d82a..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/unknown-key.signature-v3 and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/unsigned-encrypted.signature b/vendor/github.com/containers/image/signature/fixtures/unsigned-encrypted.signature deleted file mode 100644 index 7da65dec261f..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/unsigned-encrypted.signature and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/unsigned-literal.signature b/vendor/github.com/containers/image/signature/fixtures/unsigned-literal.signature deleted file mode 100644 index 9b660cb214e3..000000000000 Binary files a/vendor/github.com/containers/image/signature/fixtures/unsigned-literal.signature and /dev/null differ diff --git a/vendor/github.com/containers/image/signature/fixtures/v2s1-invalid-signatures.manifest.json b/vendor/github.com/containers/image/signature/fixtures/v2s1-invalid-signatures.manifest.json deleted file mode 100644 index 8dfefd4e1b71..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures/v2s1-invalid-signatures.manifest.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "schemaVersion": 1, - "name": "mitr/buxybox", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - ], - "history": [ - ], - "signatures": 1 -} diff --git a/vendor/github.com/containers/image/signature/fixtures_info_test.go b/vendor/github.com/containers/image/signature/fixtures_info_test.go deleted file mode 100644 index a44a6b7364a1..000000000000 --- a/vendor/github.com/containers/image/signature/fixtures_info_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package signature - -import "github.com/opencontainers/go-digest" - -const ( - // TestImageManifestDigest is the Docker manifest digest of "image.manifest.json" - TestImageManifestDigest = digest.Digest("sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55") - // TestImageSignatureReference is the Docker image reference signed in "image.signature" - TestImageSignatureReference = "testing/manifest" - // TestKeyFingerprint is the fingerprint of the private key in this directory. - TestKeyFingerprint = "1D8230F6CDB6A06716E414C1DB72F2188BB46CC8" - // TestKeyShortID is the short ID of the private key in this directory. 
- TestKeyShortID = "DB72F2188BB46CC8" -) diff --git a/vendor/github.com/containers/image/signature/json.go b/vendor/github.com/containers/image/signature/json.go deleted file mode 100644 index 9e592863dae5..000000000000 --- a/vendor/github.com/containers/image/signature/json.go +++ /dev/null @@ -1,88 +0,0 @@ -package signature - -import ( - "bytes" - "encoding/json" - "fmt" - "io" -) - -// jsonFormatError is returned when JSON does not match expected format. -type jsonFormatError string - -func (err jsonFormatError) Error() string { - return string(err) -} - -// paranoidUnmarshalJSONObject unmarshals data as a JSON object, failing on the slightest unexpected aspect -// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to -// determine the destination for a field value; fieldResolver should return a pointer to the destination if the key is valid, or nil if the key is rejected. -// -// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy; -// we could use reflection to automate this. Later? -func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error { - seenKeys := map[string]struct{}{} - - dec := json.NewDecoder(bytes.NewReader(data)) - t, err := dec.Token() - if err != nil { - return jsonFormatError(err.Error()) - } - if t != json.Delim('{') { - return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t)) - } - for { - t, err := dec.Token() - if err != nil { - return jsonFormatError(err.Error()) - } - if t == json.Delim('}') { - break - } - - key, ok := t.(string) - if !ok { - // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state. - return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t)) - } - if _, ok := seenKeys[key]; ok { - return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key)) - } - seenKeys[key] = struct{}{} - - valuePtr := fieldResolver(key) - if valuePtr == nil { - return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key)) - } - // This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value. - if err := dec.Decode(valuePtr); err != nil { - return jsonFormatError(err.Error()) - } - } - if _, err := dec.Token(); err != io.EOF { - return jsonFormatError("Unexpected data after JSON object") - } - return nil -} - -// paranoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, failing on the slightest unexpected aspect -// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields -// must be present exactly once, and no other fields are accepted.
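[Editor's note: a hypothetical sketch of how callers used the exact-fields helper whose definition follows. The struct and its JSON keys are invented, but the calling pattern mirrors the removed json_test.go; the helper is package-private, so the sketch assumes it lives in the same package.]

    package signature

    import "encoding/json"

    // signedByLike is an invented stand-in for the policy requirement structs
    // that drove this helper.
    type signedByLike struct {
    	Type    string
    	KeyPath string
    }

    // Compile-time check, in the style of the removed tests.
    var _ json.Unmarshaler = (*signedByLike)(nil)

    func (s *signedByLike) UnmarshalJSON(data []byte) error {
    	// Each listed key must appear exactly once with a matching type;
    	// duplicate, missing, or unknown keys all fail the parse.
    	return paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
    		"type":    &s.Type,
    		"keyPath": &s.KeyPath,
    	})
    }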
-func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error { - seenKeys := map[string]struct{}{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - if valuePtr, ok := exactFields[key]; ok { - seenKeys[key] = struct{}{} - return valuePtr - } - return nil - }); err != nil { - return err - } - for key := range exactFields { - if _, ok := seenKeys[key]; !ok { - return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key)) - } - } - return nil -} diff --git a/vendor/github.com/containers/image/signature/json_test.go b/vendor/github.com/containers/image/signature/json_test.go deleted file mode 100644 index 83fe2929896b..000000000000 --- a/vendor/github.com/containers/image/signature/json_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package signature - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type mSI map[string]interface{} // To minimize typing the long name - -// A short-hand way to get a JSON object field value or panic. No error handling done, we know -// what we are working with, a panic in a test is good enough, and fitting test cases on a single line -// is a priority. -func x(m mSI, fields ...string) mSI { - for _, field := range fields { - // Not .(mSI) because type assertion of an unnamed type to a named type always fails (the types - // are not "identical"), but the assignment is fine because they are "assignable". - m = m[field].(map[string]interface{}) - } - return m -} - -// implementsUnmarshalJSON is a minimalistic type used to detect that -// paranoidUnmarshalJSONObject uses the json.Unmarshaler interface of resolved -// pointers. -type implementsUnmarshalJSON bool - -// Compile-time check that implementsUnmarshalJSON implements json.Unmarshaler. -var _ json.Unmarshaler = (*implementsUnmarshalJSON)(nil) - -func (dest *implementsUnmarshalJSON) UnmarshalJSON(data []byte) error { - _ = data // We don't care, not really.
- *dest = true // Mark handler as called - return nil -} - -func TestParanoidUnmarshalJSONObject(t *testing.T) { - type testStruct struct { - A string - B int - } - ts := testStruct{} - var unmarshalJSONCalled implementsUnmarshalJSON - tsResolver := func(key string) interface{} { - switch key { - case "a": - return &ts.A - case "b": - return &ts.B - case "implementsUnmarshalJSON": - return &unmarshalJSONCalled - default: - return nil - } - } - - // Empty object - ts = testStruct{} - err := paranoidUnmarshalJSONObject([]byte(`{}`), tsResolver) - require.NoError(t, err) - assert.Equal(t, testStruct{}, ts) - - // Success - ts = testStruct{} - err = paranoidUnmarshalJSONObject([]byte(`{"a":"x", "b":2}`), tsResolver) - require.NoError(t, err) - assert.Equal(t, testStruct{A: "x", B: 2}, ts) - - // json.Unmarshaler is used for decoding values - ts = testStruct{} - unmarshalJSONCalled = implementsUnmarshalJSON(false) - err = paranoidUnmarshalJSONObject([]byte(`{"implementsUnmarshalJSON":true}`), tsResolver) - require.NoError(t, err) - assert.Equal(t, unmarshalJSONCalled, implementsUnmarshalJSON(true)) - - // Various kinds of invalid input - for _, input := range []string{ - ``, // Empty input - `&`, // Entirely invalid JSON - `1`, // Not an object - `{&}`, // Invalid key JSON - `{1:1}`, // Key not a string - `{"b":1, "b":1}`, // Duplicate key - `{"thisdoesnotexist":1}`, // Key rejected by resolver - `{"a":&}`, // Invalid value JSON - `{"a":1}`, // Type mismatch - `{"a":"value"}{}`, // Extra data after object - } { - ts = testStruct{} - err := paranoidUnmarshalJSONObject([]byte(input), tsResolver) - assert.Error(t, err, input) - } -} - -func TestParanoidUnmarshalJSONObjectExactFields(t *testing.T) { - var stringValue string - var float64Value float64 - var rawValue json.RawMessage - var unmarshallCalled implementsUnmarshalJSON - exactFields := map[string]interface{}{ - "string": &stringValue, - "float64": &float64Value, - "raw": &rawValue, - "unmarshaller": &unmarshallCalled, - } - - // Empty object - err := paranoidUnmarshalJSONObjectExactFields([]byte(`{}`), map[string]interface{}{}) - require.NoError(t, err) - - // Success - err = paranoidUnmarshalJSONObjectExactFields([]byte(`{"string": "a", "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}`), exactFields) - require.NoError(t, err) - assert.Equal(t, "a", stringValue) - assert.Equal(t, 3.5, float64Value) - assert.Equal(t, json.RawMessage(`{"a":"b"}`), rawValue) - assert.Equal(t, implementsUnmarshalJSON(true), unmarshallCalled) - - // Various kinds of invalid input - for _, input := range []string{ - ``, // Empty input - `&`, // Entirely invalid JSON - `1`, // Not an object - `{&}`, // Invalid key JSON - `{1:1}`, // Key not a string - `{"string": "a", "string": "a", "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}`, // Duplicate key - `{"string": "a", "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true, "thisisunknown", 1}`, // Unknown key - `{"string": &, "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}`, // Invalid value JSON - `{"string": 1, "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}`, // Type mismatch - `{"string": "a", "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}{}`, // Extra data after object - } { - err := paranoidUnmarshalJSONObjectExactFields([]byte(input), exactFields) - assert.Error(t, err, input) - } -} diff --git a/vendor/github.com/containers/image/signature/mechanism.go b/vendor/github.com/containers/image/signature/mechanism.go deleted file mode 100644 index
bdf26c531ffb..000000000000 --- a/vendor/github.com/containers/image/signature/mechanism.go +++ /dev/null @@ -1,85 +0,0 @@ -// Note: Consider the API unstable until the code supports at least three different image formats or transports. - -package signature - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "strings" - - "golang.org/x/crypto/openpgp" -) - -// SigningMechanism abstracts a way to sign binary blobs and verify their signatures. -// Each mechanism should eventually be closed by calling Close(). -// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to -// eliminate ambiguities, support CA signatures and perhaps other key properties) -type SigningMechanism interface { - // Close removes resources associated with the mechanism, if any. - Close() error - // SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. - SupportsSigning() error - // Sign creates a (non-detached) signature of input using keyIdentity. - // Fails with a SigningNotSupportedError if the mechanism does not support signing. - Sign(input []byte, keyIdentity string) ([]byte, error) - // Verify parses unverifiedSignature and returns the content and the signer's identity - Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) - // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, - // along with a short identifier of the key used for signing. - // WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) - // is NOT the same as a "key identity" used in other calls to this interface, and - // the values may have no recognizable relationship if the public key is not available. - UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) -} - -// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that. -type SigningNotSupportedError string - -func (err SigningNotSupportedError) Error() string { - return string(err) -} - -// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default -// GPG configuration ($GNUPGHOME / ~/.gnupg) -// The caller must call .Close() on the returned SigningMechanism. -func NewGPGSigningMechanism() (SigningMechanism, error) { - return newGPGSigningMechanismInDirectory("") -} - -// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which -// recognizes _only_ public keys from the supplied blob, and returns the identities -// of these keys. -// The caller must call .Close() on the returned SigningMechanism. -func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { - return newEphemeralGPGSigningMechanism(blob) -} - -// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, -// along with a short identifier of the key used for signing. -// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) -// is NOT the same as a "key identity" used in other calls to this interface, and -// the values may have no recognizable relationship if the public key is not available. -func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { - // This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography.
- md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil) - if err != nil { - return nil, "", err - } - if !md.IsSigned { - return nil, "", errors.New("The input is not a signature") - } - content, err := ioutil.ReadAll(md.UnverifiedBody) - if err != nil { - // Coverage: An error during reading the body can happen only if - // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key - // to decrypt the contents anyway), or - // 2) the message is signed AND we give ReadMessage a corresponding public key, which we don’t. - return nil, "", err - } - - // Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints - // (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)! - return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil -} diff --git a/vendor/github.com/containers/image/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/signature/mechanism_gpgme.go deleted file mode 100644 index cf71d3ed73a8..000000000000 --- a/vendor/github.com/containers/image/signature/mechanism_gpgme.go +++ /dev/null @@ -1,175 +0,0 @@ -// +build !containers_image_openpgp,linux,cgo - -package signature - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - - "github.com/mtrmac/gpgme" -) - -// A GPG/OpenPGP signing mechanism, implemented using gpgme. -type gpgmeSigningMechanism struct { - ctx *gpgme.Context - ephemeralDir string // If not "", a directory to be removed on Close() -} - -// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. -// The caller must call .Close() on the returned SigningMechanism. -func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { - ctx, err := newGPGMEContext(optionalDir) - if err != nil { - return nil, err - } - return &gpgmeSigningMechanism{ - ctx: ctx, - ephemeralDir: "", - }, nil -} - -// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which -// recognizes _only_ public keys from the supplied blob, and returns the identities -// of these keys. -// The caller must call .Close() on the returned SigningMechanism. -func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { - dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-") - if err != nil { - return nil, nil, err - } - removeDir := true - defer func() { - if removeDir { - os.RemoveAll(dir) - } - }() - ctx, err := newGPGMEContext(dir) - if err != nil { - return nil, nil, err - } - mech := &gpgmeSigningMechanism{ - ctx: ctx, - ephemeralDir: dir, - } - keyIdentities, err := mech.importKeysFromBytes(blob) - if err != nil { - return nil, nil, err - } - - removeDir = false - return mech, keyIdentities, nil -} - -// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty.
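[Editor's note: the comments in these files repeatedly stress the difference between verified and untrusted reads of a signature. A short hypothetical sketch of that contract, using only calls shown in the removed sources; mech and sigBlob are assumed inputs.]

    package example

    import (
    	"fmt"

    	"github.com/containers/image/signature"
    )

    // inspectSignature contrasts the two read paths of the removed
    // SigningMechanism interface.
    func inspectSignature(mech signature.SigningMechanism, sigBlob []byte) {
    	// Verified path: the payload is returned only if the signature
    	// validates against a key known to the mechanism's keyring.
    	if payload, fingerprint, err := mech.Verify(sigBlob); err == nil {
    		fmt.Printf("verified %d-byte payload, signed by %s\n", len(payload), fingerprint)
    	}
    	// Untrusted path: the payload and short key ID are parsed WITHOUT any
    	// verification; suitable for display and diagnostics only.
    	if payload, shortKeyID, err := mech.UntrustedSignatureContents(sigBlob); err == nil {
    		fmt.Printf("UNVERIFIED %d-byte payload, allegedly from key %s\n", len(payload), shortKeyID)
    	}
    }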
-func newGPGMEContext(optionalDir string) (*gpgme.Context, error) { - ctx, err := gpgme.New() - if err != nil { - return nil, err - } - if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil { - return nil, err - } - if optionalDir != "" { - err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir) - if err != nil { - return nil, err - } - } - ctx.SetArmor(false) - ctx.SetTextMode(false) - return ctx, nil -} - -func (m *gpgmeSigningMechanism) Close() error { - if m.ephemeralDir != "" { - os.RemoveAll(m.ephemeralDir) // Ignore an error, if any - } - return nil -} - -// importKeysFromBytes imports public keys from the supplied blob and returns their identities. -// The blob is assumed to have an appropriate format (the caller is expected to know which one). -// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism); -// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism. -func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { - inputData, err := gpgme.NewDataBytes(blob) - if err != nil { - return nil, err - } - res, err := m.ctx.Import(inputData) - if err != nil { - return nil, err - } - keyIdentities := []string{} - for _, i := range res.Imports { - if i.Result == nil { - keyIdentities = append(keyIdentities, i.Fingerprint) - } - } - return keyIdentities, nil -} - -// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. -func (m *gpgmeSigningMechanism) SupportsSigning() error { - return nil -} - -// Sign creates a (non-detached) signature of input using keyIdentity. -// Fails with a SigningNotSupportedError if the mechanism does not support signing. -func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { - key, err := m.ctx.GetKey(keyIdentity, true) - if err != nil { - return nil, err - } - inputData, err := gpgme.NewDataBytes(input) - if err != nil { - return nil, err - } - var sigBuffer bytes.Buffer - sigData, err := gpgme.NewDataWriter(&sigBuffer) - if err != nil { - return nil, err - } - if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil { - return nil, err - } - return sigBuffer.Bytes(), nil -} - -// Verify parses unverifiedSignature and returns the content and the signer's identity -func (m gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { - signedBuffer := bytes.Buffer{} - signedData, err := gpgme.NewDataWriter(&signedBuffer) - if err != nil { - return nil, "", err - } - unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature) - if err != nil { - return nil, "", err - } - _, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData) - if err != nil { - return nil, "", err - } - if len(sigs) != 1 { - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))} - } - sig := sigs[0] - // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves - if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage { - // FIXME: Better error reporting eventually - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)} - } - return signedBuffer.Bytes(), sig.Fingerprint, nil -} - -// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, -// along with a short identifier of the key 
used for signing. -// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) -// is NOT the same as a "key identity" used in other calls to this interface, and -// the values may have no recognizable relationship if the public key is not available. -func (m gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { - return gpgUntrustedSignatureContents(untrustedSignature) -} diff --git a/vendor/github.com/containers/image/signature/mechanism_gpgme_test.go b/vendor/github.com/containers/image/signature/mechanism_gpgme_test.go deleted file mode 100644 index 9f5dc31bb490..000000000000 --- a/vendor/github.com/containers/image/signature/mechanism_gpgme_test.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build !containers_image_openpgp - -package signature - -import ( - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestGPGMESigningMechanismClose(t *testing.T) { - // Closing an ephemeral mechanism removes the directory. - // (The non-ephemeral case is tested in the common TestGPGSigningMechanismClose) - mech, _, err := NewEphemeralGPGSigningMechanism([]byte{}) - require.NoError(t, err) - gpgMech, ok := mech.(*gpgmeSigningMechanism) - require.True(t, ok) - dir := gpgMech.ephemeralDir - assert.NotEmpty(t, dir) - _, err = os.Lstat(dir) - require.NoError(t, err) - err = mech.Close() - assert.NoError(t, err) - _, err = os.Lstat(dir) - require.Error(t, err) - assert.True(t, os.IsNotExist(err)) -} - -func TestGPGMESigningMechanismSupportsSigning(t *testing.T) { - mech, _, err := NewEphemeralGPGSigningMechanism([]byte{}) - require.NoError(t, err) - defer mech.Close() - err = mech.SupportsSigning() - assert.NoError(t, err) -} diff --git a/vendor/github.com/containers/image/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/signature/mechanism_openpgp.go deleted file mode 100644 index 57b440767580..000000000000 --- a/vendor/github.com/containers/image/signature/mechanism_openpgp.go +++ /dev/null @@ -1,159 +0,0 @@ -// +build containers_image_openpgp !cgo !linux - -package signature - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "os" - "path" - "strings" - "time" - - "github.com/containers/storage/pkg/homedir" - "golang.org/x/crypto/openpgp" -) - -// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp. -type openpgpSigningMechanism struct { - keyring openpgp.EntityList -} - -// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. -// The caller must call .Close() on the returned SigningMechanism. -func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { - m := &openpgpSigningMechanism{ - keyring: openpgp.EntityList{}, - } - - gpgHome := optionalDir - if gpgHome == "" { - gpgHome = os.Getenv("GNUPGHOME") - if gpgHome == "" { - gpgHome = path.Join(homedir.Get(), ".gnupg") - } - } - - pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg")) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - } else { - _, err := m.importKeysFromBytes(pubring) - if err != nil { - return nil, err - } - } - return m, nil -} - -// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which -// recognizes _only_ public keys from the supplied blob, and returns the identities -// of these keys. -// The caller must call .Close() on the returned SigningMechanism.
-func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { - m := &openpgpSigningMechanism{ - keyring: openpgp.EntityList{}, - } - keyIdentities, err := m.importKeysFromBytes(blob) - if err != nil { - return nil, nil, err - } - return m, keyIdentities, nil -} - -func (m *openpgpSigningMechanism) Close() error { - return nil -} - -// importKeysFromBytes imports public keys from the supplied blob and returns their identities. -// The blob is assumed to have an appropriate format (the caller is expected to know which one). -func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { - keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob)) - if err != nil { - k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob)) - if e2 != nil { - return nil, err // The original error -- FIXME: is this better? - } - keyring = k - } - - keyIdentities := []string{} - for _, entity := range keyring { - if entity.PrimaryKey == nil { - // Coverage: This should never happen, openpgp.ReadEntity fails with a - // openpgp.errors.StructuralError instead of returning an entity with this - // field set to nil. - continue - } - // Uppercase the fingerprint to be compatible with gpgme - keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))) - m.keyring = append(m.keyring, entity) - } - return keyIdentities, nil -} - -// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. -func (m *openpgpSigningMechanism) SupportsSigning() error { - return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") -} - -// Sign creates a (non-detached) signature of input using keyIdentity. -// Fails with a SigningNotSupportedError if the mechanism does not support signing. -func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { - return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") -} - -// Verify parses unverifiedSignature and returns the content and the signer's identity -func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { - md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil) - if err != nil { - return nil, "", err - } - if !md.IsSigned { - return nil, "", errors.New("not signed") - } - content, err := ioutil.ReadAll(md.UnverifiedBody) - if err != nil { - // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted - // (and possibly also signed, but it _must_ be encrypted) and the signing - // “modification detection code” detects a mismatch. But in that case, - // we would expect the signature verification to fail as well, and that is checked - // first. Besides, we are not supplying any decryption keys, so we really - // can never reach this “encrypted data MDC mismatch” path. 
- return nil, "", err - } - if md.SignatureError != nil { - return nil, "", fmt.Errorf("signature error: %v", md.SignatureError) - } - if md.SignedBy == nil { - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)} - } - if md.Signature != nil { - if md.Signature.SigLifetimeSecs != nil { - expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second) - if time.Now().After(expiry) { - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)} - } - } - } else if md.SignatureV3 == nil { - // Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3, - // or sets md.SignatureError. - return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"} - } - - // Uppercase the fingerprint to be compatible with gpgme - return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil -} - -// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, -// along with a short identifier of the key used for signing. -// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys) -// is NOT the same as a "key identity" used in other calls ot this interface, and -// the values may have no recognizable relationship if the public key is not available. -func (m openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { - return gpgUntrustedSignatureContents(untrustedSignature) -} diff --git a/vendor/github.com/containers/image/signature/mechanism_openpgp_test.go b/vendor/github.com/containers/image/signature/mechanism_openpgp_test.go deleted file mode 100644 index cfaf937b25d8..000000000000 --- a/vendor/github.com/containers/image/signature/mechanism_openpgp_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build containers_image_openpgp - -package signature - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestOpenpgpSigningMechanismSupportsSigning(t *testing.T) { - mech, _, err := NewEphemeralGPGSigningMechanism([]byte{}) - require.NoError(t, err) - defer mech.Close() - err = mech.SupportsSigning() - assert.Error(t, err) - assert.IsType(t, SigningNotSupportedError(""), err) -} - -func TestOpenpgpSigningMechanismSign(t *testing.T) { - mech, _, err := NewEphemeralGPGSigningMechanism([]byte{}) - require.NoError(t, err) - defer mech.Close() - _, err = mech.Sign([]byte{}, TestKeyFingerprint) - assert.Error(t, err) - assert.IsType(t, SigningNotSupportedError(""), err) -} diff --git a/vendor/github.com/containers/image/signature/mechanism_test.go b/vendor/github.com/containers/image/signature/mechanism_test.go deleted file mode 100644 index a6587c61c4fb..000000000000 --- a/vendor/github.com/containers/image/signature/mechanism_test.go +++ /dev/null @@ -1,327 +0,0 @@ -package signature - -// These tests are expected to pass unmodified for _both_ mechanism_gpgme.go and mechanism_openpgp.go. 
- -import ( - "bytes" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - testGPGHomeDirectory = "./fixtures" -) - -// Many of the tests use two fixtures: V4 signature packets (*.signature), and V3 signature packets (*.signature-v3) - -// fixtureVariants loads V3 and V4 signature fixture variants based on the v4 fixture path, and returns a map which makes it easy to test both. -func fixtureVariants(t *testing.T, v4Path string) map[string][]byte { - v4, err := ioutil.ReadFile(v4Path) - require.NoError(t, err) - v3Path := v4Path + "-v3" - v3, err := ioutil.ReadFile(v3Path) - require.NoError(t, err) - return map[string][]byte{v4Path: v4, v3Path: v3} -} - -func TestSigningNotSupportedError(t *testing.T) { - // A stupid test just to keep code coverage - s := "test" - err := SigningNotSupportedError(s) - assert.Equal(t, s, err.Error()) -} - -func TestNewGPGSigningMechanism(t *testing.T) { - // A dumb test just for code coverage. We test more with newGPGSigningMechanismInDirectory(). - mech, err := NewGPGSigningMechanism() - assert.NoError(t, err) - mech.Close() -} - -func TestNewGPGSigningMechanismInDirectory(t *testing.T) { - // A dumb test just for code coverage. - mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory) - assert.NoError(t, err) - mech.Close() - // The various GPG failure cases are not obviously easy to reach. - - // Test that using the default directory (presumably in user’s home) - // cannot use TestKeyFingerprint. - signatures := fixtureVariants(t, "./fixtures/invalid-blob.signature") - mech, err = newGPGSigningMechanismInDirectory("") - require.NoError(t, err) - defer mech.Close() - for version, signature := range signatures { - _, _, err := mech.Verify(signature) - assert.Error(t, err, version) - } - - // Similarly, using a newly created empty directory makes TestKeyFingerprint - // unavailable - emptyDir, err := ioutil.TempDir("", "signing-empty-directory") - require.NoError(t, err) - defer os.RemoveAll(emptyDir) - mech, err = newGPGSigningMechanismInDirectory(emptyDir) - require.NoError(t, err) - defer mech.Close() - for version, signature := range signatures { - _, _, err := mech.Verify(signature) - assert.Error(t, err, version) - } - - // If pubring.gpg is unreadable in the directory, either initializing - // the mechanism fails (with openpgp), or it succeeds (sadly, gpgme) and - // later verification fails. - unreadableDir, err := ioutil.TempDir("", "signing-unreadable-directory") - require.NoError(t, err) - defer os.RemoveAll(unreadableDir) - f, err := os.OpenFile(filepath.Join(unreadableDir, "pubring.gpg"), os.O_RDONLY|os.O_CREATE, 0000) - require.NoError(t, err) - f.Close() - mech, err = newGPGSigningMechanismInDirectory(unreadableDir) - if err == nil { - defer mech.Close() - for version, signature := range signatures { - _, _, err := mech.Verify(signature) - assert.Error(t, err, version) - } - } - - // Setting the directory parameter to testGPGHomeDirectory makes the key available. - mech, err = newGPGSigningMechanismInDirectory(testGPGHomeDirectory) - require.NoError(t, err) - defer mech.Close() - for version, signature := range signatures { - _, _, err := mech.Verify(signature) - assert.NoError(t, err, version) - } - - // If we use the default directory mechanism, GNUPGHOME is respected. 
- origGNUPGHOME := os.Getenv("GNUPGHOME") - defer os.Setenv("GNUPGHOME", origGNUPGHOME) - os.Setenv("GNUPGHOME", testGPGHomeDirectory) - mech, err = newGPGSigningMechanismInDirectory("") - require.NoError(t, err) - defer mech.Close() - for version, signature := range signatures { - _, _, err := mech.Verify(signature) - assert.NoError(t, err, version) - } -} - -func TestNewEphemeralGPGSigningMechanism(t *testing.T) { - // Empty input: This is accepted anyway by GPG, just returns no keys. - mech, keyIdentities, err := NewEphemeralGPGSigningMechanism([]byte{}) - require.NoError(t, err) - defer mech.Close() - assert.Empty(t, keyIdentities) - // Try validating a signature when the key is unknown. - signatures := fixtureVariants(t, "./fixtures/invalid-blob.signature") - for version, signature := range signatures { - _, _, err := mech.Verify(signature) - require.Error(t, err, version) - } - - // Successful import - keyBlob, err := ioutil.ReadFile("./fixtures/public-key.gpg") - require.NoError(t, err) - mech, keyIdentities, err = NewEphemeralGPGSigningMechanism(keyBlob) - require.NoError(t, err) - defer mech.Close() - assert.Equal(t, []string{TestKeyFingerprint}, keyIdentities) - // After import, the signature should validate. - for version, signature := range signatures { - content, signingFingerprint, err := mech.Verify(signature) - require.NoError(t, err, version) - assert.Equal(t, []byte("This is not JSON\n"), content, version) - assert.Equal(t, TestKeyFingerprint, signingFingerprint, version) - } - - // Two keys: Read the binary-format pubring.gpg, and concatenate it twice. - // (Using two copies of public-key.gpg, in the ASCII-armored format, works with - // gpgmeSigningMechanism but not openpgpSigningMechanism.) - keyBlob, err = ioutil.ReadFile("./fixtures/pubring.gpg") - require.NoError(t, err) - mech, keyIdentities, err = NewEphemeralGPGSigningMechanism(bytes.Join([][]byte{keyBlob, keyBlob}, nil)) - require.NoError(t, err) - defer mech.Close() - assert.Equal(t, []string{TestKeyFingerprint, TestKeyFingerprint}, keyIdentities) - - // Invalid input: This is, sadly, accepted anyway by GPG, just returns no keys. - // For openpgpSigningMechanism we can detect this and fail. - mech, keyIdentities, err = NewEphemeralGPGSigningMechanism([]byte("This is invalid")) - assert.True(t, err != nil || len(keyIdentities) == 0) - if err == nil { - mech.Close() - } - assert.Empty(t, keyIdentities) - // The various GPG/GPGME failures cases are not obviously easy to reach. -} - -func TestGPGSigningMechanismClose(t *testing.T) { - // Closing a non-ephemeral mechanism does not remove anything in the directory. 
- mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory) - require.NoError(t, err) - err = mech.Close() - assert.NoError(t, err) - _, err = os.Lstat(testGPGHomeDirectory) - assert.NoError(t, err) - _, err = os.Lstat(filepath.Join(testGPGHomeDirectory, "pubring.gpg")) - assert.NoError(t, err) -} - -func TestGPGSigningMechanismSign(t *testing.T) { - mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory) - require.NoError(t, err) - defer mech.Close() - - if err := mech.SupportsSigning(); err != nil { - t.Skipf("Signing not supported: %v", err) - } - - // Successful signing - content := []byte("content") - signature, err := mech.Sign(content, TestKeyFingerprint) - require.NoError(t, err) - - signedContent, signingFingerprint, err := mech.Verify(signature) - require.NoError(t, err) - assert.EqualValues(t, content, signedContent) - assert.Equal(t, TestKeyFingerprint, signingFingerprint) - - // Error signing - _, err = mech.Sign(content, "this fingerprint doesn't exist") - assert.Error(t, err) - // The various GPG/GPGME failures cases are not obviously easy to reach. -} - -func assertSigningError(t *testing.T, content []byte, fingerprint string, err error, msgAndArgs ...interface{}) { - assert.Error(t, err, msgAndArgs...) - assert.Nil(t, content, msgAndArgs...) - assert.Empty(t, fingerprint, msgAndArgs...) -} - -func TestGPGSigningMechanismVerify(t *testing.T) { - mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory) - require.NoError(t, err) - defer mech.Close() - - // Successful verification - signatures := fixtureVariants(t, "./fixtures/invalid-blob.signature") - for variant, signature := range signatures { - content, signingFingerprint, err := mech.Verify(signature) - require.NoError(t, err, variant) - assert.Equal(t, []byte("This is not JSON\n"), content, variant) - assert.Equal(t, TestKeyFingerprint, signingFingerprint, variant) - } - - // For extra paranoia, test that we return nil data on error. - - // Completely invalid signature. - content, signingFingerprint, err := mech.Verify([]byte{}) - assertSigningError(t, content, signingFingerprint, err) - - content, signingFingerprint, err = mech.Verify([]byte("invalid signature")) - assertSigningError(t, content, signingFingerprint, err) - - // Literal packet, not a signature - signature, err := ioutil.ReadFile("./fixtures/unsigned-literal.signature") // Not fixtureVariants, the “literal data” packet does not have V3/V4 versions. - require.NoError(t, err) - content, signingFingerprint, err = mech.Verify(signature) - assertSigningError(t, content, signingFingerprint, err) - - // Encrypted data, not a signature. - signature, err = ioutil.ReadFile("./fixtures/unsigned-encrypted.signature") // Not fixtureVariants, the “public-key encrypted session key” does not have V3/V4 versions. - require.NoError(t, err) - content, signingFingerprint, err = mech.Verify(signature) - assertSigningError(t, content, signingFingerprint, err) - - // FIXME? Is there a way to create a multi-signature so that gpgme_op_verify returns multiple signatures? - - // Expired signature - signature, err = ioutil.ReadFile("./fixtures/expired.signature") // Not fixtureVariants, V3 signature packets don’t support expiration. 
- require.NoError(t, err) - content, signingFingerprint, err = mech.Verify(signature) - assertSigningError(t, content, signingFingerprint, err) - - // Corrupt signature - signatures = fixtureVariants(t, "./fixtures/corrupt.signature") - for version, signature := range signatures { - content, signingFingerprint, err := mech.Verify(signature) - assertSigningError(t, content, signingFingerprint, err, version) - } - - // Valid signature with an unknown key - signatures = fixtureVariants(t, "./fixtures/unknown-key.signature") - for version, signature := range signatures { - content, signingFingerprint, err := mech.Verify(signature) - assertSigningError(t, content, signingFingerprint, err, version) - } - - // The various GPG/GPGME failure cases are not obviously easy to reach. -} - -func TestGPGSigningMechanismUntrustedSignatureContents(t *testing.T) { - mech, _, err := NewEphemeralGPGSigningMechanism([]byte{}) - require.NoError(t, err) - defer mech.Close() - - // A valid signature - signatures := fixtureVariants(t, "./fixtures/invalid-blob.signature") - for version, signature := range signatures { - content, shortKeyID, err := mech.UntrustedSignatureContents(signature) - require.NoError(t, err, version) - assert.Equal(t, []byte("This is not JSON\n"), content, version) - assert.Equal(t, TestKeyShortID, shortKeyID, version) - } - - // Completely invalid signature. - _, _, err = mech.UntrustedSignatureContents([]byte{}) - assert.Error(t, err) - - _, _, err = mech.UntrustedSignatureContents([]byte("invalid signature")) - assert.Error(t, err) - - // Literal packet, not a signature - signature, err := ioutil.ReadFile("./fixtures/unsigned-literal.signature") // Not fixtureVariants, the “literal data” packet does not have V3/V4 versions. - require.NoError(t, err) - content, shortKeyID, err := mech.UntrustedSignatureContents(signature) - assert.Error(t, err) - - // Encrypted data, not a signature. - signature, err = ioutil.ReadFile("./fixtures/unsigned-encrypted.signature") // Not fixtureVariants, the “public-key encrypted session key” does not have V3/V4 versions. - require.NoError(t, err) - content, shortKeyID, err = mech.UntrustedSignatureContents(signature) - assert.Error(t, err) - - // Expired signature - signature, err = ioutil.ReadFile("./fixtures/expired.signature") // Not fixtureVariants, V3 signature packets don’t support expiration.
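// (Illustrative sketch, not from the original file: the contrast these tests
// demonstrate. UntrustedSignatureContents only parses the signature — it does
// not authenticate it — so it succeeds even for expired or unknown-key
// signatures and must never be used for trust decisions.)
func examplePeekAtSignature(signature []byte) ([]byte, string, error) {
	// An ephemeral mechanism with no keys imported is enough here.
	mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
	if err != nil {
		return nil, "", err
	}
	defer mech.Close()
	// Returns the embedded content and the short key ID of the signing key.
	return mech.UntrustedSignatureContents(signature)
}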
- require.NoError(t, err) - content, shortKeyID, err = mech.UntrustedSignatureContents(signature) - require.NoError(t, err) - assert.Equal(t, []byte("This signature is expired.\n"), content) - assert.Equal(t, TestKeyShortID, shortKeyID) - - // Corrupt signature - signatures = fixtureVariants(t, "./fixtures/corrupt.signature") - for version, signature := range signatures { - content, shortKeyID, err := mech.UntrustedSignatureContents(signature) - require.NoError(t, err, version) - assert.Equal(t, []byte(`{"critical":{"identity":{"docker-reference":"testing/manifest"},"image":{"docker-manifest-digest":"sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55"},"type":"atomic container signature"},"optional":{"creator":"atomic ","timestamp":1458239713}}`), content, version) - assert.Equal(t, TestKeyShortID, shortKeyID, version) - } - - // Valid signature with an unknown key - signatures = fixtureVariants(t, "./fixtures/unknown-key.signature") - for version, signature := range signatures { - content, shortKeyID, err := mech.UntrustedSignatureContents(signature) - require.NoError(t, err, version) - assert.Equal(t, []byte(`{"critical":{"identity":{"docker-reference":"testing/manifest"},"image":{"docker-manifest-digest":"sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55"},"type":"atomic container signature"},"optional":{"creator":"atomic 0.1.13-dev","timestamp":1464633474}}`), content, version) - assert.Equal(t, "BB75E91990DF8F7E", shortKeyID, version) - } -} diff --git a/vendor/github.com/containers/image/signature/policy_config.go b/vendor/github.com/containers/image/signature/policy_config.go deleted file mode 100644 index bc6c5e9a7de2..000000000000 --- a/vendor/github.com/containers/image/signature/policy_config.go +++ /dev/null @@ -1,684 +0,0 @@ -// policy_config.go handles creation of policy objects, either by parsing JSON -// or by programs building them programmatically. - -// The New* constructors are intended to be a stable API. FIXME: after an independent review. - -// Do not invoke the internals of the JSON marshaling/unmarshaling directly. - -// We can't just blindly call json.Unmarshal because that would silently ignore -// typos, and that would just not do for security policy. - -// FIXME? This is by no means a user-friendly parser: No location information in error messages, no other context. -// But at least it is not worse than blind json.Unmarshal()… - -package signature - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "path/filepath" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -// systemDefaultPolicyPath is the policy path used for DefaultPolicy(). -// You can override this at build time with -// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path' -var systemDefaultPolicyPath = builtinDefaultPolicyPath - -// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). -// DO NOT change this, instead see systemDefaultPolicyPath above. -const builtinDefaultPolicyPath = "/etc/containers/policy.json" - -// InvalidPolicyFormatError is returned when parsing an invalid policy configuration. -type InvalidPolicyFormatError string - -func (err InvalidPolicyFormatError) Error() string { - return string(err) -} - -// DefaultPolicy returns the default policy of the system. -// Most applications should be using this method to get the policy configured -// by the system administrator.
-// ctx should usually be nil, can be set to override the default. -// NOTE: When this function returns an error, report it to the user and abort. -// DO NOT hard-code fallback policies in your application. -func DefaultPolicy(ctx *types.SystemContext) (*Policy, error) { - return NewPolicyFromFile(defaultPolicyPath(ctx)) -} - -// defaultPolicyPath returns a path to the default policy of the system. -func defaultPolicyPath(ctx *types.SystemContext) string { - if ctx != nil { - if ctx.SignaturePolicyPath != "" { - return ctx.SignaturePolicyPath - } - if ctx.RootForImplicitAbsolutePaths != "" { - return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemDefaultPolicyPath) - } - } - return systemDefaultPolicyPath -} - -// NewPolicyFromFile returns a policy configured in the specified file. -func NewPolicyFromFile(fileName string) (*Policy, error) { - contents, err := ioutil.ReadFile(fileName) - if err != nil { - return nil, err - } - return NewPolicyFromBytes(contents) -} - -// NewPolicyFromBytes returns a policy parsed from the specified blob. -// Use this function instead of calling json.Unmarshal directly. -func NewPolicyFromBytes(data []byte) (*Policy, error) { - p := Policy{} - if err := json.Unmarshal(data, &p); err != nil { - return nil, InvalidPolicyFormatError(err.Error()) - } - return &p, nil -} - -// Compile-time check that Policy implements json.Unmarshaler. -var _ json.Unmarshaler = (*Policy)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (p *Policy) UnmarshalJSON(data []byte) error { - *p = Policy{} - transports := policyTransportsMap{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - switch key { - case "default": - return &p.Default - case "transports": - return &transports - default: - return nil - } - }); err != nil { - return err - } - - if p.Default == nil { - return InvalidPolicyFormatError("Default policy is missing") - } - p.Transports = map[string]PolicyTransportScopes(transports) - return nil -} - -// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member. -type policyTransportsMap map[string]PolicyTransportScopes - -// Compile-time check that policyTransportsMap implements json.Unmarshaler. -var _ json.Unmarshaler = (*policyTransportsMap)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (m *policyTransportsMap) UnmarshalJSON(data []byte) error { - // We can't unmarshal directly into map values because it is not possible to take an address of a map value. - // So, use a temporary map of pointers-to-maps and convert. - tmpMap := map[string]*PolicyTransportScopes{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - // transport can be nil - transport := transports.Get(key) - // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe. - if _, ok := tmpMap[key]; ok { - return nil - } - ptsWithTransport := policyTransportScopesWithTransport{ - transport: transport, - dest: &PolicyTransportScopes{}, // This allocates a new instance on each call. - } - tmpMap[key] = ptsWithTransport.dest - return &ptsWithTransport - }); err != nil { - return err - } - for key, ptr := range tmpMap { - (*m)[key] = *ptr - } - return nil -} - -// Compile-time check that PolicyTransportScopes "implements" json.Unmarshaler.
-// We want to only use policyTransportScopesWithTransport. -var _ json.Unmarshaler = (*PolicyTransportScopes)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error { - return errors.New("Do not try to unmarshal PolicyTransportScopes directly") -} - -// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes -// while validating using a specific ImageTransport if not nil. -type policyTransportScopesWithTransport struct { - transport types.ImageTransport - dest *PolicyTransportScopes -} - -// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler. -var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error { - // We can't unmarshal directly into map values because it is not possible to take an address of a map value. - // So, use a temporary map of pointers-to-slices and convert. - tmpMap := map[string]*PolicyRequirements{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe. - if _, ok := tmpMap[key]; ok { - return nil - } - if key != "" && m.transport != nil { - if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil { - return nil - } - } - ptr := &PolicyRequirements{} // This allocates a new instance on each call. - tmpMap[key] = ptr - return ptr - }); err != nil { - return err - } - for key, ptr := range tmpMap { - (*m.dest)[key] = *ptr - } - return nil -} - -// Compile-time check that PolicyRequirements implements json.Unmarshaler. -var _ json.Unmarshaler = (*PolicyRequirements)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (m *PolicyRequirements) UnmarshalJSON(data []byte) error { - reqJSONs := []json.RawMessage{} - if err := json.Unmarshal(data, &reqJSONs); err != nil { - return err - } - if len(reqJSONs) == 0 { - return InvalidPolicyFormatError("List of verification policy requirements must not be empty") - } - res := make([]PolicyRequirement, len(reqJSONs)) - for i, reqJSON := range reqJSONs { - req, err := newPolicyRequirementFromJSON(reqJSON) - if err != nil { - return err - } - res[i] = req - } - *m = res - return nil -} - -// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation. -func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) { - var typeField prCommon - if err := json.Unmarshal(data, &typeField); err != nil { - return nil, err - } - var res PolicyRequirement - switch typeField.Type { - case prTypeInsecureAcceptAnything: - res = &prInsecureAcceptAnything{} - case prTypeReject: - res = &prReject{} - case prTypeSignedBy: - res = &prSignedBy{} - case prTypeSignedBaseLayer: - res = &prSignedBaseLayer{} - default: - return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type)) - } - if err := json.Unmarshal(data, &res); err != nil { - return nil, err - } - return res, nil -} - -// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type. -func newPRInsecureAcceptAnything() *prInsecureAcceptAnything { - return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}} -} - -// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement.
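// (Illustrative sketch, not from the original file: end-to-end use of the
// strict parser above. The "reject"/"insecureAcceptAnything" strings are
// assumed to be the JSON spellings of the pr* type constants; unknown or
// duplicated fields make parsing fail rather than being silently ignored.)
func exampleParsePolicy() (*Policy, error) {
	blob := []byte(`{
		"default": [{"type": "reject"}],
		"transports": {
			"docker": {
				"example.com/playground": [{"type": "insecureAcceptAnything"}]
			}
		}
	}`)
	return NewPolicyFromBytes(blob) // use this, never bare json.Unmarshal
}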
-func NewPRInsecureAcceptAnything() PolicyRequirement { - return newPRInsecureAcceptAnything() -} - -// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler. -var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { - *pr = prInsecureAcceptAnything{} - var tmp prInsecureAcceptAnything - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - }); err != nil { - return err - } - - if tmp.Type != prTypeInsecureAcceptAnything { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - *pr = *newPRInsecureAcceptAnything() - return nil -} - -// newPRReject is NewPRReject, except it returns the private type. -func newPRReject() *prReject { - return &prReject{prCommon{Type: prTypeReject}} -} - -// NewPRReject returns a new "reject" PolicyRequirement. -func NewPRReject() PolicyRequirement { - return newPRReject() -} - -// Compile-time check that prReject implements json.Unmarshaler. -var _ json.Unmarshaler = (*prReject)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (pr *prReject) UnmarshalJSON(data []byte) error { - *pr = prReject{} - var tmp prReject - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - }); err != nil { - return err - } - - if tmp.Type != prTypeReject { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - *pr = *newPRReject() - return nil -} - -// newPRSignedBy returns a new prSignedBy if parameters are valid. -func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { - if !keyType.IsValid() { - return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType)) - } - if len(keyPath) > 0 && len(keyData) > 0 { - return nil, InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously") - } - if signedIdentity == nil { - return nil, InvalidPolicyFormatError("signedIdentity not specified") - } - return &prSignedBy{ - prCommon: prCommon{Type: prTypeSignedBy}, - KeyType: keyType, - KeyPath: keyPath, - KeyData: keyData, - SignedIdentity: signedIdentity, - }, nil -} - -// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type. -func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { - return newPRSignedBy(keyType, keyPath, nil, signedIdentity) -} - -// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath. -func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { - return newPRSignedByKeyPath(keyType, keyPath, signedIdentity) -} - -// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
-func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { - return newPRSignedBy(keyType, "", keyData, signedIdentity) -} - -// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData. -func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { - return newPRSignedByKeyData(keyType, keyData, signedIdentity) -} - -// Compile-time check that prSignedBy implements json.Unmarshaler. -var _ json.Unmarshaler = (*prSignedBy)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (pr *prSignedBy) UnmarshalJSON(data []byte) error { - *pr = prSignedBy{} - var tmp prSignedBy - var gotKeyPath, gotKeyData = false, false - var signedIdentity json.RawMessage - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - switch key { - case "type": - return &tmp.Type - case "keyType": - return &tmp.KeyType - case "keyPath": - gotKeyPath = true - return &tmp.KeyPath - case "keyData": - gotKeyData = true - return &tmp.KeyData - case "signedIdentity": - return &signedIdentity - default: - return nil - } - }); err != nil { - return err - } - - if tmp.Type != prTypeSignedBy { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - if signedIdentity == nil { - tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() - } else { - si, err := newPolicyReferenceMatchFromJSON(signedIdentity) - if err != nil { - return err - } - tmp.SignedIdentity = si - } - - var res *prSignedBy - var err error - switch { - case gotKeyPath && gotKeyData: - return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously") - case gotKeyPath && !gotKeyData: - res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity) - case !gotKeyPath && gotKeyData: - res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity) - case !gotKeyPath && !gotKeyData: - return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified") - default: // Coverage: This should never happen - return errors.Errorf("Impossible keyPath/keyData presence combination!?") - } - if err != nil { - return err - } - *pr = *res - - return nil -} - -// IsValid returns true iff kt is a recognized value -func (kt sbKeyType) IsValid() bool { - switch kt { - case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys, - SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: - return true - default: - return false - } -} - -// Compile-time check that sbKeyType implements json.Unmarshaler. -var _ json.Unmarshaler = (*sbKeyType)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (kt *sbKeyType) UnmarshalJSON(data []byte) error { - *kt = sbKeyType("") - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if !sbKeyType(s).IsValid() { - return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s)) - } - *kt = sbKeyType(s) - return nil -} - -// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type.
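// (Illustrative sketch, not from the original file: the two mutually
// exclusive ways to build a "signedBy" requirement, mirroring the
// keyPath/keyData rules enforced above. The path and key bytes are made up.)
func exampleSignedBy() ([]PolicyRequirement, error) {
	byPath, err := NewPRSignedByKeyPath(SBKeyTypeGPGKeys, "/keys/employee-gpg-keyring", NewPRMMatchRepoDigestOrExact())
	if err != nil {
		return nil, err
	}
	byData, err := NewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("binary key blob"), NewPRMMatchRepoDigestOrExact())
	if err != nil {
		return nil, err
	}
	// Supplying both a path and data to newPRSignedBy would be rejected.
	return []PolicyRequirement{byPath, byData}, nil
}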
-func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) { - if baseLayerIdentity == nil { - return nil, InvalidPolicyFormatError("baseLayerIdentity not specified") - } - return &prSignedBaseLayer{ - prCommon: prCommon{Type: prTypeSignedBaseLayer}, - BaseLayerIdentity: baseLayerIdentity, - }, nil -} - -// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement. -func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) { - return newPRSignedBaseLayer(baseLayerIdentity) -} - -// Compile-time check that prSignedBaseLayer implements json.Unmarshaler. -var _ json.Unmarshaler = (*prSignedBaseLayer)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error { - *pr = prSignedBaseLayer{} - var tmp prSignedBaseLayer - var baseLayerIdentity json.RawMessage - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - "baseLayerIdentity": &baseLayerIdentity, - }); err != nil { - return err - } - - if tmp.Type != prTypeSignedBaseLayer { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity) - if err != nil { - return err - } - res, err := newPRSignedBaseLayer(bli) - if err != nil { - // Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid. - return err - } - *pr = *res - return nil -} - -// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation. -func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) { - var typeField prmCommon - if err := json.Unmarshal(data, &typeField); err != nil { - return nil, err - } - var res PolicyReferenceMatch - switch typeField.Type { - case prmTypeMatchExact: - res = &prmMatchExact{} - case prmTypeMatchRepoDigestOrExact: - res = &prmMatchRepoDigestOrExact{} - case prmTypeMatchRepository: - res = &prmMatchRepository{} - case prmTypeExactReference: - res = &prmExactReference{} - case prmTypeExactRepository: - res = &prmExactRepository{} - default: - return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type)) - } - if err := json.Unmarshal(data, &res); err != nil { - return nil, err - } - return res, nil -} - -// newPRMMatchExact is NewPRMMatchExact, except it returns the private type. -func newPRMMatchExact() *prmMatchExact { - return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}} -} - -// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch. -func NewPRMMatchExact() PolicyReferenceMatch { - return newPRMMatchExact() -} - -// Compile-time check that prmMatchExact implements json.Unmarshaler. -var _ json.Unmarshaler = (*prmMatchExact)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (prm *prmMatchExact) UnmarshalJSON(data []byte) error { - *prm = prmMatchExact{} - var tmp prmMatchExact - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - }); err != nil { - return err - } - - if tmp.Type != prmTypeMatchExact { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - *prm = *newPRMMatchExact() - return nil -} - -// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type.
-func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact { - return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}} -} - -// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch. -func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch { - return newPRMMatchRepoDigestOrExact() -} - -// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler. -var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error { - *prm = prmMatchRepoDigestOrExact{} - var tmp prmMatchRepoDigestOrExact - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - }); err != nil { - return err - } - - if tmp.Type != prmTypeMatchRepoDigestOrExact { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - *prm = *newPRMMatchRepoDigestOrExact() - return nil -} - -// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type. -func newPRMMatchRepository() *prmMatchRepository { - return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}} -} - -// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch. -func NewPRMMatchRepository() PolicyReferenceMatch { - return newPRMMatchRepository() -} - -// Compile-time check that prmMatchRepository implements json.Unmarshaler. -var _ json.Unmarshaler = (*prmMatchRepository)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error { - *prm = prmMatchRepository{} - var tmp prmMatchRepository - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - }); err != nil { - return err - } - - if tmp.Type != prmTypeMatchRepository { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - *prm = *newPRMMatchRepository() - return nil -} - -// newPRMExactReference is NewPRMExactReference, except it returns the private type. -func newPRMExactReference(dockerReference string) (*prmExactReference, error) { - ref, err := reference.ParseNormalizedNamed(dockerReference) - if err != nil { - return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error())) - } - if reference.IsNameOnly(ref) { - return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference)) - } - return &prmExactReference{ - prmCommon: prmCommon{Type: prmTypeExactReference}, - DockerReference: dockerReference, - }, nil -} - -// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch. -func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) { - return newPRMExactReference(dockerReference) -} - -// Compile-time check that prmExactReference implements json.Unmarshaler. -var _ json.Unmarshaler = (*prmExactReference)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface.
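// (Illustrative sketch, not from the original file: the reference-match
// options defined above, roughly from loosest to strictest.)
func exampleReferenceMatches() ([]PolicyReferenceMatch, error) {
	pinned, err := NewPRMExactReference("registry.access.redhat.com/rhel7/rhel:latest")
	if err != nil {
		return nil, err
	}
	return []PolicyReferenceMatch{
		NewPRMMatchRepository(),        // any reference within the same repository
		NewPRMMatchRepoDigestOrExact(), // the default identity for prSignedBy
		NewPRMMatchExact(),             // exactly the reference the image was pulled by
		pinned,                         // one specific, fully qualified reference
	}, nil
}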
-func (prm *prmExactReference) UnmarshalJSON(data []byte) error { - *prm = prmExactReference{} - var tmp prmExactReference - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - "dockerReference": &tmp.DockerReference, - }); err != nil { - return err - } - - if tmp.Type != prmTypeExactReference { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - - res, err := newPRMExactReference(tmp.DockerReference) - if err != nil { - return err - } - *prm = *res - return nil -} - -// newPRMExactRepository is NewPRMExactRepository, except it returns the private type. -func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) { - if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil { - return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error())) - } - return &prmExactRepository{ - prmCommon: prmCommon{Type: prmTypeExactRepository}, - DockerRepository: dockerRepository, - }, nil -} - -// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch. -func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) { - return newPRMExactRepository(dockerRepository) -} - -// Compile-time check that prmExactRepository implements json.Unmarshaler. -var _ json.Unmarshaler = (*prmExactRepository)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (prm *prmExactRepository) UnmarshalJSON(data []byte) error { - *prm = prmExactRepository{} - var tmp prmExactRepository - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - "dockerRepository": &tmp.DockerRepository, - }); err != nil { - return err - } - - if tmp.Type != prmTypeExactRepository { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - - res, err := newPRMExactRepository(tmp.DockerRepository) - if err != nil { - return err - } - *prm = *res - return nil -} diff --git a/vendor/github.com/containers/image/signature/policy_config_test.go b/vendor/github.com/containers/image/signature/policy_config_test.go deleted file mode 100644 index c1d930ba228a..000000000000 --- a/vendor/github.com/containers/image/signature/policy_config_test.go +++ /dev/null @@ -1,1370 +0,0 @@ -package signature - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "path/filepath" - "testing" - - "github.com/containers/image/directory" - "github.com/containers/image/docker" - // this import is needed where we use the "atomic" transport in TestPolicyUnmarshalJSON - _ "github.com/containers/image/openshift" - "github.com/containers/image/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// policyFixtureContents is a data structure equal to the contents of "fixtures/policy.json" -var policyFixtureContents = &Policy{ - Default: PolicyRequirements{NewPRReject()}, - Transports: map[string]PolicyTransportScopes{ - "dir": { - "": PolicyRequirements{NewPRInsecureAcceptAnything()}, - }, - "docker": { - "example.com/playground": { - NewPRInsecureAcceptAnything(), - }, - "example.com/production": { - xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, - "/keys/employee-gpg-keyring", - NewPRMMatchRepoDigestOrExact()), - }, - "example.com/hardened": { - xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, - "/keys/employee-gpg-keyring", - NewPRMMatchRepository()), - xNewPRSignedByKeyPath(SBKeyTypeSignedByGPGKeys, -
"/keys/public-key-signing-gpg-keyring", - NewPRMMatchExact()), - xNewPRSignedBaseLayer(xNewPRMExactRepository("registry.access.redhat.com/rhel7/rhel")), - }, - "example.com/hardened-x509": { - xNewPRSignedByKeyPath(SBKeyTypeX509Certificates, - "/keys/employee-cert-file", - NewPRMMatchRepository()), - xNewPRSignedByKeyPath(SBKeyTypeSignedByX509CAs, - "/keys/public-key-signing-ca-file", - NewPRMMatchRepoDigestOrExact()), - }, - "registry.access.redhat.com": { - xNewPRSignedByKeyPath(SBKeyTypeSignedByGPGKeys, - "/keys/RH-key-signing-key-gpg-keyring", - NewPRMMatchRepoDigestOrExact()), - }, - "bogus/key-data-example": { - xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, - []byte("nonsense"), - NewPRMMatchRepoDigestOrExact()), - }, - "bogus/signed-identity-example": { - xNewPRSignedBaseLayer(xNewPRMExactReference("registry.access.redhat.com/rhel7/rhel:latest")), - }, - }, - }, -} - -func TestDefaultPolicy(t *testing.T) { - // We can't test the actual systemDefaultPolicyPath, so override. - // TestDefaultPolicyPath below tests that we handle the overrides and defaults - // correctly. - - // Success - policy, err := DefaultPolicy(&types.SystemContext{SignaturePolicyPath: "./fixtures/policy.json"}) - require.NoError(t, err) - assert.Equal(t, policyFixtureContents, policy) - - for _, path := range []string{ - "/this/doesnt/exist", // Error reading file - "/dev/null", // A failure case; most are tested in the individual method unit tests. - } { - policy, err := DefaultPolicy(&types.SystemContext{SignaturePolicyPath: path}) - assert.Error(t, err) - assert.Nil(t, policy) - } -} - -func TestDefaultPolicyPath(t *testing.T) { - - const nondefaultPath = "/this/is/not/the/default/path.json" - const variableReference = "$HOME" - const rootPrefix = "/root/prefix" - - for _, c := range []struct { - ctx *types.SystemContext - expected string - }{ - // The common case - {nil, systemDefaultPolicyPath}, - // There is a context, but it does not override the path. - {&types.SystemContext{}, systemDefaultPolicyPath}, - // Path overridden - {&types.SystemContext{SignaturePolicyPath: nondefaultPath}, nondefaultPath}, - // Root overridden - { - &types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix}, - filepath.Join(rootPrefix, systemDefaultPolicyPath), - }, - // Root and path overrides present simultaneously, - { - &types.SystemContext{ - RootForImplicitAbsolutePaths: rootPrefix, - SignaturePolicyPath: nondefaultPath, - }, - nondefaultPath, - }, - // No environment expansion happens in the overridden paths - {&types.SystemContext{SignaturePolicyPath: variableReference}, variableReference}, - } { - path := defaultPolicyPath(c.ctx) - assert.Equal(t, c.expected, path) - } -} - -func TestNewPolicyFromFile(t *testing.T) { - // Success - policy, err := NewPolicyFromFile("./fixtures/policy.json") - require.NoError(t, err) - assert.Equal(t, policyFixtureContents, policy) - - // Error reading file - _, err = NewPolicyFromFile("/this/doesnt/exist") - assert.Error(t, err) - - // A failure case; most are tested in the individual method unit tests. - _, err = NewPolicyFromFile("/dev/null") - require.Error(t, err) - assert.IsType(t, InvalidPolicyFormatError(""), err) -} - -func TestNewPolicyFromBytes(t *testing.T) { - // Success - bytes, err := ioutil.ReadFile("./fixtures/policy.json") - require.NoError(t, err) - policy, err := NewPolicyFromBytes(bytes) - require.NoError(t, err) - assert.Equal(t, policyFixtureContents, policy) - - // A failure case; most are tested in the individual method unit tests. 
- _, err = NewPolicyFromBytes([]byte("")) - require.Error(t, err) - assert.IsType(t, InvalidPolicyFormatError(""), err) -} - -// FIXME? There is quite a bit of duplication below. Factor some of it out? - -// testInvalidJSONInput verifies that obviously invalid input is rejected for dest. -func testInvalidJSONInput(t *testing.T, dest json.Unmarshaler) { - // Invalid input. Note that json.Unmarshal is guaranteed to validate input before calling our - // UnmarshalJSON implementation; so test that first, then test our error handling for completeness. - err := json.Unmarshal([]byte("&"), dest) - assert.Error(t, err) - err = dest.UnmarshalJSON([]byte("&")) - assert.Error(t, err) - - // Not an object/array/string - err = json.Unmarshal([]byte("1"), dest) - assert.Error(t, err) -} - -// addExtraJSONMember adds adds an additional member "$name": $extra, -// possibly with a duplicate name, to encoded. -// Errors, if any, are reported through t. -func addExtraJSONMember(t *testing.T, encoded []byte, name string, extra interface{}) []byte { - extraJSON, err := json.Marshal(extra) - require.NoError(t, err) - - require.True(t, bytes.HasSuffix(encoded, []byte("}"))) - preservedLen := len(encoded) - 1 - - return bytes.Join([][]byte{encoded[:preservedLen], []byte(`,"`), []byte(name), []byte(`":`), extraJSON, []byte("}")}, nil) -} - -func TestInvalidPolicyFormatError(t *testing.T) { - // A stupid test just to keep code coverage - s := "test" - err := InvalidPolicyFormatError(s) - assert.Equal(t, s, err.Error()) -} - -// Return the result of modifying validJSON with fn and unmarshaling it into *p -func tryUnmarshalModifiedPolicy(t *testing.T, p *Policy, validJSON []byte, modifyFn func(mSI)) error { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - modifyFn(tmp) - - testJSON, err := json.Marshal(tmp) - require.NoError(t, err) - - *p = Policy{} - return json.Unmarshal(testJSON, p) -} - -// xNewPRSignedByKeyPath is like NewPRSignedByKeyPath, except it must not fail. -func xNewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) PolicyRequirement { - pr, err := NewPRSignedByKeyPath(keyType, keyPath, signedIdentity) - if err != nil { - panic("xNewPRSignedByKeyPath failed") - } - return pr -} - -// xNewPRSignedByKeyData is like NewPRSignedByKeyData, except it must not fail. -func xNewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) PolicyRequirement { - pr, err := NewPRSignedByKeyData(keyType, keyData, signedIdentity) - if err != nil { - panic("xNewPRSignedByKeyData failed") - } - return pr -} - -func TestPolicyUnmarshalJSON(t *testing.T) { - var p Policy - - testInvalidJSONInput(t, &p) - - // Start with a valid JSON. 
- validPolicy := Policy{ - Default: []PolicyRequirement{ - xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("abc"), NewPRMMatchRepoDigestOrExact()), - }, - Transports: map[string]PolicyTransportScopes{ - "docker": { - "docker.io/library/busybox": []PolicyRequirement{ - xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("def"), NewPRMMatchRepoDigestOrExact()), - }, - "registry.access.redhat.com": []PolicyRequirement{ - xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RH"), NewPRMMatchRepository()), - }, - }, - "atomic": { - "registry.access.redhat.com/rhel7": []PolicyRequirement{ - xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RHatomic"), NewPRMMatchRepository()), - }, - }, - "unknown": { - "registry.access.redhat.com/rhel7": []PolicyRequirement{ - xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RHatomic"), NewPRMMatchRepository()), - }, - }, - }, - } - validJSON, err := json.Marshal(validPolicy) - require.NoError(t, err) - - // Success - p = Policy{} - err = json.Unmarshal(validJSON, &p) - require.NoError(t, err) - assert.Equal(t, validPolicy, p) - - // Various ways to corrupt the JSON - breakFns := []func(mSI){ - // The "default" field is missing - func(v mSI) { delete(v, "default") }, - // Extra top-level sub-object - func(v mSI) { v["unexpected"] = 1 }, - // "default" not an array - func(v mSI) { v["default"] = 1 }, - func(v mSI) { v["default"] = mSI{} }, - // "transports" not an object - func(v mSI) { v["transports"] = 1 }, - func(v mSI) { v["transports"] = []string{} }, - // "default" is an invalid PolicyRequirements - func(v mSI) { v["default"] = PolicyRequirements{} }, - } - for _, fn := range breakFns { - err = tryUnmarshalModifiedPolicy(t, &p, validJSON, fn) - assert.Error(t, err) - } - - // Duplicated fields - for _, field := range []string{"default", "transports"} { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) - - p = Policy{} - err = json.Unmarshal(testJSON, &p) - assert.Error(t, err) - } - - // Various allowed modifications to the policy - allowedModificationFns := []func(mSI){ - // Delete the map of transport-specific scopes - func(v mSI) { delete(v, "transports") }, - // Use an empty map of transport-specific scopes - func(v mSI) { v["transports"] = map[string]PolicyTransportScopes{} }, - } - for _, fn := range allowedModificationFns { - err = tryUnmarshalModifiedPolicy(t, &p, validJSON, fn) - require.NoError(t, err) - } -} - -func TestPolicyTransportScopesUnmarshalJSON(t *testing.T) { - var pts PolicyTransportScopes - - // Start with a valid JSON. - validPTS := PolicyTransportScopes{ - "": []PolicyRequirement{ - xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("global"), NewPRMMatchRepoDigestOrExact()), - }, - } - validJSON, err := json.Marshal(validPTS) - require.NoError(t, err) - - // Nothing can be unmarshaled directly into PolicyTransportScopes - pts = PolicyTransportScopes{} - err = json.Unmarshal(validJSON, &pts) - assert.Error(t, err) -} - -// Return the result of modifying validJSON with fn and unmarshaling it into *pts -// using transport. 
-func tryUnmarshalModifiedPTS(t *testing.T, pts *PolicyTransportScopes, transport types.ImageTransport, - validJSON []byte, modifyFn func(mSI)) error { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - modifyFn(tmp) - - testJSON, err := json.Marshal(tmp) - require.NoError(t, err) - - *pts = PolicyTransportScopes{} - dest := policyTransportScopesWithTransport{ - transport: transport, - dest: pts, - } - return json.Unmarshal(testJSON, &dest) -} - -func TestPolicyTransportScopesWithTransportUnmarshalJSON(t *testing.T) { - var pts PolicyTransportScopes - - dest := policyTransportScopesWithTransport{ - transport: docker.Transport, - dest: &pts, - } - testInvalidJSONInput(t, &dest) - - // Start with a valid JSON. - validPTS := PolicyTransportScopes{ - "docker.io/library/busybox": []PolicyRequirement{ - xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("def"), NewPRMMatchRepoDigestOrExact()), - }, - "registry.access.redhat.com": []PolicyRequirement{ - xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RH"), NewPRMMatchRepository()), - }, - "": []PolicyRequirement{ - xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("global"), NewPRMMatchRepoDigestOrExact()), - }, - } - validJSON, err := json.Marshal(validPTS) - require.NoError(t, err) - - // Success - pts = PolicyTransportScopes{} - dest = policyTransportScopesWithTransport{ - transport: docker.Transport, - dest: &pts, - } - err = json.Unmarshal(validJSON, &dest) - require.NoError(t, err) - assert.Equal(t, validPTS, pts) - - // Various ways to corrupt the JSON - breakFns := []func(mSI){ - // A scope is not an array - func(v mSI) { v["docker.io/library/busybox"] = 1 }, - func(v mSI) { v["docker.io/library/busybox"] = mSI{} }, - func(v mSI) { v[""] = 1 }, - func(v mSI) { v[""] = mSI{} }, - // A scope is an invalid PolicyRequirements - func(v mSI) { v["docker.io/library/busybox"] = PolicyRequirements{} }, - func(v mSI) { v[""] = PolicyRequirements{} }, - } - for _, fn := range breakFns { - err = tryUnmarshalModifiedPTS(t, &pts, docker.Transport, validJSON, fn) - assert.Error(t, err) - } - - // Duplicated fields - for _, field := range []string{"docker.io/library/busybox", ""} { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) - - pts = PolicyTransportScopes{} - dest := policyTransportScopesWithTransport{ - transport: docker.Transport, - dest: &pts, - } - err = json.Unmarshal(testJSON, &dest) - assert.Error(t, err) - } - - // Scope rejected by transport: the Docker scopes we use as valid are rejected by directory.Transport - // as relative paths. - err = tryUnmarshalModifiedPTS(t, &pts, directory.Transport, validJSON, - func(v mSI) {}) - assert.Error(t, err) - - // Various allowed modifications to the policy - allowedModificationFns := []func(mSI){ - // The "" scope is missing - func(v mSI) { delete(v, "") }, - // The policy is completely empty - func(v mSI) { - for key := range v { - delete(v, key) - } - }, - } - for _, fn := range allowedModificationFns { - err = tryUnmarshalModifiedPTS(t, &pts, docker.Transport, validJSON, fn) - require.NoError(t, err) - } -} - -func TestPolicyRequirementsUnmarshalJSON(t *testing.T) { - var reqs PolicyRequirements - - testInvalidJSONInput(t, &reqs) - - // Start with a valid JSON.
- validReqs := PolicyRequirements{ - xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("def"), NewPRMMatchRepoDigestOrExact()), - xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RH"), NewPRMMatchRepository()), - } - validJSON, err := json.Marshal(validReqs) - require.NoError(t, err) - - // Success - reqs = PolicyRequirements{} - err = json.Unmarshal(validJSON, &reqs) - require.NoError(t, err) - assert.Equal(t, validReqs, reqs) - - for _, invalid := range [][]interface{}{ - // No requirements - {}, - // A member is not an object - {1}, - // A member has an invalid type - {prSignedBy{prCommon: prCommon{Type: "this is invalid"}}}, - // A member has a valid type but invalid contents - {prSignedBy{ - prCommon: prCommon{Type: prTypeSignedBy}, - KeyType: "this is invalid", - }}, - } { - testJSON, err := json.Marshal(invalid) - require.NoError(t, err) - - reqs = PolicyRequirements{} - err = json.Unmarshal(testJSON, &reqs) - assert.Error(t, err, string(testJSON)) - } -} - -func TestNewPolicyRequirementFromJSON(t *testing.T) { - // Sample success. Others tested in the individual PolicyRequirement.UnmarshalJSON implementations. - validReq := NewPRInsecureAcceptAnything() - validJSON, err := json.Marshal(validReq) - require.NoError(t, err) - req, err := newPolicyRequirementFromJSON(validJSON) - require.NoError(t, err) - assert.Equal(t, validReq, req) - - // Invalid - for _, invalid := range []interface{}{ - // Not an object - 1, - // Missing type - prCommon{}, - // Invalid type - prCommon{Type: "this is invalid"}, - // Valid type but invalid contents - prSignedBy{ - prCommon: prCommon{Type: prTypeSignedBy}, - KeyType: "this is invalid", - }, - } { - testJSON, err := json.Marshal(invalid) - require.NoError(t, err) - - _, err = newPolicyRequirementFromJSON(testJSON) - assert.Error(t, err, string(testJSON)) - } -} - -func TestNewPRInsecureAcceptAnything(t *testing.T) { - _pr := NewPRInsecureAcceptAnything() - pr, ok := _pr.(*prInsecureAcceptAnything) - require.True(t, ok) - assert.Equal(t, &prInsecureAcceptAnything{prCommon{prTypeInsecureAcceptAnything}}, pr) -} - -func TestPRInsecureAcceptAnythingUnmarshalJSON(t *testing.T) { - var pr prInsecureAcceptAnything - - testInvalidJSONInput(t, &pr) - - // Start with a valid JSON. 
- validPR := NewPRInsecureAcceptAnything() - validJSON, err := json.Marshal(validPR) - require.NoError(t, err) - - // Success - pr = prInsecureAcceptAnything{} - err = json.Unmarshal(validJSON, &pr) - require.NoError(t, err) - assert.Equal(t, validPR, &pr) - - // newPolicyRequirementFromJSON recognizes this type - _pr, err := newPolicyRequirementFromJSON(validJSON) - require.NoError(t, err) - assert.Equal(t, validPR, _pr) - - for _, invalid := range []mSI{ - // Missing "type" field - {}, - // Wrong "type" field - {"type": 1}, - {"type": "this is invalid"}, - // Extra fields - { - "type": string(prTypeInsecureAcceptAnything), - "unknown": "foo", - }, - } { - testJSON, err := json.Marshal(invalid) - require.NoError(t, err) - - pr = prInsecureAcceptAnything{} - err = json.Unmarshal(testJSON, &pr) - assert.Error(t, err, string(testJSON)) - } - - // Duplicated fields - for _, field := range []string{"type"} { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) - - pr = prInsecureAcceptAnything{} - err = json.Unmarshal(testJSON, &pr) - assert.Error(t, err) - } -} - -func TestNewPRReject(t *testing.T) { - _pr := NewPRReject() - pr, ok := _pr.(*prReject) - require.True(t, ok) - assert.Equal(t, &prReject{prCommon{prTypeReject}}, pr) -} - -func TestPRRejectUnmarshalJSON(t *testing.T) { - var pr prReject - - testInvalidJSONInput(t, &pr) - - // Start with a valid JSON. - validPR := NewPRReject() - validJSON, err := json.Marshal(validPR) - require.NoError(t, err) - - // Success - pr = prReject{} - err = json.Unmarshal(validJSON, &pr) - require.NoError(t, err) - assert.Equal(t, validPR, &pr) - - // newPolicyRequirementFromJSON recognizes this type - _pr, err := newPolicyRequirementFromJSON(validJSON) - require.NoError(t, err) - assert.Equal(t, validPR, _pr) - - for _, invalid := range []mSI{ - // Missing "type" field - {}, - // Wrong "type" field - {"type": 1}, - {"type": "this is invalid"}, - // Extra fields - { - "type": string(prTypeReject), - "unknown": "foo", - }, - } { - testJSON, err := json.Marshal(invalid) - require.NoError(t, err) - - pr = prReject{} - err = json.Unmarshal(testJSON, &pr) - assert.Error(t, err, string(testJSON)) - } - - // Duplicated fields - for _, field := range []string{"type"} { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) - - pr = prReject{} - err = json.Unmarshal(testJSON, &pr) - assert.Error(t, err) - } -} - -func TestNewPRSignedBy(t *testing.T) { - const testPath = "/foo/bar" - testData := []byte("abc") - testIdentity := NewPRMMatchRepoDigestOrExact() - - // Success - pr, err := newPRSignedBy(SBKeyTypeGPGKeys, testPath, nil, testIdentity) - require.NoError(t, err) - assert.Equal(t, &prSignedBy{ - prCommon: prCommon{prTypeSignedBy}, - KeyType: SBKeyTypeGPGKeys, - KeyPath: testPath, - KeyData: nil, - SignedIdentity: testIdentity, - }, pr) - pr, err = newPRSignedBy(SBKeyTypeGPGKeys, "", testData, testIdentity) - require.NoError(t, err) - assert.Equal(t, &prSignedBy{ - prCommon: prCommon{prTypeSignedBy}, - KeyType: SBKeyTypeGPGKeys, - KeyPath: "", - KeyData: testData, - SignedIdentity: testIdentity, - }, pr) - - // Invalid keyType - pr, err = newPRSignedBy(sbKeyType(""), testPath, nil, testIdentity) - assert.Error(t, err) - pr, err = newPRSignedBy(sbKeyType("this is invalid"), testPath, nil, testIdentity) - assert.Error(t, err) - - // Both keyPath and keyData specified - pr, 
err = newPRSignedBy(SBKeyTypeGPGKeys, testPath, testData, testIdentity) - assert.Error(t, err) - - // Invalid signedIdentity - pr, err = newPRSignedBy(SBKeyTypeGPGKeys, testPath, nil, nil) - assert.Error(t, err) -} - -func TestNewPRSignedByKeyPath(t *testing.T) { - const testPath = "/foo/bar" - _pr, err := NewPRSignedByKeyPath(SBKeyTypeGPGKeys, testPath, NewPRMMatchRepoDigestOrExact()) - require.NoError(t, err) - pr, ok := _pr.(*prSignedBy) - require.True(t, ok) - assert.Equal(t, testPath, pr.KeyPath) - // Failure cases tested in TestNewPRSignedBy. -} - -func TestNewPRSignedByKeyData(t *testing.T) { - testData := []byte("abc") - _pr, err := NewPRSignedByKeyData(SBKeyTypeGPGKeys, testData, NewPRMMatchRepoDigestOrExact()) - require.NoError(t, err) - pr, ok := _pr.(*prSignedBy) - require.True(t, ok) - assert.Equal(t, testData, pr.KeyData) - // Failure cases tested in TestNewPRSignedBy. -} - -// Return the result of modifying validJSON with fn and unmarshaling it into *pr -func tryUnmarshalModifiedSignedBy(t *testing.T, pr *prSignedBy, validJSON []byte, modifyFn func(mSI)) error { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - modifyFn(tmp) - - testJSON, err := json.Marshal(tmp) - require.NoError(t, err) - - *pr = prSignedBy{} - return json.Unmarshal(testJSON, &pr) -} - -func TestPRSignedByUnmarshalJSON(t *testing.T) { - var pr prSignedBy - - testInvalidJSONInput(t, &pr) - - // Start with a valid JSON. - validPR, err := NewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("abc"), NewPRMMatchRepoDigestOrExact()) - require.NoError(t, err) - validJSON, err := json.Marshal(validPR) - require.NoError(t, err) - - // Success with KeyData - pr = prSignedBy{} - err = json.Unmarshal(validJSON, &pr) - require.NoError(t, err) - assert.Equal(t, validPR, &pr) - - // Success with KeyPath - kpPR, err := NewPRSignedByKeyPath(SBKeyTypeGPGKeys, "/foo/bar", NewPRMMatchRepoDigestOrExact()) - require.NoError(t, err) - testJSON, err := json.Marshal(kpPR) - require.NoError(t, err) - pr = prSignedBy{} - err = json.Unmarshal(testJSON, &pr) - require.NoError(t, err) - assert.Equal(t, kpPR, &pr) - - // newPolicyRequirementFromJSON recognizes this type - _pr, err := newPolicyRequirementFromJSON(validJSON) - require.NoError(t, err) - assert.Equal(t, validPR, _pr) - - // Various ways to corrupt the JSON - breakFns := []func(mSI){ - // The "type" field is missing - func(v mSI) { delete(v, "type") }, - // Wrong "type" field - func(v mSI) { v["type"] = 1 }, - func(v mSI) { v["type"] = "this is invalid" }, - // Extra top-level sub-object - func(v mSI) { v["unexpected"] = 1 }, - // The "keyType" field is missing - func(v mSI) { delete(v, "keyType") }, - // Invalid "keyType" field - func(v mSI) { v["keyType"] = "this is invalid" }, - // Both "keyPath" and "keyData" are missing - func(v mSI) { delete(v, "keyData") }, - // Both "keyPath" and "keyData" are present - func(v mSI) { v["keyPath"] = "/foo/bar" }, - // Invalid "keyPath" field - func(v mSI) { delete(v, "keyData"); v["keyPath"] = 1 }, - func(v mSI) { v["type"] = "this is invalid" }, - // Invalid "keyData" field - func(v mSI) { v["keyData"] = 1 }, - func(v mSI) { v["keyData"] = "this is invalid base64" }, - // Invalid "signedIdentity" field - func(v mSI) { v["signedIdentity"] = "this is invalid" }, - // "signedIdentity" is an explicit nil - func(v mSI) { v["signedIdentity"] = nil }, - } - for _, fn := range breakFns { - err = tryUnmarshalModifiedSignedBy(t, &pr, validJSON, fn) - assert.Error(t, err, string(testJSON)) - } - - // Duplicated fields - 
for _, field := range []string{"type", "keyType", "keyData", "signedIdentity"} { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) - - pr = prSignedBy{} - err = json.Unmarshal(testJSON, &pr) - assert.Error(t, err) - } - // Handle "keyPath", which is not in validJSON, specially - pathPR, err := NewPRSignedByKeyPath(SBKeyTypeGPGKeys, "/foo/bar", NewPRMMatchRepoDigestOrExact()) - require.NoError(t, err) - testJSON, err = json.Marshal(pathPR) - require.NoError(t, err) - testJSON = addExtraJSONMember(t, testJSON, "keyPath", pr.KeyPath) - pr = prSignedBy{} - err = json.Unmarshal(testJSON, &pr) - assert.Error(t, err) - - // Various allowed modifications to the requirement - allowedModificationFns := []func(mSI){ - // Delete the signedIdentity field - func(v mSI) { delete(v, "signedIdentity") }, - } - for _, fn := range allowedModificationFns { - err = tryUnmarshalModifiedSignedBy(t, &pr, validJSON, fn) - require.NoError(t, err) - } - - // Various ways to set signedIdentity to the default value - signedIdentityDefaultFns := []func(mSI){ - // Set signedIdentity to the default explicitly - func(v mSI) { v["signedIdentity"] = NewPRMMatchRepoDigestOrExact() }, - // Delete the signedIdentity field - func(v mSI) { delete(v, "signedIdentity") }, - } - for _, fn := range signedIdentityDefaultFns { - err = tryUnmarshalModifiedSignedBy(t, &pr, validJSON, fn) - require.NoError(t, err) - assert.Equal(t, NewPRMMatchRepoDigestOrExact(), pr.SignedIdentity) - } -} - -func TestSBKeyTypeIsValid(t *testing.T) { - // Valid values - for _, s := range []sbKeyType{ - SBKeyTypeGPGKeys, - SBKeyTypeSignedByGPGKeys, - SBKeyTypeX509Certificates, - SBKeyTypeSignedByX509CAs, - } { - assert.True(t, s.IsValid()) - } - - // Invalid values - for _, s := range []string{"", "this is invalid"} { - assert.False(t, sbKeyType(s).IsValid()) - } -} - -func TestSBKeyTypeUnmarshalJSON(t *testing.T) { - var kt sbKeyType - - testInvalidJSONInput(t, &kt) - - // Valid values. - for _, v := range []sbKeyType{ - SBKeyTypeGPGKeys, - SBKeyTypeSignedByGPGKeys, - SBKeyTypeX509Certificates, - SBKeyTypeSignedByX509CAs, - } { - kt = sbKeyType("") - err := json.Unmarshal([]byte(`"`+string(v)+`"`), &kt) - assert.NoError(t, err) - } - - // Invalid values - kt = sbKeyType("") - err := json.Unmarshal([]byte(`""`), &kt) - assert.Error(t, err) - - kt = sbKeyType("") - err = json.Unmarshal([]byte(`"this is invalid"`), &kt) - assert.Error(t, err) -} - -// NewPRSignedBaseLayer is like NewPRSignedBaseLayer, except it must not fail. -func xNewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) PolicyRequirement { - pr, err := NewPRSignedBaseLayer(baseLayerIdentity) - if err != nil { - panic("xNewPRSignedBaseLayer failed") - } - return pr -} - -func TestNewPRSignedBaseLayer(t *testing.T) { - testBLI := NewPRMMatchExact() - - // Success - _pr, err := NewPRSignedBaseLayer(testBLI) - require.NoError(t, err) - pr, ok := _pr.(*prSignedBaseLayer) - require.True(t, ok) - assert.Equal(t, &prSignedBaseLayer{ - prCommon: prCommon{prTypeSignedBaseLayer}, - BaseLayerIdentity: testBLI, - }, pr) - - // Invalid baseLayerIdentity - _, err = NewPRSignedBaseLayer(nil) - assert.Error(t, err) -} - -func TestPRSignedBaseLayerUnmarshalJSON(t *testing.T) { - var pr prSignedBaseLayer - - testInvalidJSONInput(t, &pr) - - // Start with a valid JSON. 
- baseIdentity, err := NewPRMExactReference("registry.access.redhat.com/rhel7/rhel:7.2.3") - require.NoError(t, err) - validPR, err := NewPRSignedBaseLayer(baseIdentity) - require.NoError(t, err) - validJSON, err := json.Marshal(validPR) - require.NoError(t, err) - - // Success - pr = prSignedBaseLayer{} - err = json.Unmarshal(validJSON, &pr) - require.NoError(t, err) - assert.Equal(t, validPR, &pr) - - // newPolicyRequirementFromJSON recognizes this type - _pr, err := newPolicyRequirementFromJSON(validJSON) - require.NoError(t, err) - assert.Equal(t, validPR, _pr) - - // Various ways to corrupt the JSON - breakFns := []func(mSI){ - // The "type" field is missing - func(v mSI) { delete(v, "type") }, - // Wrong "type" field - func(v mSI) { v["type"] = 1 }, - func(v mSI) { v["type"] = "this is invalid" }, - // Extra top-level sub-object - func(v mSI) { v["unexpected"] = 1 }, - // The "baseLayerIdentity" field is missing - func(v mSI) { delete(v, "baseLayerIdentity") }, - // Invalid "baseLayerIdentity" field - func(v mSI) { v["baseLayerIdentity"] = "this is invalid" }, - // Invalid "baseLayerIdentity": an explicit nil - func(v mSI) { v["baseLayerIdentity"] = nil }, - } - for _, fn := range breakFns { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - fn(tmp) - - testJSON, err := json.Marshal(tmp) - require.NoError(t, err) - - pr = prSignedBaseLayer{} - err = json.Unmarshal(testJSON, &pr) - assert.Error(t, err) - } - - // Duplicated fields - for _, field := range []string{"type", "baseLayerIdentity"} { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) - - pr = prSignedBaseLayer{} - err = json.Unmarshal(testJSON, &pr) - assert.Error(t, err) - } -} - -func TestNewPolicyReferenceMatchFromJSON(t *testing.T) { - // Sample success. Others tested in the individual PolicyReferenceMatch.UnmarshalJSON implementations. - validPRM := NewPRMMatchRepoDigestOrExact() - validJSON, err := json.Marshal(validPRM) - require.NoError(t, err) - prm, err := newPolicyReferenceMatchFromJSON(validJSON) - require.NoError(t, err) - assert.Equal(t, validPRM, prm) - - // Invalid - for _, invalid := range []interface{}{ - // Not an object - 1, - // Missing type - prmCommon{}, - // Invalid type - prmCommon{Type: "this is invalid"}, - // Valid type but invalid contents - prmExactReference{ - prmCommon: prmCommon{Type: prmTypeExactReference}, - DockerReference: "", - }, - } { - testJSON, err := json.Marshal(invalid) - require.NoError(t, err) - - _, err = newPolicyReferenceMatchFromJSON(testJSON) - assert.Error(t, err, string(testJSON)) - } -} - -func TestNewPRMMatchExact(t *testing.T) { - _prm := NewPRMMatchExact() - prm, ok := _prm.(*prmMatchExact) - require.True(t, ok) - assert.Equal(t, &prmMatchExact{prmCommon{prmTypeMatchExact}}, prm) -} - -func TestPRMMatchExactUnmarshalJSON(t *testing.T) { - var prm prmMatchExact - - testInvalidJSONInput(t, &prm) - - // Start with a valid JSON.
- validPR := NewPRMMatchExact() - validJSON, err := json.Marshal(validPR) - require.NoError(t, err) - - // Success - prm = prmMatchExact{} - err = json.Unmarshal(validJSON, &prm) - require.NoError(t, err) - assert.Equal(t, validPR, &prm) - - // newPolicyReferenceMatchFromJSON recognizes this type - _pr, err := newPolicyReferenceMatchFromJSON(validJSON) - require.NoError(t, err) - assert.Equal(t, validPR, _pr) - - for _, invalid := range []mSI{ - // Missing "type" field - {}, - // Wrong "type" field - {"type": 1}, - {"type": "this is invalid"}, - // Extra fields - { - "type": string(prmTypeMatchExact), - "unknown": "foo", - }, - } { - testJSON, err := json.Marshal(invalid) - require.NoError(t, err) - - prm = prmMatchExact{} - err = json.Unmarshal(testJSON, &prm) - assert.Error(t, err, string(testJSON)) - } - - // Duplicated fields - for _, field := range []string{"type"} { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) - - prm = prmMatchExact{} - err = json.Unmarshal(testJSON, &prm) - assert.Error(t, err) - } -} - -func TestNewPRMMatchRepoDigestOrExact(t *testing.T) { - _prm := NewPRMMatchRepoDigestOrExact() - prm, ok := _prm.(*prmMatchRepoDigestOrExact) - require.True(t, ok) - assert.Equal(t, &prmMatchRepoDigestOrExact{prmCommon{prmTypeMatchRepoDigestOrExact}}, prm) -} - -func TestPRMMatchRepoDigestOrExactUnmarshalJSON(t *testing.T) { - var prm prmMatchRepoDigestOrExact - - testInvalidJSONInput(t, &prm) - - // Start with a valid JSON. - validPR := NewPRMMatchRepoDigestOrExact() - validJSON, err := json.Marshal(validPR) - require.NoError(t, err) - - // Success - prm = prmMatchRepoDigestOrExact{} - err = json.Unmarshal(validJSON, &prm) - require.NoError(t, err) - assert.Equal(t, validPR, &prm) - - // newPolicyReferenceMatchFromJSON recognizes this type - _pr, err := newPolicyReferenceMatchFromJSON(validJSON) - require.NoError(t, err) - assert.Equal(t, validPR, _pr) - - for _, invalid := range []mSI{ - // Missing "type" field - {}, - // Wrong "type" field - {"type": 1}, - {"type": "this is invalid"}, - // Extra fields - { - "type": string(prmTypeMatchRepoDigestOrExact), - "unknown": "foo", - }, - } { - testJSON, err := json.Marshal(invalid) - require.NoError(t, err) - - prm = prmMatchRepoDigestOrExact{} - err = json.Unmarshal(testJSON, &prm) - assert.Error(t, err, string(testJSON)) - } - - // Duplicated fields - for _, field := range []string{"type"} { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) - - prm = prmMatchRepoDigestOrExact{} - err = json.Unmarshal(testJSON, &prm) - assert.Error(t, err) - } -} - -func TestNewPRMMatchRepository(t *testing.T) { - _prm := NewPRMMatchRepository() - prm, ok := _prm.(*prmMatchRepository) - require.True(t, ok) - assert.Equal(t, &prmMatchRepository{prmCommon{prmTypeMatchRepository}}, prm) -} - -func TestPRMMatchRepositoryUnmarshalJSON(t *testing.T) { - var prm prmMatchRepository - - testInvalidJSONInput(t, &prm) - - // Start with a valid JSON. 
- validPR := NewPRMMatchRepository() - validJSON, err := json.Marshal(validPR) - require.NoError(t, err) - - // Success - prm = prmMatchRepository{} - err = json.Unmarshal(validJSON, &prm) - require.NoError(t, err) - assert.Equal(t, validPR, &prm) - - // newPolicyReferenceMatchFromJSON recognizes this type - _pr, err := newPolicyReferenceMatchFromJSON(validJSON) - require.NoError(t, err) - assert.Equal(t, validPR, _pr) - - for _, invalid := range []mSI{ - // Missing "type" field - {}, - // Wrong "type" field - {"type": 1}, - {"type": "this is invalid"}, - // Extra fields - { - "type": string(prmTypeMatchRepository), - "unknown": "foo", - }, - } { - testJSON, err := json.Marshal(invalid) - require.NoError(t, err) - - prm = prmMatchRepository{} - err = json.Unmarshal(testJSON, &prm) - assert.Error(t, err, string(testJSON)) - } - - // Duplicated fields - for _, field := range []string{"type"} { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) - - prm = prmMatchRepository{} - err = json.Unmarshal(testJSON, &prm) - assert.Error(t, err) - } -} - -// xNewPRMExactReference is like NewPRMExactReference, except it must not fail. -func xNewPRMExactReference(dockerReference string) PolicyReferenceMatch { - pr, err := NewPRMExactReference(dockerReference) - if err != nil { - panic("xNewPRMExactReference failed") - } - return pr -} - -func TestNewPRMExactReference(t *testing.T) { - const testDR = "library/busybox:latest" - - // Success - _prm, err := NewPRMExactReference(testDR) - require.NoError(t, err) - prm, ok := _prm.(*prmExactReference) - require.True(t, ok) - assert.Equal(t, &prmExactReference{ - prmCommon: prmCommon{prmTypeExactReference}, - DockerReference: testDR, - }, prm) - - // Invalid dockerReference - _, err = NewPRMExactReference("") - assert.Error(t, err) - // Uppercase is invalid in Docker reference components. - _, err = NewPRMExactReference("INVALIDUPPERCASE:latest") - assert.Error(t, err) - // Missing tag - _, err = NewPRMExactReference("library/busybox") - assert.Error(t, err) -} - -func TestPRMExactReferenceUnmarshalJSON(t *testing.T) { - var prm prmExactReference - - testInvalidJSONInput(t, &prm) - - // Start with a valid JSON. 
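To summarize the constructor checks exercised in TestNewPRMExactReference above, a small table-driven restatement (hypothetical helper, mirroring the assertions already made):

func TestNewPRMExactReferenceSummary(t *testing.T) {
	for _, c := range []struct {
		ref string
		ok  bool
	}{
		{"library/busybox:latest", true},   // repository plus tag
		{"library/busybox", false},         // a tag (or digest) is required
		{"INVALIDUPPERCASE:latest", false}, // uppercase repository components are invalid
		{"", false},                        // empty references are invalid
	} {
		_, err := NewPRMExactReference(c.ref)
		assert.Equal(t, c.ok, err == nil, c.ref)
	}
}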
- validPRM, err := NewPRMExactReference("library/busybox:latest") - require.NoError(t, err) - validJSON, err := json.Marshal(validPRM) - require.NoError(t, err) - - // Success - prm = prmExactReference{} - err = json.Unmarshal(validJSON, &prm) - require.NoError(t, err) - assert.Equal(t, validPRM, &prm) - - // newPolicyReferenceMatchFromJSON recognizes this type - _prm, err := newPolicyReferenceMatchFromJSON(validJSON) - require.NoError(t, err) - assert.Equal(t, validPRM, _prm) - - // Various ways to corrupt the JSON - breakFns := []func(mSI){ - // The "type" field is missing - func(v mSI) { delete(v, "type") }, - // Wrong "type" field - func(v mSI) { v["type"] = 1 }, - func(v mSI) { v["type"] = "this is invalid" }, - // Extra top-level sub-object - func(v mSI) { v["unexpected"] = 1 }, - // The "dockerReference" field is missing - func(v mSI) { delete(v, "dockerReference") }, - // Invalid "dockerReference" field - func(v mSI) { v["dockerReference"] = 1 }, - } - for _, fn := range breakFns { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - fn(tmp) - - testJSON, err := json.Marshal(tmp) - require.NoError(t, err) - - prm = prmExactReference{} - err = json.Unmarshal(testJSON, &prm) - assert.Error(t, err) - } - - // Duplicated fields - for _, field := range []string{"type", "dockerReference"} { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) - - prm = prmExactReference{} - err = json.Unmarshal(testJSON, &prm) - assert.Error(t, err) - } -} - -// xNewPRMExactRepository is like NewPRMExactRepository, except it must not fail. -func xNewPRMExactRepository(dockerRepository string) PolicyReferenceMatch { - pr, err := NewPRMExactRepository(dockerRepository) - if err != nil { - panic("xNewPRMExactRepository failed") - } - return pr -} - -func TestNewPRMExactRepository(t *testing.T) { - const testDR = "library/busybox:latest" - - // Success - _prm, err := NewPRMExactRepository(testDR) - require.NoError(t, err) - prm, ok := _prm.(*prmExactRepository) - require.True(t, ok) - assert.Equal(t, &prmExactRepository{ - prmCommon: prmCommon{prmTypeExactRepository}, - DockerRepository: testDR, - }, prm) - - // Invalid dockerRepository - _, err = NewPRMExactRepository("") - assert.Error(t, err) - // Uppercase is invalid in Docker reference components. - _, err = NewPRMExactRepository("INVALIDUPPERCASE") - assert.Error(t, err) -} - -func TestPRMExactRepositoryUnmarshalJSON(t *testing.T) { - var prm prmExactRepository - - testInvalidJSONInput(t, &prm) - - // Start with a valid JSON.
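Complementing the exactReference constructor above: exactRepository matches on the repository only, so a tag-less name should be fine here. This is an assumption (the tests above only demonstrate that "" and uppercase names are rejected):

// Hypothetical check, not asserted by the original tests:
_, err := NewPRMExactRepository("library/busybox")
assert.NoError(t, err)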
- validPRM, err := NewPRMExactRepository("library/busybox:latest") - require.NoError(t, err) - validJSON, err := json.Marshal(validPRM) - require.NoError(t, err) - - // Success - prm = prmExactRepository{} - err = json.Unmarshal(validJSON, &prm) - require.NoError(t, err) - assert.Equal(t, validPRM, &prm) - - // newPolicyReferenceMatchFromJSON recognizes this type - _prm, err := newPolicyReferenceMatchFromJSON(validJSON) - require.NoError(t, err) - assert.Equal(t, validPRM, _prm) - - // Various ways to corrupt the JSON - breakFns := []func(mSI){ - // The "type" field is missing - func(v mSI) { delete(v, "type") }, - // Wrong "type" field - func(v mSI) { v["type"] = 1 }, - func(v mSI) { v["type"] = "this is invalid" }, - // Extra top-level sub-object - func(v mSI) { v["unexpected"] = 1 }, - // The "dockerRepository" field is missing - func(v mSI) { delete(v, "dockerRepository") }, - // Invalid "dockerRepository" field - func(v mSI) { v["dockerRepository"] = 1 }, - } - for _, fn := range breakFns { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - fn(tmp) - - testJSON, err := json.Marshal(tmp) - require.NoError(t, err) - - prm = prmExactRepository{} - err = json.Unmarshal(testJSON, &prm) - assert.Error(t, err) - } - - // Duplicated fields - for _, field := range []string{"type", "dockerRepository"} { - var tmp mSI - err := json.Unmarshal(validJSON, &tmp) - require.NoError(t, err) - - testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) - - prm = prmExactRepository{} - err = json.Unmarshal(testJSON, &prm) - assert.Error(t, err) - } -} diff --git a/vendor/github.com/containers/image/signature/policy_eval.go b/vendor/github.com/containers/image/signature/policy_eval.go deleted file mode 100644 index f818eb095a66..000000000000 --- a/vendor/github.com/containers/image/signature/policy_eval.go +++ /dev/null @@ -1,289 +0,0 @@ -// This defines the top-level policy evaluation API. -// To the extent possible, the interface of the functions provided -// here is intended to be completely unambiguous, and stable for users -// to rely on. - -package signature - -import ( - "context" - - "github.com/containers/image/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// PolicyRequirementError is an explanatory text for rejecting a signature or an image. -type PolicyRequirementError string - -func (err PolicyRequirementError) Error() string { - return string(err) -} - -// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted. -type signatureAcceptanceResult string - -const ( - sarAccepted signatureAcceptanceResult = "sarAccepted" - sarRejected signatureAcceptanceResult = "sarRejected" - sarUnknown signatureAcceptanceResult = "sarUnknown" -) - -// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image. -// The type is public, but its definition is private. -type PolicyRequirement interface { - // FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache - // costly initialization like creating temporary GPG home directories and reading files. - // Setup() (someState, error) - // Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
- - // isSignatureAuthorAccepted, given an image and a signature blob, returns: - // - sarAccepted if the signature has been verified against the appropriate public key - // (where "appropriate public key" may depend on the contents of the signature); - // in that case a parsed Signature should be returned. - // - sarRejected if the signature has not been verified; - // in that case error must be non-nil, and should be a PolicyRequirementError if evaluation - // succeeded but the result was rejection. - // - sarUnknown if this PolicyRequirement does not deal with signatures. - // NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed. - // Returning sarUnknown and a non-nil error value is invalid. - // WARNING: This makes the signature contents acceptable for further processing, - // but it does not necessarily mean that the contents of the signature are - // consistent with local policy. - // For example: - // - Do not use a true value to determine whether to run - // a container based on this image; use IsRunningImageAllowed instead. - // - Just because a signature is accepted does not automatically mean the contents of the - // signature are authorized to run code as root, or to affect system or cluster configuration. - isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) - - // isRunningImageAllowed returns true if the requirement allows running an image. - // If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation - // succeeded but the result was rejection. - // WARNING: This validates signatures and the manifest, but does not download or validate the - // layers. Users must validate that the layers match their expected digests. - isRunningImageAllowed(image types.UnparsedImage) (bool, error) -} - -// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement. -// The type is public, but its implementation is private. -type PolicyReferenceMatch interface { - // matchesDockerReference decides whether a specific image identity is accepted for an image - // (or, usually, for the image's Reference().DockerReference()). Note that - // image.Reference().DockerReference() may be nil. - matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool -} - -// PolicyContext encapsulates a policy and possible cached state -// for speeding up its evaluation. -type PolicyContext struct { - Policy *Policy - state policyContextState // Internal consistency checking -} - -// policyContextState is used internally to verify the users are not misusing a PolicyContext. -type policyContextState string - -const ( - pcInvalid policyContextState = "" - pcInitializing policyContextState = "Initializing" - pcReady policyContextState = "Ready" - pcInUse policyContextState = "InUse" - pcDestroying policyContextState = "Destroying" - pcDestroyed policyContextState = "Destroyed" -) - -// changeState changes pc.state, or fails if the state is unexpected -func (pc *PolicyContext) changeState(expected, new policyContextState) error { - if pc.state != expected { - return errors.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state) - } - pc.state = new - return nil -} - -// NewPolicyContext sets up and initializes a context for the specified policy. -// The policy must not be modified while the context exists. FIXME: make a deep copy?
-// If this function succeeds, the caller should call PolicyContext.Destroy() when done. -func NewPolicyContext(policy *Policy) (*PolicyContext, error) { - pc := &PolicyContext{Policy: policy, state: pcInitializing} - // FIXME: initialize - if err := pc.changeState(pcInitializing, pcReady); err != nil { - // Huh?! This should never fail, we didn't give the pointer to anybody. - // Just give up and leave unclean state around. - return nil, err - } - return pc, nil - } - -// Destroy should be called when the user of the context is done with it. -func (pc *PolicyContext) Destroy() error { - if err := pc.changeState(pcReady, pcDestroying); err != nil { - return err - } - // FIXME: destroy - return pc.changeState(pcDestroying, pcDestroyed) -} - -// policyIdentityLogName returns a string description of the image identity for policy purposes. -// ONLY use this for log messages, not for any decisions! -func policyIdentityLogName(ref types.ImageReference) string { - return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity() -} - -// requirementsForImageRef selects the appropriate requirements for ref. -func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements { - // Do we have a PolicyTransportScopes for this transport? - transportName := ref.Transport().Name() - if transportScopes, ok := pc.Policy.Transports[transportName]; ok { - // Look for a full match. - identity := ref.PolicyConfigurationIdentity() - if req, ok := transportScopes[identity]; ok { - logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity) - return req - } - - // Look for a match of the possible parent namespaces. - for _, name := range ref.PolicyConfigurationNamespaces() { - if req, ok := transportScopes[name]; ok { - logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name) - return req - } - } - - // Look for a default match for the transport. - if req, ok := transportScopes[""]; ok { - logrus.Debugf(` Using transport "%s" policy section ""`, transportName) - return req - } - } - - logrus.Debugf(" Using default policy section") - return pc.Policy.Default -} - -// GetSignaturesWithAcceptedAuthor returns those signatures from an image -// for which the policy accepts the author (and which have been successfully -// verified). -// NOTE: This may legitimately return an empty list and no error, if the image -// has no signatures or only invalid signatures. -// WARNING: This makes the signature contents acceptable for further processing, -// but it does not necessarily mean that the contents of the signature are -// consistent with local policy. -// For example: -// - Do not use the existence of an accepted signature to determine whether to run -// a container based on this image; use IsRunningImageAllowed instead. -// - Just because a signature is accepted does not automatically mean the contents of the -// signature are authorized to run code as root, or to affect system or cluster configuration.
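A minimal caller sketch for the function defined next. It is not from the original source; the Signature field names match the tests later in this patch, everything else is illustrative:

// Hypothetical helper demonstrating the warning above: acceptance here is
// not authorization to run, so callers still need IsRunningImageAllowed.
func logAcceptedAuthors(pc *PolicyContext, img types.UnparsedImage) error {
	sigs, err := pc.GetSignaturesWithAcceptedAuthor(img)
	if err != nil {
		return err
	}
	for _, s := range sigs {
		fmt.Printf("digest %s signed as %s\n", s.DockerManifestDigest, s.DockerReference)
	}
	return nil
}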
-func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(image types.UnparsedImage) (sigs []*Signature, finalErr error) { - if err := pc.changeState(pcReady, pcInUse); err != nil { - return nil, err - } - defer func() { - if err := pc.changeState(pcInUse, pcReady); err != nil { - sigs = nil - finalErr = err - } - }() - - logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference())) - reqs := pc.requirementsForImageRef(image.Reference()) - - // FIXME: rename Signatures to UnverifiedSignatures - // FIXME: pass context.Context - unverifiedSignatures, err := image.Signatures(context.TODO()) - if err != nil { - return nil, err - } - - res := make([]*Signature, 0, len(unverifiedSignatures)) - for sigNumber, sig := range unverifiedSignatures { - var acceptedSig *Signature // non-nil if accepted - rejected := false - // FIXME? Say more about the contents of the signature, i.e. parse it even before verification?! - logrus.Debugf("Evaluating signature %d:", sigNumber) - interpretingReqs: - for reqNumber, req := range reqs { - // FIXME: Log the requirement itself? For now, we use just the number. - // FIXME: supply state - switch res, as, err := req.isSignatureAuthorAccepted(image, sig); res { - case sarAccepted: - if as == nil { // Coverage: this should never happen - logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber) - rejected = true - break interpretingReqs - } - logrus.Debugf(" Requirement %d: signature accepted", reqNumber) - if acceptedSig == nil { - acceptedSig = as - } else if *as != *acceptedSig { // Coverage: this should never happen - // Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents? - logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber) - rejected = true - acceptedSig = nil - break interpretingReqs - } - case sarRejected: - logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error()) - rejected = true - break interpretingReqs - case sarUnknown: - if err != nil { // Coverage: this should never happen - logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error()) - rejected = true - break interpretingReqs - } - logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber) - default: // Coverage: this should never happen - logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res)) - rejected = true - break interpretingReqs - } - } - // This also handles the (invalid) case of empty reqs, by rejecting the signature. - if acceptedSig != nil && !rejected { - logrus.Debugf(" Overall: OK, signature accepted") - res = append(res, acceptedSig) - } else { - logrus.Debugf(" Overall: Signature not accepted") - } - } - return res, nil -} - -// IsRunningImageAllowed returns true iff the policy allows running the image. -// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation -// succeeded but the result was rejection. -// WARNING: This validates signatures and the manifest, but does not download or validate the -// layers. Users must validate that the layers match their expected digests.
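Before the implementation, a hedged end-to-end sketch of the intended lifecycle. NewPolicyFromFile is assumed to be this package's policy.json loader (it is not shown in this patch), and img is any types.UnparsedImage:

func allowedToRun(img types.UnparsedImage) (bool, error) {
	policy, err := NewPolicyFromFile("/etc/containers/policy.json")
	if err != nil {
		return false, err
	}
	pc, err := NewPolicyContext(policy)
	if err != nil {
		return false, err
	}
	defer pc.Destroy() // moves pc to pcDestroyed; any later use fails its state check
	return pc.IsRunningImageAllowed(img)
}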
-func (pc *PolicyContext) IsRunningImageAllowed(image types.UnparsedImage) (res bool, finalErr error) { - if err := pc.changeState(pcReady, pcInUse); err != nil { - return false, err - } - defer func() { - if err := pc.changeState(pcInUse, pcReady); err != nil { - res = false - finalErr = err - } - }() - - logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference())) - reqs := pc.requirementsForImageRef(image.Reference()) - - if len(reqs) == 0 { - return false, PolicyRequirementError("List of verification policy requirements must not be empty") - } - - for reqNumber, req := range reqs { - // FIXME: supply state - allowed, err := req.isRunningImageAllowed(image) - if !allowed { - logrus.Debugf("Requirement %d: denied, done", reqNumber) - return false, err - } - logrus.Debugf(" Requirement %d: allowed", reqNumber) - } - // We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image. - logrus.Debugf("Overall: allowed") - return true, nil -} diff --git a/vendor/github.com/containers/image/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/signature/policy_eval_baselayer.go deleted file mode 100644 index 898958012e29..000000000000 --- a/vendor/github.com/containers/image/signature/policy_eval_baselayer.go +++ /dev/null @@ -1,18 +0,0 @@ -// Policy evaluation for prSignedBaseLayer. - -package signature - -import ( - "github.com/containers/image/types" - "github.com/sirupsen/logrus" -) - -func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { - return sarUnknown, nil, nil -} - -func (pr *prSignedBaseLayer) isRunningImageAllowed(image types.UnparsedImage) (bool, error) { - // FIXME? Reject this at policy parsing time already? - logrus.Errorf("signedBaseLayer not implemented yet!") - return false, PolicyRequirementError("signedBaseLayer not implemented yet!") -} diff --git a/vendor/github.com/containers/image/signature/policy_eval_baselayer_test.go b/vendor/github.com/containers/image/signature/policy_eval_baselayer_test.go deleted file mode 100644 index 937cb928f030..000000000000 --- a/vendor/github.com/containers/image/signature/policy_eval_baselayer_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package signature - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestPRSignedBaseLayerIsSignatureAuthorAccepted(t *testing.T) { - pr, err := NewPRSignedBaseLayer(NewPRMMatchRepository()) - require.NoError(t, err) - // Pass nil pointers to, kind of, test that the return value does not depend on the parameters. - sar, parsedSig, err := pr.isSignatureAuthorAccepted(nil, nil) - assertSARUnknown(t, sar, parsedSig, err) -} - -func TestPRSignedBaseLayerIsRunningImageAllowed(t *testing.T) { - // This will obviously need to change after signedBaseLayer is implemented. - pr, err := NewPRSignedBaseLayer(NewPRMMatchRepository()) - require.NoError(t, err) - // Pass a nil pointer to, kind of, test that the return value does not depend on the image. - res, err := pr.isRunningImageAllowed(nil) - assertRunningRejectedPolicyRequirement(t, res, err) -} diff --git a/vendor/github.com/containers/image/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/signature/policy_eval_signedby.go deleted file mode 100644 index 56665124c069..000000000000 --- a/vendor/github.com/containers/image/signature/policy_eval_signedby.go +++ /dev/null @@ -1,131 +0,0 @@ -// Policy evaluation for prSignedBy. 
- -package signature - -import ( - "context" - "fmt" - "io/ioutil" - "strings" - - "github.com/pkg/errors" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" -) - -func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { - switch pr.KeyType { - case SBKeyTypeGPGKeys: - case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: - // FIXME? Reject this at policy parsing time already? - return sarRejected, nil, errors.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType)) - default: - // This should never happen, newPRSignedBy ensures KeyType.IsValid() - return sarRejected, nil, errors.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType)) - } - - if pr.KeyPath != "" && pr.KeyData != nil { - return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`) - } - // FIXME: move this to per-context initialization - var data []byte - if pr.KeyData != nil { - data = pr.KeyData - } else { - d, err := ioutil.ReadFile(pr.KeyPath) - if err != nil { - return sarRejected, nil, err - } - data = d - } - - // FIXME: move this to per-context initialization - mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data) - if err != nil { - return sarRejected, nil, err - } - defer mech.Close() - if len(trustedIdentities) == 0 { - return sarRejected, nil, PolicyRequirementError("No public keys imported") - } - - signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{ - validateKeyIdentity: func(keyIdentity string) error { - for _, trustedIdentity := range trustedIdentities { - if keyIdentity == trustedIdentity { - return nil - } - } - // Coverage: We use a private GPG home directory and only import trusted keys, so this should - // not be reachable. - return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity)) - }, - validateSignedDockerReference: func(ref string) error { - if !pr.SignedIdentity.matchesDockerReference(image, ref) { - return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref)) - } - return nil - }, - validateSignedDockerManifestDigest: func(digest digest.Digest) error { - m, _, err := image.Manifest() - if err != nil { - return err - } - digestMatches, err := manifest.MatchesDigest(m, digest) - if err != nil { - return err - } - if !digestMatches { - return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest)) - } - return nil - }, - }) - if err != nil { - return sarRejected, nil, err - } - - return sarAccepted, signature, nil -} - -func (pr *prSignedBy) isRunningImageAllowed(image types.UnparsedImage) (bool, error) { - // FIXME: pass context.Context - sigs, err := image.Signatures(context.TODO()) - if err != nil { - return false, err - } - var rejections []error - for _, s := range sigs { - var reason error - switch res, _, err := pr.isSignatureAuthorAccepted(image, s); res { - case sarAccepted: - // One accepted signature is enough. - return true, nil - case sarRejected: - reason = err - case sarUnknown: - // Huh?! This should not happen at all; treat it as any other invalid value.
- fallthrough - default: - reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) - } - rejections = append(rejections, reason) - } - var summary error - switch len(rejections) { - case 0: - summary = PolicyRequirementError("A signature was required, but no signature exists") - case 1: - summary = rejections[0] - default: - var msgs []string - for _, e := range rejections { - msgs = append(msgs, e.Error()) - } - summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s", - strings.Join(msgs, "; "))) - } - return false, summary -} diff --git a/vendor/github.com/containers/image/signature/policy_eval_signedby_test.go b/vendor/github.com/containers/image/signature/policy_eval_signedby_test.go deleted file mode 100644 index 19086fcf5b1b..000000000000 --- a/vendor/github.com/containers/image/signature/policy_eval_signedby_test.go +++ /dev/null @@ -1,264 +0,0 @@ -package signature - -import ( - "io/ioutil" - "os" - "path" - "testing" - - "github.com/containers/image/directory" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// dirImageMock returns a types.UnparsedImage for a directory, claiming a specified dockerReference. -// The caller must call .Close() on the returned UnparsedImage. -func dirImageMock(t *testing.T, dir, dockerReference string) types.UnparsedImage { - ref, err := reference.ParseNormalizedNamed(dockerReference) - require.NoError(t, err) - return dirImageMockWithRef(t, dir, refImageReferenceMock{ref}) -} - -// dirImageMockWithRef returns a types.UnparsedImage for a directory, claiming a specified ref. -// The caller must call .Close() on the returned UnparsedImage. -func dirImageMockWithRef(t *testing.T, dir string, ref types.ImageReference) types.UnparsedImage { - srcRef, err := directory.NewReference(dir) - require.NoError(t, err) - src, err := srcRef.NewImageSource(nil, nil) - require.NoError(t, err) - return image.UnparsedFromSource(&dirImageSourceMock{ - ImageSource: src, - ref: ref, - }) -} - -// dirImageSourceMock inherits dirImageSource, but overrides its Reference method. 
-type dirImageSourceMock struct { - types.ImageSource - ref types.ImageReference -} - -func (d *dirImageSourceMock) Reference() types.ImageReference { - return d.ref -} - -func TestPRSignedByIsSignatureAuthorAccepted(t *testing.T) { - ktGPG := SBKeyTypeGPGKeys - prm := NewPRMMatchExact() - testImage := dirImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest") - defer testImage.Close() - testImageSig, err := ioutil.ReadFile("fixtures/dir-img-valid/signature-1") - require.NoError(t, err) - - // Successful validation, with KeyPath and KeyData - pr, err := NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) - require.NoError(t, err) - sar, parsedSig, err := pr.isSignatureAuthorAccepted(testImage, testImageSig) - assertSARAccepted(t, sar, parsedSig, err, Signature{ - DockerManifestDigest: TestImageManifestDigest, - DockerReference: "testing/manifest:latest", - }) - - keyData, err := ioutil.ReadFile("fixtures/public-key.gpg") - require.NoError(t, err) - pr, err = NewPRSignedByKeyData(ktGPG, keyData, prm) - require.NoError(t, err) - sar, parsedSig, err = pr.isSignatureAuthorAccepted(testImage, testImageSig) - assertSARAccepted(t, sar, parsedSig, err, Signature{ - DockerManifestDigest: TestImageManifestDigest, - DockerReference: "testing/manifest:latest", - }) - - // Unimplemented and invalid KeyType values - for _, keyType := range []sbKeyType{SBKeyTypeSignedByGPGKeys, - SBKeyTypeX509Certificates, - SBKeyTypeSignedByX509CAs, - sbKeyType("This is invalid"), - } { - // Do not use NewPRSignedByKeyData, because it would reject invalid values. - pr := &prSignedBy{ - KeyType: keyType, - KeyData: []byte("abc"), - SignedIdentity: prm, - } - // Pass nil pointers to, kind of, test that the return value does not depend on the parameters. - sar, parsedSig, err := pr.isSignatureAuthorAccepted(nil, nil) - assertSARRejected(t, sar, parsedSig, err) - } - - // Both KeyPath and KeyData set. Do not use NewPRSignedBy*, because it would reject this. - prSB := &prSignedBy{ - KeyType: ktGPG, - KeyPath: "/foo/bar", - KeyData: []byte("abc"), - SignedIdentity: prm, - } - // Pass nil pointers to, kind of, test that the return value does not depend on the parameters. - sar, parsedSig, err = prSB.isSignatureAuthorAccepted(nil, nil) - assertSARRejected(t, sar, parsedSig, err) - - // Invalid KeyPath - pr, err = NewPRSignedByKeyPath(ktGPG, "/this/does/not/exist", prm) - require.NoError(t, err) - // Pass nil pointers to, kind of, test that the return value does not depend on the parameters. - sar, parsedSig, err = pr.isSignatureAuthorAccepted(nil, nil) - assertSARRejected(t, sar, parsedSig, err) - - // Errors initializing the temporary GPG directory and mechanism are not obviously easy to reach. - - // KeyData has no public keys. - pr, err = NewPRSignedByKeyData(ktGPG, []byte{}, prm) - require.NoError(t, err) - // Pass nil pointers to, kind of, test that the return value does not depend on the parameters. - sar, parsedSig, err = pr.isSignatureAuthorAccepted(nil, nil) - assertSARRejectedPolicyRequirement(t, sar, parsedSig, err) - - // A signature which does not GPG verify - pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) - require.NoError(t, err) - // Pass a nil pointer to, kind of, test that the return value does not depend on the image parameter. - sar, parsedSig, err = pr.isSignatureAuthorAccepted(nil, []byte("invalid signature")) - assertSARRejected(t, sar, parsedSig, err) - - // A valid signature using an unknown key. - // (This is (currently?)
rejected through the "mech.Verify fails" path, not the "!identityFound" path, - // because we use a temporary directory and only import the trusted keys.) - pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) - require.NoError(t, err) - sig, err := ioutil.ReadFile("fixtures/unknown-key.signature") - require.NoError(t, err) - // Pass a nil pointer to, kind of, test that the return value does not depend on the image parameter. - sar, parsedSig, err = pr.isSignatureAuthorAccepted(nil, sig) - assertSARRejected(t, sar, parsedSig, err) - - // A valid signature of an invalid JSON. - pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) - require.NoError(t, err) - sig, err = ioutil.ReadFile("fixtures/invalid-blob.signature") - require.NoError(t, err) - // Pass a nil pointer to, kind of, test that the return value does not depend on the image parameter. - sar, parsedSig, err = pr.isSignatureAuthorAccepted(nil, sig) - assertSARRejected(t, sar, parsedSig, err) - assert.IsType(t, InvalidSignatureError{}, err) - - // A valid signature with a rejected identity. - nonmatchingPRM, err := NewPRMExactReference("this/doesnt:match") - require.NoError(t, err) - pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", nonmatchingPRM) - require.NoError(t, err) - sar, parsedSig, err = pr.isSignatureAuthorAccepted(testImage, testImageSig) - assertSARRejectedPolicyRequirement(t, sar, parsedSig, err) - - // Error reading image manifest - image := dirImageMock(t, "fixtures/dir-img-no-manifest", "testing/manifest:latest") - defer image.Close() - sig, err = ioutil.ReadFile("fixtures/dir-img-no-manifest/signature-1") - require.NoError(t, err) - pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) - require.NoError(t, err) - sar, parsedSig, err = pr.isSignatureAuthorAccepted(image, sig) - assertSARRejected(t, sar, parsedSig, err) - - // Error computing manifest digest - image = dirImageMock(t, "fixtures/dir-img-manifest-digest-error", "testing/manifest:latest") - defer image.Close() - sig, err = ioutil.ReadFile("fixtures/dir-img-manifest-digest-error/signature-1") - require.NoError(t, err) - pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) - require.NoError(t, err) - sar, parsedSig, err = pr.isSignatureAuthorAccepted(image, sig) - assertSARRejected(t, sar, parsedSig, err) - - // A valid signature with a non-matching manifest - image = dirImageMock(t, "fixtures/dir-img-modified-manifest", "testing/manifest:latest") - defer image.Close() - sig, err = ioutil.ReadFile("fixtures/dir-img-modified-manifest/signature-1") - require.NoError(t, err) - pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) - require.NoError(t, err) - sar, parsedSig, err = pr.isSignatureAuthorAccepted(image, sig) - assertSARRejectedPolicyRequirement(t, sar, parsedSig, err) -} - -// createInvalidSigDir creates a directory suitable for dirImageMock, in which image.Signatures() -// fails. -// The caller should eventually call os.RemoveAll on the returned path. -func createInvalidSigDir(t *testing.T) string { - dir, err := ioutil.TempDir("", "skopeo-test-unreadable-signature") - require.NoError(t, err) - err = ioutil.WriteFile(path.Join(dir, "manifest.json"), []byte("{}"), 0644) - require.NoError(t, err) - // Creating a 000-permissions file would work for unprivileged accounts, but root (in particular, - // in the Docker container we use for testing) would still have access. So, create a symlink - // pointing to itself, to cause an ELOOP.
(Note that a symlink pointing to a nonexistent file would be treated - // just like a nonexistent signature file, and not an error.) - err = os.Symlink("signature-1", path.Join(dir, "signature-1")) - require.NoError(t, err) - return dir -} - -func TestPRSignedByIsRunningImageAllowed(t *testing.T) { - ktGPG := SBKeyTypeGPGKeys - prm := NewPRMMatchExact() - - // A simple success case: single valid signature. - image := dirImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest") - defer image.Close() - pr, err := NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) - require.NoError(t, err) - allowed, err := pr.isRunningImageAllowed(image) - assertRunningAllowed(t, allowed, err) - - // Error reading signatures - invalidSigDir := createInvalidSigDir(t) - defer os.RemoveAll(invalidSigDir) - image = dirImageMock(t, invalidSigDir, "testing/manifest:latest") - defer image.Close() - pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) - require.NoError(t, err) - allowed, err = pr.isRunningImageAllowed(image) - assertRunningRejected(t, allowed, err) - - // No signatures - image = dirImageMock(t, "fixtures/dir-img-unsigned", "testing/manifest:latest") - defer image.Close() - pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) - require.NoError(t, err) - allowed, err = pr.isRunningImageAllowed(image) - assertRunningRejectedPolicyRequirement(t, allowed, err) - - // 1 invalid signature: use dir-img-valid, but a non-matching Docker reference - image = dirImageMock(t, "fixtures/dir-img-valid", "testing/manifest:notlatest") - defer image.Close() - pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) - require.NoError(t, err) - allowed, err = pr.isRunningImageAllowed(image) - assertRunningRejectedPolicyRequirement(t, allowed, err) - - // 2 valid signatures - image = dirImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:latest") - defer image.Close() - pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) - require.NoError(t, err) - allowed, err = pr.isRunningImageAllowed(image) - assertRunningAllowed(t, allowed, err) - - // One invalid, one valid signature (in this order) - image = dirImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:latest") - defer image.Close() - pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) - require.NoError(t, err) - allowed, err = pr.isRunningImageAllowed(image) - assertRunningAllowed(t, allowed, err) - - // 2 invalid signatures: use dir-img-valid-2, but a non-matching Docker reference - image = dirImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:notlatest") - defer image.Close() - pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) - require.NoError(t, err) - allowed, err = pr.isRunningImageAllowed(image) - assertRunningRejectedPolicyRequirement(t, allowed, err) -} diff --git a/vendor/github.com/containers/image/signature/policy_eval_simple.go b/vendor/github.com/containers/image/signature/policy_eval_simple.go deleted file mode 100644 index 19a71e6d99e2..000000000000 --- a/vendor/github.com/containers/image/signature/policy_eval_simple.go +++ /dev/null @@ -1,28 +0,0 @@ -// Policy evaluation for the various simple PolicyRequirement types. 
- -package signature - -import ( - "fmt" - - "github.com/containers/image/transports" - "github.com/containers/image/types" -) - -func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { - // prInsecureAcceptAnything semantics: Every image is allowed to run, - // but this does not consider the signature as verified. - return sarUnknown, nil, nil -} - -func (pr *prInsecureAcceptAnything) isRunningImageAllowed(image types.UnparsedImage) (bool, error) { - return true, nil -} - -func (pr *prReject) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { - return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference()))) -} - -func (pr *prReject) isRunningImageAllowed(image types.UnparsedImage) (bool, error) { - return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference()))) -} diff --git a/vendor/github.com/containers/image/signature/policy_eval_simple_test.go b/vendor/github.com/containers/image/signature/policy_eval_simple_test.go deleted file mode 100644 index aae4b6a8b83e..000000000000 --- a/vendor/github.com/containers/image/signature/policy_eval_simple_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package signature - -import ( - "testing" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" -) - -// nameOnlyImageMock is a mock of types.UnparsedImage which only allows transports.ImageName to work -type nameOnlyImageMock struct { - forbiddenImageMock -} - -func (nameOnlyImageMock) Reference() types.ImageReference { - return nameOnlyImageReferenceMock("== StringWithinTransport mock") -} - -// nameOnlyImageReferenceMock is a mock of types.ImageReference which only allows transports.ImageName to work, returning self. -type nameOnlyImageReferenceMock string - -func (ref nameOnlyImageReferenceMock) Transport() types.ImageTransport { - return nameImageTransportMock("== Transport mock") -} -func (ref nameOnlyImageReferenceMock) StringWithinTransport() string { - return string(ref) -} -func (ref nameOnlyImageReferenceMock) DockerReference() reference.Named { - panic("unexpected call to a mock function") -} -func (ref nameOnlyImageReferenceMock) PolicyConfigurationIdentity() string { - panic("unexpected call to a mock function") -} -func (ref nameOnlyImageReferenceMock) PolicyConfigurationNamespaces() []string { - panic("unexpected call to a mock function") -} -func (ref nameOnlyImageReferenceMock) NewImage(ctx *types.SystemContext) (types.Image, error) { - panic("unexpected call to a mock function") -} -func (ref nameOnlyImageReferenceMock) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - panic("unexpected call to a mock function") -} -func (ref nameOnlyImageReferenceMock) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - panic("unexpected call to a mock function") -} -func (ref nameOnlyImageReferenceMock) DeleteImage(ctx *types.SystemContext) error { - panic("unexpected call to a mock function") -} - -func TestPRInsecureAcceptAnythingIsSignatureAuthorAccepted(t *testing.T) { - pr := NewPRInsecureAcceptAnything() - // Pass nil signature to, kind of, test that the return value does not depend on it. 
- sar, parsedSig, err := pr.isSignatureAuthorAccepted(nameOnlyImageMock{}, nil) - assertSARUnknown(t, sar, parsedSig, err) -} - -func TestPRInsecureAcceptAnythingIsRunningImageAllowed(t *testing.T) { - pr := NewPRInsecureAcceptAnything() - res, err := pr.isRunningImageAllowed(nameOnlyImageMock{}) - assertRunningAllowed(t, res, err) -} - -func TestPRRejectIsSignatureAuthorAccepted(t *testing.T) { - pr := NewPRReject() - // Pass nil signature to, kind of, test that the return value does not depend on it. - sar, parsedSig, err := pr.isSignatureAuthorAccepted(nameOnlyImageMock{}, nil) - assertSARRejectedPolicyRequirement(t, sar, parsedSig, err) -} - -func TestPRRejectIsRunningImageAllowed(t *testing.T) { - pr := NewPRReject() - res, err := pr.isRunningImageAllowed(nameOnlyImageMock{}) - assertRunningRejectedPolicyRequirement(t, res, err) -} diff --git a/vendor/github.com/containers/image/signature/policy_eval_test.go b/vendor/github.com/containers/image/signature/policy_eval_test.go deleted file mode 100644 index 7cfcb3fbf6b2..000000000000 --- a/vendor/github.com/containers/image/signature/policy_eval_test.go +++ /dev/null @@ -1,521 +0,0 @@ -package signature - -import ( - "fmt" - "os" - "testing" - - "github.com/containers/image/docker" - "github.com/containers/image/docker/policyconfiguration" - "github.com/containers/image/docker/reference" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestPolicyRequirementError(t *testing.T) { - // A stupid test just to keep code coverage - s := "test" - err := PolicyRequirementError(s) - assert.Equal(t, s, err.Error()) -} - -func TestPolicyContextChangeState(t *testing.T) { - pc, err := NewPolicyContext(&Policy{Default: PolicyRequirements{NewPRReject()}}) - require.NoError(t, err) - defer pc.Destroy() - - require.Equal(t, pcReady, pc.state) - err = pc.changeState(pcReady, pcInUse) - require.NoError(t, err) - - err = pc.changeState(pcReady, pcInUse) - require.Error(t, err) - - // Return state to pcReady to allow pc.Destroy to clean up. - err = pc.changeState(pcInUse, pcReady) - require.NoError(t, err) -} - -func TestPolicyContextNewDestroy(t *testing.T) { - pc, err := NewPolicyContext(&Policy{Default: PolicyRequirements{NewPRReject()}}) - require.NoError(t, err) - assert.Equal(t, pcReady, pc.state) - - err = pc.Destroy() - require.NoError(t, err) - assert.Equal(t, pcDestroyed, pc.state) - - // Trying to destroy when not pcReady - pc, err = NewPolicyContext(&Policy{Default: PolicyRequirements{NewPRReject()}}) - require.NoError(t, err) - err = pc.changeState(pcReady, pcInUse) - require.NoError(t, err) - err = pc.Destroy() - require.Error(t, err) - assert.Equal(t, pcInUse, pc.state) // The state, and hopefully nothing else, has changed. - - err = pc.changeState(pcInUse, pcReady) - require.NoError(t, err) - err = pc.Destroy() - assert.NoError(t, err) -} - -// pcImageReferenceMock is a mock of types.ImageReference which returns itself in DockerReference -// and handles PolicyConfigurationIdentity and PolicyConfigurationNamespaces consistently. -type pcImageReferenceMock struct { - transportName string - ref reference.Named -} - -func (ref pcImageReferenceMock) Transport() types.ImageTransport { - return nameImageTransportMock(ref.transportName) -} -func (ref pcImageReferenceMock) StringWithinTransport() string { - // We use this in error messages, so sadly we must return something.
- return "== StringWithinTransport mock" -} -func (ref pcImageReferenceMock) DockerReference() reference.Named { - return ref.ref -} -func (ref pcImageReferenceMock) PolicyConfigurationIdentity() string { - res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) - if res == "" || err != nil { - panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) - } - return res -} -func (ref pcImageReferenceMock) PolicyConfigurationNamespaces() []string { - if ref.ref == nil { - panic("unexpected call to a mock function") - } - return policyconfiguration.DockerReferenceNamespaces(ref.ref) -} -func (ref pcImageReferenceMock) NewImage(ctx *types.SystemContext) (types.Image, error) { - panic("unexpected call to a mock function") -} -func (ref pcImageReferenceMock) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - panic("unexpected call to a mock function") -} -func (ref pcImageReferenceMock) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - panic("unexpected call to a mock function") -} -func (ref pcImageReferenceMock) DeleteImage(ctx *types.SystemContext) error { - panic("unexpected call to a mock function") -} - -func TestPolicyContextRequirementsForImageRefNotRegisteredTransport(t *testing.T) { - transports.Delete("docker") - assert.Nil(t, transports.Get("docker")) - - defer func() { - assert.Nil(t, transports.Get("docker")) - transports.Register(docker.Transport) - assert.NotNil(t, transports.Get("docker")) - }() - - pr := []PolicyRequirement{ - xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RH"), NewPRMMatchRepository()), - } - policy := &Policy{ - Default: PolicyRequirements{NewPRReject()}, - Transports: map[string]PolicyTransportScopes{ - "docker": { - "registry.access.redhat.com": pr, - }, - }, - } - pc, err := NewPolicyContext(policy) - require.NoError(t, err) - ref, err := reference.ParseNormalizedNamed("registry.access.redhat.com/rhel7:latest") - require.NoError(t, err) - reqs := pc.requirementsForImageRef(pcImageReferenceMock{"docker", ref}) - assert.True(t, &(reqs[0]) == &(pr[0])) - assert.True(t, len(reqs) == len(pr)) - -} - -func TestPolicyContextRequirementsForImageRef(t *testing.T) { - ktGPG := SBKeyTypeGPGKeys - prm := NewPRMMatchRepoDigestOrExact() - - policy := &Policy{ - Default: PolicyRequirements{NewPRReject()}, - Transports: map[string]PolicyTransportScopes{}, - } - // Just put _something_ into the PolicyTransportScopes map for the keys we care about, and make it pairwise - // distinct so that we can compare the values and show them when debugging the tests. 
- for _, t := range []struct{ transport, scope string }{ - {"docker", ""}, - {"docker", "unmatched"}, - {"docker", "deep.com"}, - {"docker", "deep.com/n1"}, - {"docker", "deep.com/n1/n2"}, - {"docker", "deep.com/n1/n2/n3"}, - {"docker", "deep.com/n1/n2/n3/repo"}, - {"docker", "deep.com/n1/n2/n3/repo:tag2"}, - {"atomic", "unmatched"}, - } { - if _, ok := policy.Transports[t.transport]; !ok { - policy.Transports[t.transport] = PolicyTransportScopes{} - } - policy.Transports[t.transport][t.scope] = PolicyRequirements{xNewPRSignedByKeyData(ktGPG, []byte(t.transport+t.scope), prm)} - } - - pc, err := NewPolicyContext(policy) - require.NoError(t, err) - - for _, c := range []struct{ inputTransport, input, matchedTransport, matched string }{ - // Full match - {"docker", "deep.com/n1/n2/n3/repo:tag2", "docker", "deep.com/n1/n2/n3/repo:tag2"}, - // Namespace matches - {"docker", "deep.com/n1/n2/n3/repo:nottag2", "docker", "deep.com/n1/n2/n3/repo"}, - {"docker", "deep.com/n1/n2/n3/notrepo:tag2", "docker", "deep.com/n1/n2/n3"}, - {"docker", "deep.com/n1/n2/notn3/repo:tag2", "docker", "deep.com/n1/n2"}, - {"docker", "deep.com/n1/notn2/n3/repo:tag2", "docker", "deep.com/n1"}, - // Host name match - {"docker", "deep.com/notn1/n2/n3/repo:tag2", "docker", "deep.com"}, - // Default - {"docker", "this.doesnt/match:anything", "docker", ""}, - // No match within a matched transport which doesn't have a "" scope - {"atomic", "this.doesnt/match:anything", "", ""}, - // No configuration available for this transport at all - {"dir", "what/ever", "", ""}, // "what/ever" is not a valid scope for the real "dir" transport, but we only need it to be a valid reference.Named. - } { - var expected PolicyRequirements - if c.matchedTransport != "" { - e, ok := policy.Transports[c.matchedTransport][c.matched] - require.True(t, ok, fmt.Sprintf("case %s:%s: expected reqs not found", c.inputTransport, c.input)) - expected = e - } else { - expected = policy.Default - } - - ref, err := reference.ParseNormalizedNamed(c.input) - require.NoError(t, err) - reqs := pc.requirementsForImageRef(pcImageReferenceMock{c.inputTransport, ref}) - comment := fmt.Sprintf("case %s:%s: %#v", c.inputTransport, c.input, reqs[0]) - // Do not use assert.Equal, which would do a deep contents comparison; we want to compare - // the pointers. Also, == does not work on slices; so test that the slices start at the - // same element and have the same length. - assert.True(t, &(reqs[0]) == &(expected[0]), comment) - assert.True(t, len(reqs) == len(expected), comment) - } -} - -// pcImageMock returns a types.UnparsedImage for a directory, claiming a specified dockerReference and implementing PolicyConfigurationIdentity/PolicyConfigurationNamespaces. -// The caller must call .Close() on the returned Image. 
-func pcImageMock(t *testing.T, dir, dockerReference string) types.UnparsedImage { - ref, err := reference.ParseNormalizedNamed(dockerReference) - require.NoError(t, err) - return dirImageMockWithRef(t, dir, pcImageReferenceMock{"docker", ref}) -} - -func TestPolicyContextGetSignaturesWithAcceptedAuthor(t *testing.T) { - expectedSig := &Signature{ - DockerManifestDigest: TestImageManifestDigest, - DockerReference: "testing/manifest:latest", - } - - pc, err := NewPolicyContext(&Policy{ - Default: PolicyRequirements{NewPRReject()}, - Transports: map[string]PolicyTransportScopes{ - "docker": { - "docker.io/testing/manifest:latest": { - xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchExact()), - }, - "docker.io/testing/manifest:twoAccepts": { - xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()), - xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()), - }, - "docker.io/testing/manifest:acceptReject": { - xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()), - NewPRReject(), - }, - "docker.io/testing/manifest:acceptUnknown": { - xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()), - xNewPRSignedBaseLayer(NewPRMMatchRepository()), - }, - "docker.io/testing/manifest:rejectUnknown": { - NewPRReject(), - xNewPRSignedBaseLayer(NewPRMMatchRepository()), - }, - "docker.io/testing/manifest:unknown": { - xNewPRSignedBaseLayer(NewPRMMatchRepository()), - }, - "docker.io/testing/manifest:unknown2": { - NewPRInsecureAcceptAnything(), - }, - "docker.io/testing/manifest:invalidEmptyRequirements": {}, - }, - }, - }) - require.NoError(t, err) - defer pc.Destroy() - - // Success - img := pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest") - defer img.Close() - sigs, err := pc.GetSignaturesWithAcceptedAuthor(img) - require.NoError(t, err) - assert.Equal(t, []*Signature{expectedSig}, sigs) - - // Two signatures - // FIXME? Use really different signatures for this? 
- img = pcImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:latest") - defer img.Close() - sigs, err = pc.GetSignaturesWithAcceptedAuthor(img) - require.NoError(t, err) - assert.Equal(t, []*Signature{expectedSig, expectedSig}, sigs) - - // No signatures - img = pcImageMock(t, "fixtures/dir-img-unsigned", "testing/manifest:latest") - defer img.Close() - sigs, err = pc.GetSignaturesWithAcceptedAuthor(img) - require.NoError(t, err) - assert.Empty(t, sigs) - - // Only invalid signatures - img = pcImageMock(t, "fixtures/dir-img-modified-manifest", "testing/manifest:latest") - defer img.Close() - sigs, err = pc.GetSignaturesWithAcceptedAuthor(img) - require.NoError(t, err) - assert.Empty(t, sigs) - - // 1 invalid, 1 valid signature (in this order) - img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:latest") - defer img.Close() - sigs, err = pc.GetSignaturesWithAcceptedAuthor(img) - require.NoError(t, err) - assert.Equal(t, []*Signature{expectedSig}, sigs) - - // Two sarAccepted results for one signature - img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:twoAccepts") - defer img.Close() - sigs, err = pc.GetSignaturesWithAcceptedAuthor(img) - require.NoError(t, err) - assert.Equal(t, []*Signature{expectedSig}, sigs) - - // sarAccepted+sarRejected for a signature - img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:acceptReject") - defer img.Close() - sigs, err = pc.GetSignaturesWithAcceptedAuthor(img) - require.NoError(t, err) - assert.Empty(t, sigs) - - // sarAccepted+sarUnknown for a signature - img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:acceptUnknown") - defer img.Close() - sigs, err = pc.GetSignaturesWithAcceptedAuthor(img) - require.NoError(t, err) - assert.Equal(t, []*Signature{expectedSig}, sigs) - - // sarRejected+sarUnknown for a signature - img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:rejectUnknown") - defer img.Close() - sigs, err = pc.GetSignaturesWithAcceptedAuthor(img) - require.NoError(t, err) - assert.Empty(t, sigs) - - // sarUnknown only - img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:unknown") - defer img.Close() - sigs, err = pc.GetSignaturesWithAcceptedAuthor(img) - require.NoError(t, err) - assert.Empty(t, sigs) - - img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:unknown2") - defer img.Close() - sigs, err = pc.GetSignaturesWithAcceptedAuthor(img) - require.NoError(t, err) - assert.Empty(t, sigs) - - // Empty list of requirements (invalid) - img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:invalidEmptyRequirements") - defer img.Close() - sigs, err = pc.GetSignaturesWithAcceptedAuthor(img) - require.NoError(t, err) - assert.Empty(t, sigs) - - // Failures: Make sure we return nil sigs. - - // Unexpected state (context already destroyed) - destroyedPC, err := NewPolicyContext(pc.Policy) - require.NoError(t, err) - err = destroyedPC.Destroy() - require.NoError(t, err) - img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest") - defer img.Close() - sigs, err = destroyedPC.GetSignaturesWithAcceptedAuthor(img) - assert.Error(t, err) - assert.Nil(t, sigs) - // Not testing the pcInUse->pcReady transition, that would require custom PolicyRequirement - // implementations meddling with the state, or threads. This is for catching trivial programmer - // mistakes only, anyway. - - // Error reading signatures. 
- invalidSigDir := createInvalidSigDir(t) - defer os.RemoveAll(invalidSigDir) - img = pcImageMock(t, invalidSigDir, "testing/manifest:latest") - defer img.Close() - sigs, err = pc.GetSignaturesWithAcceptedAuthor(img) - assert.Error(t, err) - assert.Nil(t, sigs) -} - -func TestPolicyContextIsRunningImageAllowed(t *testing.T) { - pc, err := NewPolicyContext(&Policy{ - Default: PolicyRequirements{NewPRReject()}, - Transports: map[string]PolicyTransportScopes{ - "docker": { - "docker.io/testing/manifest:latest": { - xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchExact()), - }, - "docker.io/testing/manifest:twoAllows": { - xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()), - xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()), - }, - "docker.io/testing/manifest:allowDeny": { - xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()), - NewPRReject(), - }, - "docker.io/testing/manifest:reject": { - NewPRReject(), - }, - "docker.io/testing/manifest:acceptAnything": { - NewPRInsecureAcceptAnything(), - }, - "docker.io/testing/manifest:invalidEmptyRequirements": {}, - }, - }, - }) - require.NoError(t, err) - defer pc.Destroy() - - // Success - img := pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest") - defer img.Close() - res, err := pc.IsRunningImageAllowed(img) - assertRunningAllowed(t, res, err) - - // Two signatures - // FIXME? Use really different signatures for this? - img = pcImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:latest") - defer img.Close() - res, err = pc.IsRunningImageAllowed(img) - assertRunningAllowed(t, res, err) - - // No signatures - img = pcImageMock(t, "fixtures/dir-img-unsigned", "testing/manifest:latest") - defer img.Close() - res, err = pc.IsRunningImageAllowed(img) - assertRunningRejectedPolicyRequirement(t, res, err) - - // Only invalid signatures - img = pcImageMock(t, "fixtures/dir-img-modified-manifest", "testing/manifest:latest") - defer img.Close() - res, err = pc.IsRunningImageAllowed(img) - assertRunningRejectedPolicyRequirement(t, res, err) - - // 1 invalid, 1 valid signature (in this order) - img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:latest") - defer img.Close() - res, err = pc.IsRunningImageAllowed(img) - assertRunningAllowed(t, res, err) - - // Two allowed results - img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:twoAllows") - defer img.Close() - res, err = pc.IsRunningImageAllowed(img) - assertRunningAllowed(t, res, err) - - // Allow + deny results - img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:allowDeny") - defer img.Close() - res, err = pc.IsRunningImageAllowed(img) - assertRunningRejectedPolicyRequirement(t, res, err) - - // prReject works - img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:reject") - defer img.Close() - res, err = pc.IsRunningImageAllowed(img) - assertRunningRejectedPolicyRequirement(t, res, err) - - // prInsecureAcceptAnything works - img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:acceptAnything") - defer img.Close() - res, err = pc.IsRunningImageAllowed(img) - assertRunningAllowed(t, res, err) - - // Empty list of requirements (invalid) - img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:invalidEmptyRequirements") - defer img.Close() - res, err = pc.IsRunningImageAllowed(img) - assertRunningRejectedPolicyRequirement(t, res, err) - - // Unexpected state 
-	// (context already destroyed)
-	destroyedPC, err := NewPolicyContext(pc.Policy)
-	require.NoError(t, err)
-	err = destroyedPC.Destroy()
-	require.NoError(t, err)
-	img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
-	defer img.Close()
-	res, err = destroyedPC.IsRunningImageAllowed(img)
-	assertRunningRejected(t, res, err)
-	// Not testing the pcInUse->pcReady transition, that would require custom PolicyRequirement
-	// implementations meddling with the state, or threads. This is for catching trivial programmer
-	// mistakes only, anyway.
-}
-
-// Helpers for validating PolicyRequirement.isSignatureAuthorAccepted results:
-
-// assertSARAccepted verifies that isSignatureAuthorAccepted returns a consistent sarAccepted result
-// with the expected signature.
-func assertSARAccepted(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error, expectedSig Signature) {
-	assert.Equal(t, sarAccepted, sar)
-	assert.Equal(t, &expectedSig, parsedSig)
-	assert.NoError(t, err)
-}
-
-// assertSARRejected verifies that isSignatureAuthorAccepted returns a consistent sarRejected result.
-func assertSARRejected(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error) {
-	assert.Equal(t, sarRejected, sar)
-	assert.Nil(t, parsedSig)
-	assert.Error(t, err)
-}
-
-// assertSARRejectedPolicyRequirement verifies that isSignatureAuthorAccepted returns a consistent sarRejected result,
-// and that the returned error is a PolicyRequirementError.
-func assertSARRejectedPolicyRequirement(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error) {
-	assertSARRejected(t, sar, parsedSig, err)
-	assert.IsType(t, PolicyRequirementError(""), err)
-}
-
-// assertSARUnknown verifies that isSignatureAuthorAccepted returns a consistent sarUnknown result.
-func assertSARUnknown(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error) {
-	assert.Equal(t, sarUnknown, sar)
-	assert.Nil(t, parsedSig)
-	assert.NoError(t, err)
-}
-
-// Helpers for validating PolicyRequirement.isRunningImageAllowed results:
-
-// assertRunningAllowed verifies that isRunningImageAllowed returns a consistent true result
-func assertRunningAllowed(t *testing.T, allowed bool, err error) {
-	assert.Equal(t, true, allowed)
-	assert.NoError(t, err)
-}
-
-// assertRunningRejected verifies that isRunningImageAllowed returns a consistent false result
-func assertRunningRejected(t *testing.T, allowed bool, err error) {
-	assert.Equal(t, false, allowed)
-	assert.Error(t, err)
-}
-
-// assertRunningRejectedPolicyRequirement verifies that isRunningImageAllowed returns a consistent false result
-// and that the returned error is a PolicyRequirementError.
-func assertRunningRejectedPolicyRequirement(t *testing.T, allowed bool, err error) {
-	assertRunningRejected(t, allowed, err)
-	assert.IsType(t, PolicyRequirementError(""), err)
-}
diff --git a/vendor/github.com/containers/image/signature/policy_reference_match.go b/vendor/github.com/containers/image/signature/policy_reference_match.go
deleted file mode 100644
index a8dad677011d..000000000000
--- a/vendor/github.com/containers/image/signature/policy_reference_match.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// PolicyReferenceMatch implementations.
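Before the matcher implementations that follow, a brief aside on how callers drive this (now-removed) policy API. The deleted tests above exercise it through mocks; a minimal sketch of real usage, built only from constructors that appear in the deleted code (NewPRReject, NewPRSignedByKeyPath, NewPRMMatchRepoDigestOrExact, NewPolicyContext), with an illustrative scope and key path:

```go
package main

import (
	"fmt"

	"github.com/containers/image/signature"
	"github.com/containers/image/types"
)

// evaluate rejects everything by default, but allows images under
// docker.io/myorg when they carry a signature from the given GPG key.
// img must come from a transport (omitted here); the key path is made up.
func evaluate(img types.UnparsedImage) (bool, error) {
	req, err := signature.NewPRSignedByKeyPath(
		signature.SBKeyTypeGPGKeys,
		"/etc/pki/containers/key.gpg", // illustrative path
		signature.NewPRMMatchRepoDigestOrExact())
	if err != nil {
		return false, err
	}
	policy := &signature.Policy{
		Default: signature.PolicyRequirements{signature.NewPRReject()},
		Transports: map[string]signature.PolicyTransportScopes{
			"docker": {
				"docker.io/myorg": signature.PolicyRequirements{req},
			},
		},
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		return false, err
	}
	defer pc.Destroy()

	allowed, err := pc.IsRunningImageAllowed(img)
	fmt.Printf("allowed=%v err=%v\n", allowed, err)
	return allowed, err
}
```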
-
-package signature
-
-import (
-	"fmt"
-
-	"github.com/containers/image/docker/reference"
-	"github.com/containers/image/transports"
-	"github.com/containers/image/types"
-)
-
-// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
-func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
-	r1 := image.Reference().DockerReference()
-	if r1 == nil {
-		return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
-			transports.ImageName(image.Reference())))
-	}
-	r2, err := reference.ParseNormalizedNamed(s2)
-	if err != nil {
-		return nil, nil, err
-	}
-	return r1, r2, nil
-}
-
-func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
-	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
-		return false
-	}
-	return signature.String() == intended.String()
-}
-
-func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-
-	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
-	if reference.IsNameOnly(signature) {
-		return false
-	}
-	switch intended.(type) {
-	case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
-		return signature.String() == intended.String()
-	case reference.Canonical:
-		// We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy and in UnparsedImage.Manifest.
-		// Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
-		// we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms)
-		return signature.Name() == intended.Name()
-	default: // !reference.IsNameOnly(intended)
-		return false
-	}
-}
-
-func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-	return signature.Name() == intended.Name()
-}
-
-// parseDockerReferences converts two reference strings into parsed entities, failing on any error
-func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
-	r1, err := reference.ParseNormalizedNamed(s1)
-	if err != nil {
-		return nil, nil, err
-	}
-	r2, err := reference.ParseNormalizedNamed(s2)
-	if err != nil {
-		return nil, nil, err
-	}
-	return r1, r2, nil
-}
-
-func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-	// prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
-	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
-		return false
-	}
-	return signature.String() == intended.String()
-}
-
-func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-	return signature.Name() == intended.Name()
-}
diff --git a/vendor/github.com/containers/image/signature/policy_reference_match_test.go b/vendor/github.com/containers/image/signature/policy_reference_match_test.go
deleted file mode 100644
index 2ddd1730815a..000000000000
--- a/vendor/github.com/containers/image/signature/policy_reference_match_test.go
+++ /dev/null
@@ -1,366 +0,0 @@
-package signature
-
-import (
-	"context"
-	"fmt"
-	"testing"
-
-	"github.com/containers/image/docker/reference"
-	"github.com/containers/image/types"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-const (
-	fullRHELRef       = "registry.access.redhat.com/rhel7/rhel:7.2.3"
-	untaggedRHELRef   = "registry.access.redhat.com/rhel7/rhel"
-	digestSuffix      = "@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
-	digestSuffixOther = "@sha256:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
-)
-
-func TestParseImageAndDockerReference(t *testing.T) {
-	const (
-		ok1  = "busybox"
-		ok2  = fullRHELRef
-		bad1 = "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES"
-		bad2 = ""
-	)
-	// Success
-	ref, err := reference.ParseNormalizedNamed(ok1)
-	require.NoError(t, err)
-	r1, r2, err := parseImageAndDockerReference(refImageMock{ref}, ok2)
-	require.NoError(t, err)
-	assert.Equal(t, ok1, reference.FamiliarString(r1))
-	assert.Equal(t, ok2, reference.FamiliarString(r2))
-
-	// Unidentified images are rejected.
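The matchers above lean entirely on reference canonicalization (the unidentified-image test this comment introduces continues right below). As a self-contained illustration of why `busybox:latest` and `docker.io/library/busybox:latest` compare equal here, using the same docker/reference helpers the deleted code imports:

```go
package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
)

func main() {
	a, err := reference.ParseNormalizedNamed("busybox:latest")
	if err != nil {
		panic(err)
	}
	b, err := reference.ParseNormalizedNamed("docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}
	// Both normalize to the same canonical form, so exact matching succeeds:
	fmt.Println(a.String() == b.String()) // true
	// Repository-only comparison, as used by prmMatchRepository:
	fmt.Println(a.Name() == b.Name()) // true: docker.io/library/busybox
	// FamiliarString undoes the normalization for display:
	fmt.Println(reference.FamiliarString(a)) // busybox:latest
}
```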
-	_, _, err = parseImageAndDockerReference(refImageMock{nil}, ok2)
-	require.Error(t, err)
-	assert.IsType(t, PolicyRequirementError(""), err)
-
-	// Failures
-	for _, refs := range [][]string{
-		{bad1, ok2},
-		{ok1, bad2},
-		{bad1, bad2},
-	} {
-		ref, err := reference.ParseNormalizedNamed(refs[0])
-		if err == nil {
-			_, _, err := parseImageAndDockerReference(refImageMock{ref}, refs[1])
-			assert.Error(t, err)
-		}
-	}
-}
-
-// refImageMock is a mock of types.UnparsedImage which returns itself in Reference().DockerReference.
-type refImageMock struct{ reference.Named }
-
-func (ref refImageMock) Reference() types.ImageReference {
-	return refImageReferenceMock{ref.Named}
-}
-func (ref refImageMock) Close() error {
-	panic("unexpected call to a mock function")
-}
-func (ref refImageMock) Manifest() ([]byte, string, error) {
-	panic("unexpected call to a mock function")
-}
-func (ref refImageMock) Signatures(context.Context) ([][]byte, error) {
-	panic("unexpected call to a mock function")
-}
-
-// refImageReferenceMock is a mock of types.ImageReference which returns itself in DockerReference.
-type refImageReferenceMock struct{ reference.Named }
-
-func (ref refImageReferenceMock) Transport() types.ImageTransport {
-	// We use this in error messages, so sadly we must return something. But right now we do so only when DockerReference is nil, so restrict to that.
-	if ref.Named == nil {
-		return nameImageTransportMock("== Transport mock")
-	}
-	panic("unexpected call to a mock function")
-}
-func (ref refImageReferenceMock) StringWithinTransport() string {
-	// We use this in error messages, so sadly we must return something. But right now we do so only when DockerReference is nil, so restrict to that.
-	if ref.Named == nil {
-		return "== StringWithinTransport for an image with no Docker support"
-	}
-	panic("unexpected call to a mock function")
-}
-func (ref refImageReferenceMock) DockerReference() reference.Named {
-	return ref.Named
-}
-func (ref refImageReferenceMock) PolicyConfigurationIdentity() string {
-	panic("unexpected call to a mock function")
-}
-func (ref refImageReferenceMock) PolicyConfigurationNamespaces() []string {
-	panic("unexpected call to a mock function")
-}
-func (ref refImageReferenceMock) NewImage(ctx *types.SystemContext) (types.Image, error) {
-	panic("unexpected call to a mock function")
-}
-func (ref refImageReferenceMock) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
-	panic("unexpected call to a mock function")
-}
-func (ref refImageReferenceMock) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
-	panic("unexpected call to a mock function")
-}
-func (ref refImageReferenceMock) DeleteImage(ctx *types.SystemContext) error {
-	panic("unexpected call to a mock function")
-}
-
-// nameImageTransportMock is a mock of types.ImageTransport which returns itself in Name.
-type nameImageTransportMock string
-
-func (name nameImageTransportMock) Name() string {
-	return string(name)
-}
-func (name nameImageTransportMock) ParseReference(reference string) (types.ImageReference, error) {
-	panic("unexpected call to a mock function")
-}
-func (name nameImageTransportMock) ValidatePolicyConfigurationScope(scope string) error {
-	panic("unexpected call to a mock function")
-}
-
-type prmSymmetricTableTest struct {
-	refA, refB string
-	result     bool
-}
-
-// Test cases for exact reference match. The behavior is supposed to be symmetric.
-var prmExactMatchTestTable = []prmSymmetricTableTest{ - // Success, simple matches - {"busybox:latest", "busybox:latest", true}, - {fullRHELRef, fullRHELRef, true}, - {"busybox" + digestSuffix, "busybox" + digestSuffix, true}, // NOTE: This is not documented; signing digests is not recommended at this time. - // Non-canonical reference format is canonicalized - {"library/busybox:latest", "busybox:latest", true}, - {"docker.io/library/busybox:latest", "busybox:latest", true}, - {"library/busybox" + digestSuffix, "busybox" + digestSuffix, true}, - // Mismatch - {"busybox:latest", "busybox:notlatest", false}, - {"busybox:latest", "notbusybox:latest", false}, - {"busybox:latest", "hostname/library/busybox:notlatest", false}, - {"hostname/library/busybox:latest", "busybox:notlatest", false}, - {"busybox:latest", fullRHELRef, false}, - {"busybox" + digestSuffix, "notbusybox" + digestSuffix, false}, - {"busybox:latest", "busybox" + digestSuffix, false}, - {"busybox" + digestSuffix, "busybox" + digestSuffixOther, false}, - // NameOnly references - {"busybox", "busybox:latest", false}, - {"busybox", "busybox" + digestSuffix, false}, - {"busybox", "busybox", false}, - // References with both tags and digests: We match them exactly (requiring BOTH to match) - // NOTE: Again, this is not documented behavior; the recommendation is to sign tags, not digests, and then tag-and-digest references won’t match the signed identity. - {"busybox:latest" + digestSuffix, "busybox:latest" + digestSuffix, true}, - {"busybox:latest" + digestSuffix, "busybox:latest" + digestSuffixOther, false}, - {"busybox:latest" + digestSuffix, "busybox:notlatest" + digestSuffix, false}, - {"busybox:latest" + digestSuffix, "busybox" + digestSuffix, false}, - {"busybox:latest" + digestSuffix, "busybox:latest", false}, - // Invalid format - {"UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", "busybox:latest", false}, - {"", "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", false}, - // Even if they are exactly equal, invalid values are rejected. - {"INVALID", "INVALID", false}, -} - -// Test cases for repository-only reference match. The behavior is supposed to be symmetric. -var prmRepositoryMatchTestTable = []prmSymmetricTableTest{ - // Success, simple matches - {"busybox:latest", "busybox:latest", true}, - {fullRHELRef, fullRHELRef, true}, - {"busybox" + digestSuffix, "busybox" + digestSuffix, true}, // NOTE: This is not documented; signing digests is not recommended at this time. - // Non-canonical reference format is canonicalized - {"library/busybox:latest", "busybox:latest", true}, - {"docker.io/library/busybox:latest", "busybox:latest", true}, - {"library/busybox" + digestSuffix, "busybox" + digestSuffix, true}, - // The same as above, but with mismatching tags - {"busybox:latest", "busybox:notlatest", true}, - {fullRHELRef + "tagsuffix", fullRHELRef, true}, - {"library/busybox:latest", "busybox:notlatest", true}, - {"busybox:latest", "library/busybox:notlatest", true}, - {"docker.io/library/busybox:notlatest", "busybox:latest", true}, - {"busybox:notlatest", "docker.io/library/busybox:latest", true}, - {"busybox:latest", "busybox" + digestSuffix, true}, - {"busybox" + digestSuffix, "busybox" + digestSuffixOther, true}, // Even this is accepted here. (This could more reasonably happen with two different digest algorithms.) 
- // The same as above, but with defaulted tags (should not actually happen) - {"busybox", "busybox:notlatest", true}, - {fullRHELRef, untaggedRHELRef, true}, - {"busybox", "busybox" + digestSuffix, true}, - {"library/busybox", "busybox", true}, - {"docker.io/library/busybox", "busybox", true}, - // Mismatch - {"busybox:latest", "notbusybox:latest", false}, - {"hostname/library/busybox:latest", "busybox:notlatest", false}, - {"busybox:latest", fullRHELRef, false}, - {"busybox" + digestSuffix, "notbusybox" + digestSuffix, false}, - // References with both tags and digests: We ignore both anyway. - {"busybox:latest" + digestSuffix, "busybox:latest" + digestSuffix, true}, - {"busybox:latest" + digestSuffix, "busybox:latest" + digestSuffixOther, true}, - {"busybox:latest" + digestSuffix, "busybox:notlatest" + digestSuffix, true}, - {"busybox:latest" + digestSuffix, "busybox" + digestSuffix, true}, - {"busybox:latest" + digestSuffix, "busybox:latest", true}, - // Invalid format - {"UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", "busybox:latest", false}, - {"", "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", false}, - // Even if they are exactly equal, invalid values are rejected. - {"INVALID", "INVALID", false}, -} - -func testImageAndSig(t *testing.T, prm PolicyReferenceMatch, imageRef, sigRef string, result bool) { - // This assumes that all ways to obtain a reference.Named perform equivalent validation, - // and therefore values refused by reference.ParseNormalizedNamed can not happen in practice. - parsedImageRef, err := reference.ParseNormalizedNamed(imageRef) - if err != nil { - return - } - res := prm.matchesDockerReference(refImageMock{parsedImageRef}, sigRef) - assert.Equal(t, result, res, fmt.Sprintf("%s vs. %s", imageRef, sigRef)) -} - -func TestPRMMatchExactMatchesDockerReference(t *testing.T) { - prm := NewPRMMatchExact() - for _, test := range prmExactMatchTestTable { - testImageAndSig(t, prm, test.refA, test.refB, test.result) - testImageAndSig(t, prm, test.refB, test.refA, test.result) - } - // Even if they are signed with an empty string as a reference, unidentified images are rejected. - res := prm.matchesDockerReference(refImageMock{nil}, "") - assert.False(t, res, `unidentified vs. ""`) -} - -func TestPMMMatchRepoDigestOrExactMatchesDockerReference(t *testing.T) { - prm := NewPRMMatchRepoDigestOrExact() - - // prmMatchRepoDigestOrExact is a middle ground between prmMatchExact and prmMatchRepository: - // It accepts anything prmMatchExact accepts,… - for _, test := range prmExactMatchTestTable { - if test.result == true { - testImageAndSig(t, prm, test.refA, test.refB, test.result) - testImageAndSig(t, prm, test.refB, test.refA, test.result) - } - } - // … and it rejects everything prmMatchRepository rejects. 
-	for _, test := range prmRepositoryMatchTestTable {
-		if test.result == false {
-			testImageAndSig(t, prm, test.refA, test.refB, test.result)
-			testImageAndSig(t, prm, test.refB, test.refA, test.result)
-		}
-	}
-
-	// The other cases, possibly asymmetrical:
-	for _, test := range []struct {
-		imageRef, sigRef string
-		result           bool
-	}{
-		// Tag mismatch
-		{"busybox:latest", "busybox:notlatest", false},
-		{fullRHELRef + "tagsuffix", fullRHELRef, false},
-		{"library/busybox:latest", "busybox:notlatest", false},
-		{"busybox:latest", "library/busybox:notlatest", false},
-		{"docker.io/library/busybox:notlatest", "busybox:latest", false},
-		{"busybox:notlatest", "docker.io/library/busybox:latest", false},
-		// NameOnly references
-		{"busybox", "busybox:latest", false},
-		{"busybox:latest", "busybox", false},
-		{"busybox", "busybox" + digestSuffix, false},
-		{"busybox" + digestSuffix, "busybox", false},
-		{fullRHELRef, untaggedRHELRef, false},
-		{"busybox", "busybox", false},
-		// Tag references only accept signatures with matching tags.
-		{"busybox:latest", "busybox" + digestSuffix, false},
-		// Digest references accept any signature with matching repository.
-		{"busybox" + digestSuffix, "busybox:latest", true},
-		{"busybox" + digestSuffix, "busybox" + digestSuffixOther, true}, // Even this is accepted here. (This could more reasonably happen with two different digest algorithms.)
-		// References with both tags and digests: We match them exactly (requiring BOTH to match).
-		{"busybox:latest" + digestSuffix, "busybox:latest", false},
-		{"busybox:latest" + digestSuffix, "busybox:notlatest", false},
-		{"busybox:latest", "busybox:latest" + digestSuffix, false},
-		{"busybox:latest" + digestSuffix, "busybox:latest" + digestSuffixOther, false},
-		{"busybox:latest" + digestSuffix, "busybox:notlatest" + digestSuffixOther, false},
-	} {
-		testImageAndSig(t, prm, test.imageRef, test.sigRef, test.result)
-	}
-}
-
-func TestPRMMatchRepositoryMatchesDockerReference(t *testing.T) {
-	prm := NewPRMMatchRepository()
-	for _, test := range prmRepositoryMatchTestTable {
-		testImageAndSig(t, prm, test.refA, test.refB, test.result)
-		testImageAndSig(t, prm, test.refB, test.refA, test.result)
-	}
-	// Even if they are signed with an empty string as a reference, unidentified images are rejected.
-	res := prm.matchesDockerReference(refImageMock{nil}, "")
-	assert.False(t, res, `unidentified vs. ""`)
-}
-
-func TestParseDockerReferences(t *testing.T) {
-	const (
-		ok1  = "busybox"
-		ok2  = fullRHELRef
-		bad1 = "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES"
-		bad2 = ""
-	)
-
-	// Success
-	r1, r2, err := parseDockerReferences(ok1, ok2)
-	require.NoError(t, err)
-	assert.Equal(t, ok1, reference.FamiliarString(r1))
-	assert.Equal(t, ok2, reference.FamiliarString(r2))
-
-	// Failures
-	for _, refs := range [][]string{
-		{bad1, ok2},
-		{ok1, bad2},
-		{bad1, bad2},
-	} {
-		_, _, err := parseDockerReferences(refs[0], refs[1])
-		assert.Error(t, err)
-	}
-}
-
-// forbiddenImageMock is a mock of types.UnparsedImage which ensures Reference is not called
-type forbiddenImageMock struct{}

-func (ref forbiddenImageMock) Reference() types.ImageReference {
-	panic("unexpected call to a mock function")
-}
-func (ref forbiddenImageMock) Close() error {
-	panic("unexpected call to a mock function")
-}
-func (ref forbiddenImageMock) Manifest() ([]byte, string, error) {
-	panic("unexpected call to a mock function")
-}
-func (ref forbiddenImageMock) Signatures(context.Context) ([][]byte, error) {
-	panic("unexpected call to a mock function")
-}
-
-func testExactPRMAndSig(t *testing.T, prmFactory func(string) PolicyReferenceMatch, imageRef, sigRef string, result bool) {
-	prm := prmFactory(imageRef)
-	res := prm.matchesDockerReference(forbiddenImageMock{}, sigRef)
-	assert.Equal(t, result, res, fmt.Sprintf("%s vs. %s", imageRef, sigRef))
-}
-
-func prmExactReferenceFactory(ref string) PolicyReferenceMatch {
-	// Do not use NewPRMExactReference, we want to also test the case with an invalid DockerReference,
-	// even though NewPRMExactReference should never let it happen.
-	return &prmExactReference{DockerReference: ref}
-}
-
-func TestPRMExactReferenceMatchesDockerReference(t *testing.T) {
-	for _, test := range prmExactMatchTestTable {
-		testExactPRMAndSig(t, prmExactReferenceFactory, test.refA, test.refB, test.result)
-		testExactPRMAndSig(t, prmExactReferenceFactory, test.refB, test.refA, test.result)
-	}
-}
-
-func prmExactRepositoryFactory(ref string) PolicyReferenceMatch {
-	// Do not use NewPRMExactRepository, we want to also test the case with an invalid DockerReference,
-	// even though NewPRMExactRepository should never let it happen.
-	return &prmExactRepository{DockerRepository: ref}
-}
-
-func TestPRMExactRepositoryMatchesDockerReference(t *testing.T) {
-	for _, test := range prmRepositoryMatchTestTable {
-		testExactPRMAndSig(t, prmExactRepositoryFactory, test.refA, test.refB, test.result)
-		testExactPRMAndSig(t, prmExactRepositoryFactory, test.refB, test.refA, test.result)
-	}
-}
diff --git a/vendor/github.com/containers/image/signature/policy_types.go b/vendor/github.com/containers/image/signature/policy_types.go
deleted file mode 100644
index 4cd770f11c84..000000000000
--- a/vendor/github.com/containers/image/signature/policy_types.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Note: Consider the API unstable until the code supports at least three different image formats or transports.
-
-// This defines types used to represent a signature verification policy in memory.
-// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
-// built using the constructor functions provided in policy_config.go.
-
-package signature
-
-// NOTE: Keep this in sync with docs/policy.json.md!
-
-// Policy defines requirements for considering a signature, or an image, valid.
-type Policy struct {
-	// Default applies to any image which does not have a matching policy in Transports.
-	// Note that this can happen even if a matching PolicyTransportScopes exists in Transports
-	// if the image matches none of the scopes.
-	Default    PolicyRequirements               `json:"default"`
-	Transports map[string]PolicyTransportScopes `json:"transports"`
-}
-
-// PolicyTransportScopes defines policies for images for a specific transport,
-// for various scopes, the map keys.
-// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
-// there is one scope precisely matching a single image, and namespace scopes as prefixes
-// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]])
-// The empty scope, if it exists, is considered a parent namespace of all other scopes.
-// The most specific scope wins; duplication is prohibited (hard failure).
-type PolicyTransportScopes map[string]PolicyRequirements
-
-// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
-// Must not be empty, frequently will only contain a single element.
-type PolicyRequirements []PolicyRequirement
-
-// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
-// The type is public, but its definition is private.
-
-// prCommon is the common type field in a JSON encoding of PolicyRequirement.
-type prCommon struct {
-	Type prTypeIdentifier `json:"type"`
-}
-
-// prTypeIdentifier is a string designating a kind of a PolicyRequirement.
-type prTypeIdentifier string
-
-const (
-	prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
-	prTypeReject                 prTypeIdentifier = "reject"
-	prTypeSignedBy               prTypeIdentifier = "signedBy"
-	prTypeSignedBaseLayer        prTypeIdentifier = "signedBaseLayer"
-)
-
-// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
-// every image is allowed to run.
-// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
-// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
-// FIXME? Better name?
-type prInsecureAcceptAnything struct {
-	prCommon
-}
-
-// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
-type prReject struct {
-	prCommon
-}
-
-// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
-type prSignedBy struct {
-	prCommon
-
-	// KeyType specifies what kind of key reference KeyPath/KeyData is.
-	// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs”
-	// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
-	KeyType sbKeyType `json:"keyType"`
-
-	// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
-	KeyPath string `json:"keyPath,omitempty"`
-	// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
-	KeyData []byte `json:"keyData,omitempty"`
-
-	// SignedIdentity specifies what image identity the signature must be claiming about the image.
-	// Defaults to "match-exact" if not specified.
-	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
-}
-
-// sbKeyType is the set of allowed values for prSignedBy.KeyType
-type sbKeyType string
-
-const (
-	// SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
-	SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
-	// SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
-	SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
-	// SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
-	// FIXME: PEM, DER?
-	SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
-	// SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
-	// FIXME: PEM, DER?
-	SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
-)
-
-// prSignedBaseLayer is a PolicyRequirement with type = prSignedBaseLayer: the image has a specified, correctly signed, base image.
-type prSignedBaseLayer struct {
-	prCommon
-	// BaseLayerIdentity specifies the base image to look for. "match-exact" is rejected, "match-repository" is unlikely to be useful.
-	BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
-}
-
-// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
-// The type is public, but its implementation is private.
-
-// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
-type prmCommon struct {
-	Type prmTypeIdentifier `json:"type"`
-}
-
-// prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch.
-type prmTypeIdentifier string
-
-const (
-	prmTypeMatchExact             prmTypeIdentifier = "matchExact"
-	prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact"
-	prmTypeMatchRepository        prmTypeIdentifier = "matchRepository"
-	prmTypeExactReference         prmTypeIdentifier = "exactReference"
-	prmTypeExactRepository        prmTypeIdentifier = "exactRepository"
-)
-
-// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
-type prmMatchExact struct {
-	prmCommon
-}
-
-// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly,
-// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest
-type prmMatchRepoDigestOrExact struct {
-	prmCommon
-}
-
-// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag.
-type prmMatchRepository struct {
-	prmCommon
-}
-
-// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly.
-type prmExactReference struct {
-	prmCommon
-	DockerReference string `json:"dockerReference"`
-}
-
-// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag.
-type prmExactRepository struct {
-	prmCommon
-	DockerRepository string `json:"dockerRepository"`
-}
diff --git a/vendor/github.com/containers/image/signature/signature.go b/vendor/github.com/containers/image/signature/signature.go
deleted file mode 100644
index f6219bec8761..000000000000
--- a/vendor/github.com/containers/image/signature/signature.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// Note: Consider the API unstable until the code supports at least three different image formats or transports.
-
-// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json!
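For orientation, this is roughly how the JSON form described in docs/policy.json.md maps onto the types above. A hedged sketch, assuming NewPolicyFromBytes from this package's policy_config.go; the scope and key path are illustrative, while the "type", "keyType", and "signedIdentity" strings come from the constants defined above:

```go
package main

import (
	"fmt"

	"github.com/containers/image/signature"
)

func main() {
	// "reject", "signedBy", "GPGKeys" and "matchRepoDigestOrExact" correspond
	// to the prType.../sbKeyType/prmType... constants in policy_types.go.
	policyJSON := []byte(`{
	    "default": [{"type": "reject"}],
	    "transports": {
	        "docker": {
	            "docker.io/myorg": [{
	                "type": "signedBy",
	                "keyType": "GPGKeys",
	                "keyPath": "/etc/pki/containers/key.gpg",
	                "signedIdentity": {"type": "matchRepoDigestOrExact"}
	            }]
	        }
	    }
	}`)
	policy, err := signature.NewPolicyFromBytes(policyJSON)
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed policy with %d transport(s)\n", len(policy.Transports))
}
```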
- -package signature - -import ( - "encoding/json" - "fmt" - "time" - - "github.com/pkg/errors" - - "github.com/containers/image/version" - "github.com/opencontainers/go-digest" -) - -const ( - signatureType = "atomic container signature" -) - -// InvalidSignatureError is returned when parsing an invalid signature. -type InvalidSignatureError struct { - msg string -} - -func (err InvalidSignatureError) Error() string { - return err.msg -} - -// Signature is a parsed content of a signature. -// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below. -type Signature struct { - DockerManifestDigest digest.Digest - DockerReference string // FIXME: more precise type? -} - -// untrustedSignature is a parsed content of a signature. -type untrustedSignature struct { - UntrustedDockerManifestDigest digest.Digest - UntrustedDockerReference string // FIXME: more precise type? - UntrustedCreatorID *string - // This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision, - // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds). - // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually, - // we would add another field, UntrustedTimestampNS int64. - UntrustedTimestamp *int64 -} - -// UntrustedSignatureInformation is information available in an untrusted signature. -// This may be useful when debugging signature verification failures, -// or when managing a set of signatures on a single image. -// -// WARNING: Do not use the contents of this for ANY security decisions, -// and be VERY CAREFUL about showing this information to humans in any way which suggest that these values “are probably” reliable. -// There is NO REASON to expect the values to be correct, or not intentionally misleading -// (including things like “✅ Verified by $authority”) -type UntrustedSignatureInformation struct { - UntrustedDockerManifestDigest digest.Digest - UntrustedDockerReference string // FIXME: more precise type? - UntrustedCreatorID *string - UntrustedTimestamp *time.Time - UntrustedShortKeyIdentifier string -} - -// newUntrustedSignature returns an untrustedSignature object with -// the specified primary contents and appropriate metadata. -func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature { - // Use intermediate variables for these values so that we can take their addresses. - // Golang guarantees that they will have a new address on every execution. - creatorID := "atomic " + version.Version - timestamp := time.Now().Unix() - return untrustedSignature{ - UntrustedDockerManifestDigest: dockerManifestDigest, - UntrustedDockerReference: dockerReference, - UntrustedCreatorID: &creatorID, - UntrustedTimestamp: ×tamp, - } -} - -// Compile-time check that untrustedSignature implements json.Marshaler -var _ json.Marshaler = (*untrustedSignature)(nil) - -// MarshalJSON implements the json.Marshaler interface. 
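The UntrustedTimestamp comment above, which explains why the field is an int64 rather than JSON's native float64, can be checked directly (the MarshalJSON implementation this interface comment introduces continues below). A self-contained demonstration:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// A current-era Unix-nanosecond timestamp does not fit in float64's
	// 53-bit mantissa, so the round-trip through float64 is lossy:
	ns := int64(1_500_000_000_000_000_001)
	f := float64(ns)
	fmt.Println(int64(f) == ns) // false: rounded to the nearest representable value

	// At this magnitude, adjacent float64 values are 256 apart, i.e. a single
	// unit in the last place is on the order of hundreds of nanoseconds:
	fmt.Println(math.Nextafter(f, math.MaxFloat64) - f) // 256
}
```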
-func (s untrustedSignature) MarshalJSON() ([]byte, error) {
-	if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
-		return nil, errors.New("Unexpected empty signature content")
-	}
-	critical := map[string]interface{}{
-		"type":     signatureType,
-		"image":    map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
-		"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
-	}
-	optional := map[string]interface{}{}
-	if s.UntrustedCreatorID != nil {
-		optional["creator"] = *s.UntrustedCreatorID
-	}
-	if s.UntrustedTimestamp != nil {
-		optional["timestamp"] = *s.UntrustedTimestamp
-	}
-	signature := map[string]interface{}{
-		"critical": critical,
-		"optional": optional,
-	}
-	return json.Marshal(signature)
-}
-
-// Compile-time check that untrustedSignature implements json.Unmarshaler
-var _ json.Unmarshaler = (*untrustedSignature)(nil)
-
-// UnmarshalJSON implements the json.Unmarshaler interface
-func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
-	err := s.strictUnmarshalJSON(data)
-	if err != nil {
-		if _, ok := err.(jsonFormatError); ok {
-			err = InvalidSignatureError{msg: err.Error()}
-		}
-	}
-	return err
-}
-
-// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
-// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller.
-func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
-	var critical, optional json.RawMessage
-	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
-		"critical": &critical,
-		"optional": &optional,
-	}); err != nil {
-		return err
-	}
-
-	var creatorID string
-	var timestamp float64
-	var gotCreatorID, gotTimestamp = false, false
-	if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
-		switch key {
-		case "creator":
-			gotCreatorID = true
-			return &creatorID
-		case "timestamp":
-			gotTimestamp = true
-			return &timestamp
-		default:
-			var ignore interface{}
-			return &ignore
-		}
-	}); err != nil {
-		return err
-	}
-	if gotCreatorID {
-		s.UntrustedCreatorID = &creatorID
-	}
-	if gotTimestamp {
-		intTimestamp := int64(timestamp)
-		if float64(intTimestamp) != timestamp {
-			return InvalidSignatureError{msg: "Field optional.timestamp is not an integer"}
-		}
-		s.UntrustedTimestamp = &intTimestamp
-	}
-
-	var t string
-	var image, identity json.RawMessage
-	if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
-		"type":     &t,
-		"image":    &image,
-		"identity": &identity,
-	}); err != nil {
-		return err
-	}
-	if t != signatureType {
-		return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
-	}
-
-	var digestString string
-	if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
-		"docker-manifest-digest": &digestString,
-	}); err != nil {
-		return err
-	}
-	s.UntrustedDockerManifestDigest = digest.Digest(digestString)
-
-	if err := paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
-		"docker-reference": &s.UntrustedDockerReference,
-	}); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Sign formats the signature and returns a blob signed using mech and keyIdentity
-// (If it seems surprising that this is a method on untrustedSignature, note that there
-// isn’t a good reason to think that a key used by the user is trusted by any component
-// of the system just because it is a private key — actually the presence of a private key
-// on the system increases the likelihood of a successful attack on that private key
-// on that particular system.)
-func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
-	json, err := json.Marshal(s)
-	if err != nil {
-		return nil, err
-	}
-
-	return mech.Sign(json, keyIdentity)
-}
-
-// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
-// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
-// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
-// because the functions have the same or similar types, so there is a risk of exchanging the functions;
-// named members of this struct are more explicit.
-type signatureAcceptanceRules struct {
-	validateKeyIdentity                func(string) error
-	validateSignedDockerReference      func(string) error
-	validateSignedDockerManifestDigest func(digest.Digest) error
-}
-
-// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
-// match expected values, both as specified by rules, and returns it
-func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
-	signed, keyIdentity, err := mech.Verify(unverifiedSignature)
-	if err != nil {
-		return nil, err
-	}
-	if err := rules.validateKeyIdentity(keyIdentity); err != nil {
-		return nil, err
-	}
-
-	var unmatchedSignature untrustedSignature
-	if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
-		return nil, InvalidSignatureError{msg: err.Error()}
-	}
-	if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
-		return nil, err
-	}
-	if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
-		return nil, err
-	}
-	// signatureAcceptanceRules have accepted this value.
-	return &Signature{
-		DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
-		DockerReference:      unmatchedSignature.UntrustedDockerReference,
-	}, nil
-}
-
-// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature,
-// WITHOUT doing any cryptographic verification.
-// This may be useful when debugging signature verification failures,
-// or when managing a set of signatures on a single image.
-//
-// WARNING: Do not use the contents of this for ANY security decisions,
-// and be VERY CAREFUL about showing this information to humans in any way which suggest that these values “are probably” reliable.
-// There is NO REASON to expect the values to be correct, or not intentionally misleading
-// (including things like “✅ Verified by $authority”)
-func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
-	// NOTE: This should eventually do format autodetection.
- mech, _, err := NewEphemeralGPGSigningMechanism([]byte{}) - if err != nil { - return nil, err - } - defer mech.Close() - - untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes) - if err != nil { - return nil, err - } - var untrustedDecodedContents untrustedSignature - if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil { - return nil, InvalidSignatureError{msg: err.Error()} - } - - var timestamp *time.Time // = nil - if untrustedDecodedContents.UntrustedTimestamp != nil { - ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0) - timestamp = &ts - } - return &UntrustedSignatureInformation{ - UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest, - UntrustedDockerReference: untrustedDecodedContents.UntrustedDockerReference, - UntrustedCreatorID: untrustedDecodedContents.UntrustedCreatorID, - UntrustedTimestamp: timestamp, - UntrustedShortKeyIdentifier: shortKeyIdentifier, - }, nil -} diff --git a/vendor/github.com/containers/image/signature/signature_test.go b/vendor/github.com/containers/image/signature/signature_test.go deleted file mode 100644 index 412a03df3233..000000000000 --- a/vendor/github.com/containers/image/signature/signature_test.go +++ /dev/null @@ -1,412 +0,0 @@ -package signature - -import ( - "encoding/json" - "io/ioutil" - "path/filepath" - "testing" - "time" - - "github.com/containers/image/version" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/xeipuuv/gojsonschema" -) - -func TestInvalidSignatureError(t *testing.T) { - // A stupid test just to keep code coverage - s := "test" - err := InvalidSignatureError{msg: s} - assert.Equal(t, s, err.Error()) -} - -func TestNewUntrustedSignature(t *testing.T) { - timeBefore := time.Now() - sig := newUntrustedSignature(TestImageManifestDigest, TestImageSignatureReference) - assert.Equal(t, TestImageManifestDigest, sig.UntrustedDockerManifestDigest) - assert.Equal(t, TestImageSignatureReference, sig.UntrustedDockerReference) - require.NotNil(t, sig.UntrustedCreatorID) - assert.Equal(t, "atomic "+version.Version, *sig.UntrustedCreatorID) - require.NotNil(t, sig.UntrustedTimestamp) - timeAfter := time.Now() - assert.True(t, timeBefore.Unix() <= *sig.UntrustedTimestamp) - assert.True(t, *sig.UntrustedTimestamp <= timeAfter.Unix()) -} - -func TestMarshalJSON(t *testing.T) { - // Empty string values - s := newUntrustedSignature("", "_") - _, err := s.MarshalJSON() - assert.Error(t, err) - s = newUntrustedSignature("_", "") - _, err = s.MarshalJSON() - assert.Error(t, err) - - // Success - // Use intermediate variables for these values so that we can take their addresses. 
-	creatorID := "CREATOR"
-	timestamp := int64(1484683104)
-	for _, c := range []struct {
-		input    untrustedSignature
-		expected string
-	}{
-		{
-			untrustedSignature{
-				UntrustedDockerManifestDigest: "digest!@#",
-				UntrustedDockerReference:      "reference#@!",
-				UntrustedCreatorID:            &creatorID,
-				UntrustedTimestamp:            &timestamp,
-			},
-			"{\"critical\":{\"identity\":{\"docker-reference\":\"reference#@!\"},\"image\":{\"docker-manifest-digest\":\"digest!@#\"},\"type\":\"atomic container signature\"},\"optional\":{\"creator\":\"CREATOR\",\"timestamp\":1484683104}}",
-		},
-		{
-			untrustedSignature{
-				UntrustedDockerManifestDigest: "digest!@#",
-				UntrustedDockerReference:      "reference#@!",
-			},
-			"{\"critical\":{\"identity\":{\"docker-reference\":\"reference#@!\"},\"image\":{\"docker-manifest-digest\":\"digest!@#\"},\"type\":\"atomic container signature\"},\"optional\":{}}",
-		},
-	} {
-		marshaled, err := c.input.MarshalJSON()
-		require.NoError(t, err)
-		assert.Equal(t, []byte(c.expected), marshaled)
-
-		// Also call MarshalJSON through the JSON package.
-		marshaled, err = json.Marshal(c.input)
-		assert.NoError(t, err)
-		assert.Equal(t, []byte(c.expected), marshaled)
-	}
-}
-
-// Return the result of modifying validJSON with fn
-func modifiedUntrustedSignatureJSON(t *testing.T, validJSON []byte, modifyFn func(mSI)) []byte {
-	var tmp mSI
-	err := json.Unmarshal(validJSON, &tmp)
-	require.NoError(t, err)
-
-	modifyFn(tmp)
-
-	modifiedJSON, err := json.Marshal(tmp)
-	require.NoError(t, err)
-	return modifiedJSON
-}
-
-// Verify that input can be unmarshaled as an untrustedSignature, and that it passes JSON schema validation, and return the unmarshaled untrustedSignature.
-func succesfullyUnmarshalUntrustedSignature(t *testing.T, schemaLoader gojsonschema.JSONLoader, input []byte) untrustedSignature {
-	inputString := string(input)
-
-	var s untrustedSignature
-	err := json.Unmarshal(input, &s)
-	require.NoError(t, err, inputString)
-
-	res, err := gojsonschema.Validate(schemaLoader, gojsonschema.NewStringLoader(inputString))
-	assert.True(t, err == nil, inputString)
-	assert.True(t, res.Valid(), inputString)
-
-	return s
-}
-
-// Verify that input can't be unmarshaled as an untrusted signature, and that it fails JSON schema validation.
-func assertUnmarshalUntrustedSignatureFails(t *testing.T, schemaLoader gojsonschema.JSONLoader, input []byte) {
-	inputString := string(input)
-
-	var s untrustedSignature
-	err := json.Unmarshal(input, &s)
-	assert.Error(t, err, inputString)
-
-	res, err := gojsonschema.Validate(schemaLoader, gojsonschema.NewStringLoader(inputString))
-	assert.True(t, err != nil || !res.Valid(), inputString)
-}
-
-func TestUnmarshalJSON(t *testing.T) {
-	// NOTE: The schema at schemaPath is NOT authoritative; docs/atomic-signature.json and the code is, rather!
-	// The schemaPath references are not testing that the code follows the behavior declared by the schema,
-	// they are testing that the schema follows the behavior of the code!
-	schemaPath, err := filepath.Abs("../docs/atomic-signature-embedded-json.json")
-	require.NoError(t, err)
-	schemaLoader := gojsonschema.NewReferenceLoader("file://" + schemaPath)
-
-	// Invalid input. Note that json.Unmarshal is guaranteed to validate input before calling our
-	// UnmarshalJSON implementation; so test that first, then test our error handling for completeness.
- assertUnmarshalUntrustedSignatureFails(t, schemaLoader, []byte("&")) - var s untrustedSignature - err = s.UnmarshalJSON([]byte("&")) - assert.Error(t, err) - - // Not an object - assertUnmarshalUntrustedSignatureFails(t, schemaLoader, []byte("1")) - - // Start with a valid JSON. - validSig := newUntrustedSignature("digest!@#", "reference#@!") - validJSON, err := validSig.MarshalJSON() - require.NoError(t, err) - - // Success - s = succesfullyUnmarshalUntrustedSignature(t, schemaLoader, validJSON) - assert.Equal(t, validSig, s) - - // Various ways to corrupt the JSON - breakFns := []func(mSI){ - // A top-level field is missing - func(v mSI) { delete(v, "critical") }, - func(v mSI) { delete(v, "optional") }, - // Extra top-level sub-object - func(v mSI) { v["unexpected"] = 1 }, - // "critical" not an object - func(v mSI) { v["critical"] = 1 }, - // "optional" not an object - func(v mSI) { v["optional"] = 1 }, - // A field of "critical" is missing - func(v mSI) { delete(x(v, "critical"), "type") }, - func(v mSI) { delete(x(v, "critical"), "image") }, - func(v mSI) { delete(x(v, "critical"), "identity") }, - // Extra field of "critical" - func(v mSI) { x(v, "critical")["unexpected"] = 1 }, - // Invalid "type" - func(v mSI) { x(v, "critical")["type"] = 1 }, - func(v mSI) { x(v, "critical")["type"] = "unexpected" }, - // Invalid "image" object - func(v mSI) { x(v, "critical")["image"] = 1 }, - func(v mSI) { delete(x(v, "critical", "image"), "docker-manifest-digest") }, - func(v mSI) { x(v, "critical", "image")["unexpected"] = 1 }, - // Invalid "docker-manifest-digest" - func(v mSI) { x(v, "critical", "image")["docker-manifest-digest"] = 1 }, - // Invalid "identity" object - func(v mSI) { x(v, "critical")["identity"] = 1 }, - func(v mSI) { delete(x(v, "critical", "identity"), "docker-reference") }, - func(v mSI) { x(v, "critical", "identity")["unexpected"] = 1 }, - // Invalid "docker-reference" - func(v mSI) { x(v, "critical", "identity")["docker-reference"] = 1 }, - // Invalid "creator" - func(v mSI) { x(v, "optional")["creator"] = 1 }, - // Invalid "timestamp" - func(v mSI) { x(v, "optional")["timestamp"] = "unexpected" }, - func(v mSI) { x(v, "optional")["timestamp"] = 0.5 }, // Fractional input - } - for _, fn := range breakFns { - testJSON := modifiedUntrustedSignatureJSON(t, validJSON, fn) - assertUnmarshalUntrustedSignatureFails(t, schemaLoader, testJSON) - } - - // Modifications to unrecognized fields in "optional" are allowed and ignored - allowedModificationFns := []func(mSI){ - // Add an optional field - func(v mSI) { x(v, "optional")["unexpected"] = 1 }, - } - for _, fn := range allowedModificationFns { - testJSON := modifiedUntrustedSignatureJSON(t, validJSON, fn) - s := succesfullyUnmarshalUntrustedSignature(t, schemaLoader, testJSON) - assert.Equal(t, validSig, s) - } - - // Optional fields can be missing - validSig = untrustedSignature{ - UntrustedDockerManifestDigest: "digest!@#", - UntrustedDockerReference: "reference#@!", - UntrustedCreatorID: nil, - UntrustedTimestamp: nil, - } - validJSON, err = validSig.MarshalJSON() - require.NoError(t, err) - s = succesfullyUnmarshalUntrustedSignature(t, schemaLoader, validJSON) - assert.Equal(t, validSig, s) -} - -func TestSign(t *testing.T) { - mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory) - require.NoError(t, err) - defer mech.Close() - - if err := mech.SupportsSigning(); err != nil { - t.Skipf("Signing not supported: %v", err) - } - - sig := newUntrustedSignature("digest!@#", "reference#@!") - - // Successful 
-	signature, err := sig.sign(mech, TestKeyFingerprint)
-	require.NoError(t, err)
-
-	verified, err := verifyAndExtractSignature(mech, signature, signatureAcceptanceRules{
-		validateKeyIdentity: func(keyIdentity string) error {
-			if keyIdentity != TestKeyFingerprint {
-				return errors.Errorf("Unexpected keyIdentity")
-			}
-			return nil
-		},
-		validateSignedDockerReference: func(signedDockerReference string) error {
-			if signedDockerReference != sig.UntrustedDockerReference {
-				return errors.Errorf("Unexpected signedDockerReference")
-			}
-			return nil
-		},
-		validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error {
-			if signedDockerManifestDigest != sig.UntrustedDockerManifestDigest {
-				return errors.Errorf("Unexpected signedDockerManifestDigest")
-			}
-			return nil
-		},
-	})
-	require.NoError(t, err)
-
-	assert.Equal(t, sig.UntrustedDockerManifestDigest, verified.DockerManifestDigest)
-	assert.Equal(t, sig.UntrustedDockerReference, verified.DockerReference)
-
-	// Error creating blob to sign
-	_, err = untrustedSignature{}.sign(mech, TestKeyFingerprint)
-	assert.Error(t, err)
-
-	// Error signing
-	_, err = sig.sign(mech, "this fingerprint doesn't exist")
-	assert.Error(t, err)
-}
-
-func TestVerifyAndExtractSignature(t *testing.T) {
-	mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
-	require.NoError(t, err)
-	defer mech.Close()
-
-	type triple struct {
-		keyIdentity                string
-		signedDockerReference      string
-		signedDockerManifestDigest digest.Digest
-	}
-	var wanted, recorded triple
-	// recordingRules is a plausible signatureAcceptanceRules implementation, but equally
-	// importantly it records that we are passing the correct values to the rule callbacks.
-	recordingRules := signatureAcceptanceRules{
-		validateKeyIdentity: func(keyIdentity string) error {
-			recorded.keyIdentity = keyIdentity
-			if keyIdentity != wanted.keyIdentity {
-				return errors.Errorf("keyIdentity mismatch")
-			}
-			return nil
-		},
-		validateSignedDockerReference: func(signedDockerReference string) error {
-			recorded.signedDockerReference = signedDockerReference
-			if signedDockerReference != wanted.signedDockerReference {
-				return errors.Errorf("signedDockerReference mismatch")
-			}
-			return nil
-		},
-		validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error {
-			recorded.signedDockerManifestDigest = signedDockerManifestDigest
-			if signedDockerManifestDigest != wanted.signedDockerManifestDigest {
-				return errors.Errorf("signedDockerManifestDigest mismatch")
-			}
-			return nil
-		},
-	}
-
-	signature, err := ioutil.ReadFile("./fixtures/image.signature")
-	require.NoError(t, err)
-	signatureData := triple{
-		keyIdentity:                TestKeyFingerprint,
-		signedDockerReference:      TestImageSignatureReference,
-		signedDockerManifestDigest: TestImageManifestDigest,
-	}
-
-	// Successful verification
-	wanted = signatureData
-	recorded = triple{}
-	sig, err := verifyAndExtractSignature(mech, signature, recordingRules)
-	require.NoError(t, err)
-	assert.Equal(t, TestImageSignatureReference, sig.DockerReference)
-	assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest)
-	assert.Equal(t, signatureData, recorded)
-
-	// For extra paranoia, test that we return a nil signature object on error.
-
-	// Completely invalid signature.
- recorded = triple{} - sig, err = verifyAndExtractSignature(mech, []byte{}, recordingRules) - assert.Error(t, err) - assert.Nil(t, sig) - assert.Equal(t, triple{}, recorded) - - recorded = triple{} - sig, err = verifyAndExtractSignature(mech, []byte("invalid signature"), recordingRules) - assert.Error(t, err) - assert.Nil(t, sig) - assert.Equal(t, triple{}, recorded) - - // Valid signature of non-JSON: asked for keyIdentity, only - invalidBlobSignature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature") - require.NoError(t, err) - recorded = triple{} - sig, err = verifyAndExtractSignature(mech, invalidBlobSignature, recordingRules) - assert.Error(t, err) - assert.Nil(t, sig) - assert.Equal(t, triple{keyIdentity: signatureData.keyIdentity}, recorded) - - // Valid signature with a wrong key: asked for keyIdentity, only - wanted = signatureData - wanted.keyIdentity = "unexpected fingerprint" - recorded = triple{} - sig, err = verifyAndExtractSignature(mech, signature, recordingRules) - assert.Error(t, err) - assert.Nil(t, sig) - assert.Equal(t, triple{keyIdentity: signatureData.keyIdentity}, recorded) - - // Valid signature with a wrong manifest digest: asked for keyIdentity and signedDockerManifestDigest - wanted = signatureData - wanted.signedDockerManifestDigest = "invalid digest" - recorded = triple{} - sig, err = verifyAndExtractSignature(mech, signature, recordingRules) - assert.Error(t, err) - assert.Nil(t, sig) - assert.Equal(t, triple{ - keyIdentity: signatureData.keyIdentity, - signedDockerManifestDigest: signatureData.signedDockerManifestDigest, - }, recorded) - - // Valid signature with a wrong image reference - wanted = signatureData - wanted.signedDockerReference = "unexpected docker reference" - recorded = triple{} - sig, err = verifyAndExtractSignature(mech, signature, recordingRules) - assert.Error(t, err) - assert.Nil(t, sig) - assert.Equal(t, signatureData, recorded) -} - -func TestGetUntrustedSignatureInformationWithoutVerifying(t *testing.T) { - signature, err := ioutil.ReadFile("./fixtures/image.signature") - require.NoError(t, err) - // Successful parsing, all optional fields present - info, err := GetUntrustedSignatureInformationWithoutVerifying(signature) - require.NoError(t, err) - assert.Equal(t, TestImageSignatureReference, info.UntrustedDockerReference) - assert.Equal(t, TestImageManifestDigest, info.UntrustedDockerManifestDigest) - assert.NotNil(t, info.UntrustedCreatorID) - assert.Equal(t, "atomic ", *info.UntrustedCreatorID) - assert.NotNil(t, info.UntrustedTimestamp) - assert.Equal(t, time.Unix(1458239713, 0), *info.UntrustedTimestamp) - assert.Equal(t, TestKeyShortID, info.UntrustedShortKeyIdentifier) - // Successful parsing, no optional fields present - signature, err = ioutil.ReadFile("./fixtures/no-optional-fields.signature") - require.NoError(t, err) - // Successful parsing - info, err = GetUntrustedSignatureInformationWithoutVerifying(signature) - require.NoError(t, err) - assert.Equal(t, TestImageSignatureReference, info.UntrustedDockerReference) - assert.Equal(t, TestImageManifestDigest, info.UntrustedDockerManifestDigest) - assert.Nil(t, info.UntrustedCreatorID) - assert.Nil(t, info.UntrustedTimestamp) - assert.Equal(t, TestKeyShortID, info.UntrustedShortKeyIdentifier) - - // Completely invalid signature. 
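
// GetUntrustedSignatureInformationWithoutVerifying, exercised above, parses a
// signature without any cryptographic verification, so every field it returns
// is explicitly labeled Untrusted and is only suitable for display. A minimal
// usage sketch (the import path matches the vendored package; the signature
// file name is illustrative):

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/containers/image/signature"
)

func main() {
	blob, err := ioutil.ReadFile("image.signature")
	if err != nil {
		log.Fatal(err)
	}
	info, err := signature.GetUntrustedSignatureInformationWithoutVerifying(blob)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("reference (UNVERIFIED):", info.UntrustedDockerReference)
	fmt.Println("digest (UNVERIFIED):", info.UntrustedDockerManifestDigest)
	// The optional fields are pointers and are nil when absent.
	if info.UntrustedCreatorID != nil {
		fmt.Println("creator (UNVERIFIED):", *info.UntrustedCreatorID)
	}
	if info.UntrustedTimestamp != nil {
		fmt.Println("created (UNVERIFIED):", *info.UntrustedTimestamp)
	}
	fmt.Println("short key ID:", info.UntrustedShortKeyIdentifier)
}
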
- _, err = GetUntrustedSignatureInformationWithoutVerifying([]byte{}) - assert.Error(t, err) - - _, err = GetUntrustedSignatureInformationWithoutVerifying([]byte("invalid signature")) - assert.Error(t, err) - - // Valid signature of non-JSON - invalidBlobSignature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature") - require.NoError(t, err) - _, err = GetUntrustedSignatureInformationWithoutVerifying(invalidBlobSignature) - assert.Error(t, err) -} diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go deleted file mode 100644 index 08fa71b56ba8..000000000000 --- a/vendor/github.com/containers/image/storage/storage_image.go +++ /dev/null @@ -1,616 +0,0 @@ -package storage - -import ( - "bytes" - "context" - "encoding/json" - "io" - "io/ioutil" - "time" - - "github.com/pkg/errors" - - "github.com/containers/image/image" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/ioutils" - ddigest "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -var ( - // ErrBlobDigestMismatch is returned when PutBlob() is given a blob - // with a digest-based name that doesn't match its contents. - ErrBlobDigestMismatch = errors.New("blob digest mismatch") - // ErrBlobSizeMismatch is returned when PutBlob() is given a blob - // with an expected size that doesn't match the reader. - ErrBlobSizeMismatch = errors.New("blob size mismatch") - // ErrNoManifestLists is returned when GetTargetManifest() is - // called. - ErrNoManifestLists = errors.New("manifest lists are not supported by this transport") - // ErrNoSuchImage is returned when we attempt to access an image which - // doesn't exist in the storage area. 
- ErrNoSuchImage = storage.ErrNotAnImage -) - -type storageImageSource struct { - imageRef storageReference - Tag string `json:"tag,omitempty"` - Created time.Time `json:"created-time,omitempty"` - ID string `json:"id"` - BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle - Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs - LayerPosition map[ddigest.Digest]int `json:"-"` // Where we are in reading a blob's layers - SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice -} - -type storageImageDestination struct { - imageRef storageReference - Tag string `json:"tag,omitempty"` - Created time.Time `json:"created-time,omitempty"` - ID string `json:"id"` - BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle - Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs - BlobData map[ddigest.Digest][]byte `json:"-"` // Map from names of blobs that aren't layers to contents, temporary - Manifest []byte `json:"-"` // Manifest contents, temporary - Signatures []byte `json:"-"` // Signature contents, temporary - SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice -} - -type storageLayerMetadata struct { - Digest string `json:"digest,omitempty"` - Size int64 `json:"size"` - CompressedSize int64 `json:"compressed-size,omitempty"` -} - -type storageImage struct { - types.Image - size int64 -} - -// newImageSource sets us up to read out an image, which needs to already exist. -func newImageSource(imageRef storageReference) (*storageImageSource, error) { - img, err := imageRef.resolveImage() - if err != nil { - return nil, err - } - image := &storageImageSource{ - imageRef: imageRef, - Created: time.Now(), - ID: img.ID, - BlobList: []types.BlobInfo{}, - Layers: make(map[ddigest.Digest][]string), - LayerPosition: make(map[ddigest.Digest]int), - SignatureSizes: []int{}, - } - if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { - return nil, errors.Wrap(err, "error decoding metadata for source image") - } - return image, nil -} - -// newImageDestination sets us up to write a new image. -func newImageDestination(imageRef storageReference) (*storageImageDestination, error) { - image := &storageImageDestination{ - imageRef: imageRef, - Tag: imageRef.reference, - Created: time.Now(), - ID: imageRef.id, - BlobList: []types.BlobInfo{}, - Layers: make(map[ddigest.Digest][]string), - BlobData: make(map[ddigest.Digest][]byte), - SignatureSizes: []int{}, - } - return image, nil -} - -func (s storageImageSource) Reference() types.ImageReference { - return s.imageRef -} - -func (s storageImageDestination) Reference() types.ImageReference { - return s.imageRef -} - -func (s storageImageSource) Close() error { - return nil -} - -func (s storageImageDestination) Close() error { - return nil -} - -func (s storageImageDestination) ShouldCompressLayers() bool { - // We ultimately have to decompress layers to populate trees on disk, - // so callers shouldn't bother compressing them before handing them to - // us, if they're not already compressed. - return false -} - -// putBlob stores a layer or data blob, optionally enforcing that a digest in -// blobinfo matches the incoming data. 
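
// putBlob below has to decide whether an incoming blob is a filesystem layer
// (a tar stream) or opaque data without consuming the stream: it peeks at the
// first archive.HeaderSize bytes and then stitches the peeked prefix back in
// front of the unread remainder. A standalone sketch of that idiom, under the
// assumption that a 512-byte tar-header sniff is enough (sniffLen and
// looksLikeTar are illustrative stand-ins for archive.HeaderSize and
// archive.IsArchive):

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

const sniffLen = 512 // size of a tar header

func looksLikeTar(prefix []byte) bool {
	// The real code defers to archive.IsArchive; checking the tar magic is
	// enough for the sketch.
	return len(prefix) >= 262 && string(prefix[257:262]) == "ustar"
}

// sniff reports whether the stream starts like a tar archive and returns a
// reader that replays the peeked bytes followed by the rest of the stream.
func sniff(stream io.Reader) (bool, io.Reader, error) {
	buf := make([]byte, sniffLen)
	n, err := io.ReadAtLeast(stream, buf, sniffLen)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return false, nil, err // short blobs are fine; real errors are not
	}
	return looksLikeTar(buf[:n]), io.MultiReader(bytes.NewReader(buf[:n]), stream), nil
}

func main() {
	isTar, r, err := sniff(strings.NewReader("just some opaque bytes"))
	if err != nil {
		panic(err)
	}
	rest, _ := ioutil.ReadAll(r)
	fmt.Println(isTar, string(rest)) // false, and no bytes were lost
}
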
-func (s *storageImageDestination) putBlob(stream io.Reader, blobinfo types.BlobInfo, enforceDigestAndSize bool) (types.BlobInfo, error) {
-	blobSize := blobinfo.Size
-	digest := blobinfo.Digest
-	errorBlobInfo := types.BlobInfo{
-		Digest: "",
-		Size:   -1,
-	}
-	// Try to read an initial snippet of the blob.
-	buf := [archive.HeaderSize]byte{}
-	n, err := io.ReadAtLeast(stream, buf[:], len(buf))
-	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
-		return errorBlobInfo, err
-	}
-	// Set up to read the whole blob (the initial snippet, plus the rest)
-	// while digesting it with either the default, or the passed-in digest,
-	// if one was specified.
-	hasher := ddigest.Canonical.Digester()
-	if digest.Validate() == nil {
-		if a := digest.Algorithm(); a.Available() {
-			hasher = a.Digester()
-		}
-	}
-	hash := ""
-	counter := ioutils.NewWriteCounter(hasher.Hash())
-	defragmented := io.MultiReader(bytes.NewBuffer(buf[:n]), stream)
-	multi := io.TeeReader(defragmented, counter)
-	if (n > 0) && archive.IsArchive(buf[:n]) {
-		// It's a filesystem layer. If it's not the first one in the
-		// image, we assume that the most recently added layer is its
-		// parent.
-		parentLayer := ""
-		for _, blob := range s.BlobList {
-			if layerList, ok := s.Layers[blob.Digest]; ok {
-				parentLayer = layerList[len(layerList)-1]
-			}
-		}
-		// If we have an expected content digest, generate a layer ID
-		// based on the parent's ID and the expected content digest.
-		id := ""
-		if digest.Validate() == nil {
-			id = ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + digest.String())).Hex()
-		}
-		// Attempt to create the identified layer and import its contents.
-		layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi)
-		if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
-			logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err)
-			return errorBlobInfo, err
-		}
-		if errors.Cause(err) == storage.ErrDuplicateID {
-			// We specified an ID, and there's already a layer with
-			// the same ID. Drain the input so that we can look at
-			// its length and digest.
-			_, err := io.Copy(ioutil.Discard, multi)
-			if err != nil && err != io.EOF {
-				logrus.Debugf("error digesting layer blob %q: %v", blobinfo.Digest, err)
-				return errorBlobInfo, err
-			}
-			hash = hasher.Digest().String()
-		} else {
-			// Applied the layer with the specified ID. Note the
-			// size info and computed digest.
-			hash = hasher.Digest().String()
-			layerMeta := storageLayerMetadata{
-				Digest:         hash,
-				CompressedSize: counter.Count,
-				Size:           uncompressedSize,
-			}
-			if metadata, err := json.Marshal(&layerMeta); len(metadata) != 0 && err == nil {
-				s.imageRef.transport.store.SetMetadata(layer.ID, string(metadata))
-			}
-			// Hang on to the new layer's ID.
-			id = layer.ID
-		}
-		// Check if the size looks right.
-		if enforceDigestAndSize && blobinfo.Size >= 0 && blobinfo.Size != counter.Count {
-			logrus.Debugf("layer blob %q size is %d, not %d, rejecting", blobinfo.Digest, counter.Count, blobinfo.Size)
-			if layer != nil {
-				// Something's wrong; delete the newly-created layer.
-				s.imageRef.transport.store.DeleteLayer(layer.ID)
-			}
-			return errorBlobInfo, ErrBlobSizeMismatch
-		}
-		// If the content digest was specified, verify it.
-		if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash {
-			logrus.Debugf("layer blob %q digests to %q, rejecting", blobinfo.Digest, hash)
-			if layer != nil {
-				// Something's wrong; delete the newly-created layer.
- s.imageRef.transport.store.DeleteLayer(layer.ID) - } - return errorBlobInfo, ErrBlobDigestMismatch - } - // If we didn't get a blob size, return the one we calculated. - if blobSize == -1 { - blobSize = counter.Count - } - // If we didn't get a digest, construct one. - if digest == "" { - digest = ddigest.Digest(hash) - } - // Record that this layer blob is a layer, and the layer ID it - // ended up having. This is a list, in case the same blob is - // being applied more than once. - s.Layers[digest] = append(s.Layers[digest], id) - s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: counter.Count}) - if layer != nil { - logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id) - } else { - logrus.Debugf("layer blob %q already present as layer %q", blobinfo.Digest, id) - } - } else { - // It's just data. Finish scanning it in, check that our - // computed digest matches the passed-in digest, and store it, - // but leave it out of the blob-to-layer-ID map so that we can - // tell that it's not a layer. - blob, err := ioutil.ReadAll(multi) - if err != nil && err != io.EOF { - return errorBlobInfo, err - } - hash = hasher.Digest().String() - if enforceDigestAndSize && blobinfo.Size >= 0 && int64(len(blob)) != blobinfo.Size { - logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, int64(len(blob)), blobinfo.Size) - return errorBlobInfo, ErrBlobSizeMismatch - } - // If we were given a digest, verify that the content matches - // it. - if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash { - logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash) - return errorBlobInfo, ErrBlobDigestMismatch - } - // If we didn't get a blob size, return the one we calculated. - if blobSize == -1 { - blobSize = int64(len(blob)) - } - // If we didn't get a digest, construct one. - if digest == "" { - digest = ddigest.Digest(hash) - } - // Save the blob for when we Commit(). - s.BlobData[digest] = blob - s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: int64(len(blob))}) - logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest) - } - return types.BlobInfo{ - Digest: digest, - Size: blobSize, - }, nil -} - -// PutBlob is used to both store filesystem layers and binary data that is part -// of the image. Filesystem layers are assumed to be imported in order, as -// that is required by some of the underlying storage drivers. -func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) { - return s.putBlob(stream, blobinfo, true) -} - -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. -// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. -// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); -// it returns a non-nil error only on an unexpected failure. 
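
// The HasBlob contract described above and implemented just below is
// deliberately three-valued: present with a size, absent with size -1, or a
// genuine error. That lets callers skip an upload cheaply. A hypothetical
// caller-side sketch (memStore and putIfMissing are illustrative, not part of
// this package):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

type memStore map[string][]byte

// HasBlob reports presence and size; err would be non-nil only for an
// unexpected failure, never for a merely absent blob.
func (m memStore) HasBlob(digest string) (bool, int64, error) {
	if b, ok := m[digest]; ok {
		return true, int64(len(b)), nil
	}
	return false, -1, nil
}

func (m memStore) PutBlob(digest string, r io.Reader) error {
	b, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}
	m[digest] = b
	return nil
}

// putIfMissing uploads only when the store does not already hold the blob.
func putIfMissing(s memStore, digest string, r io.Reader) error {
	present, size, err := s.HasBlob(digest)
	if err != nil {
		return err // an unexpected failure, not "absent"
	}
	if present {
		fmt.Printf("blob %s already present (%d bytes), skipping\n", digest, size)
		return nil
	}
	return s.PutBlob(digest, r)
}

func main() {
	store := memStore{}
	_ = putIfMissing(store, "sha256:aaaa", strings.NewReader("payload"))
	_ = putIfMissing(store, "sha256:aaaa", strings.NewReader("payload")) // skipped
}
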
-func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) { - if blobinfo.Digest == "" { - return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`) - } - for _, blob := range s.BlobList { - if blob.Digest == blobinfo.Digest { - return true, blob.Size, nil - } - } - return false, -1, nil -} - -func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) { - err := blobinfo.Digest.Validate() - if err != nil { - return types.BlobInfo{}, err - } - if layerList, ok := s.Layers[blobinfo.Digest]; !ok || len(layerList) < 1 { - b, err := s.imageRef.transport.store.ImageBigData(s.ID, blobinfo.Digest.String()) - if err != nil { - return types.BlobInfo{}, err - } - return types.BlobInfo{Digest: blobinfo.Digest, Size: int64(len(b))}, nil - } - layerList := s.Layers[blobinfo.Digest] - rc, _, err := diffLayer(s.imageRef.transport.store, layerList[len(layerList)-1]) - if err != nil { - return types.BlobInfo{}, err - } - return s.putBlob(rc, blobinfo, false) -} - -func (s *storageImageDestination) Commit() error { - // Create the image record. - lastLayer := "" - for _, blob := range s.BlobList { - if layerList, ok := s.Layers[blob.Digest]; ok { - lastLayer = layerList[len(layerList)-1] - } - } - img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil) - if err != nil { - if errors.Cause(err) != storage.ErrDuplicateID { - logrus.Debugf("error creating image: %q", err) - return errors.Wrapf(err, "error creating image %q", s.ID) - } - img, err = s.imageRef.transport.store.Image(s.ID) - if err != nil { - return errors.Wrapf(err, "error reading image %q", s.ID) - } - if img.TopLayer != lastLayer { - logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", s.ID) - return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", s.ID) - } - logrus.Debugf("reusing image ID %q", img.ID) - } else { - logrus.Debugf("created new image ID %q", img.ID) - } - s.ID = img.ID - names := img.Names - if s.Tag != "" { - names = append(names, s.Tag) - } - // We have names to set, so move those names to this image. - if len(names) > 0 { - if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error setting names on image %q: %v", img.ID, err) - return err - } - logrus.Debugf("set names of image %q to %v", img.ID, names) - } - // Save the data blobs to disk, and drop their contents from memory. - keys := []ddigest.Digest{} - for k, v := range s.BlobData { - if err := s.imageRef.transport.store.SetImageBigData(img.ID, k.String(), v); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving big data %q for image %q: %v", k, img.ID, err) - return err - } - keys = append(keys, k) - } - for _, key := range keys { - delete(s.BlobData, key) - } - // Save the manifest, if we have one. 
- if err := s.imageRef.transport.store.SetImageBigData(s.ID, "manifest", s.Manifest); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) - return err - } - // Save the signatures, if we have any. - if err := s.imageRef.transport.store.SetImageBigData(s.ID, "signatures", s.Signatures); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) - return err - } - // Save our metadata. - metadata, err := json.Marshal(s) - if err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err) - return err - } - if len(metadata) != 0 { - if err = s.imageRef.transport.store.SetMetadata(s.ID, string(metadata)); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) - return err - } - logrus.Debugf("saved image metadata %q", string(metadata)) - } - return nil -} - -var manifestMIMETypes = []string{ - // TODO(runcom): we'll add OCI as part of another PR here - manifest.DockerV2Schema2MediaType, - manifest.DockerV2Schema1SignedMediaType, - manifest.DockerV2Schema1MediaType, -} - -func (s *storageImageDestination) SupportedManifestMIMETypes() []string { - return manifestMIMETypes -} - -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (s *storageImageDestination) PutManifest(manifest []byte) error { - s.Manifest = make([]byte, len(manifest)) - copy(s.Manifest, manifest) - return nil -} - -// SupportsSignatures returns an error if we can't expect GetSignatures() to -// return data that was previously supplied to PutSignatures(). -func (s *storageImageDestination) SupportsSignatures() error { - return nil -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. 
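
// A few lines below, PutSignatures flattens all signatures into one blob by
// concatenation and records the individual lengths in SignatureSizes;
// GetSignatures reverses this by slicing the blob at the recorded offsets. A
// minimal round-trip sketch of that layout (join and split are illustrative
// names):

package main

import "fmt"

// join concatenates the signature blobs and records their sizes.
func join(signatures [][]byte) (blob []byte, sizes []int) {
	for _, sig := range signatures {
		sizes = append(sizes, len(sig))
		blob = append(blob, sig...)
	}
	return blob, sizes
}

// split recovers the original slices; leftover bytes indicate corruption,
// which is exactly the check GetSignatures performs.
func split(blob []byte, sizes []int) ([][]byte, error) {
	offset := 0
	out := [][]byte{}
	for _, length := range sizes {
		if offset+length > len(blob) {
			return nil, fmt.Errorf("signatures data truncated")
		}
		out = append(out, blob[offset:offset+length])
		offset += length
	}
	if offset != len(blob) {
		return nil, fmt.Errorf("signatures data contained %d extra bytes", len(blob)-offset)
	}
	return out, nil
}

func main() {
	blob, sizes := join([][]byte{[]byte("Signature A"), []byte("Signature B")})
	sigs, err := split(blob, sizes)
	fmt.Println(err, string(sigs[0]), string(sigs[1])) // <nil> Signature A Signature B
}
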
-func (s *storageImageDestination) MustMatchRuntimeOS() bool {
-	return true
-}
-
-func (s *storageImageDestination) PutSignatures(signatures [][]byte) error {
-	sizes := []int{}
-	sigblob := []byte{}
-	for _, sig := range signatures {
-		sizes = append(sizes, len(sig))
-		newblob := make([]byte, len(sigblob)+len(sig))
-		copy(newblob, sigblob)
-		copy(newblob[len(sigblob):], sig)
-		sigblob = newblob
-	}
-	s.Signatures = sigblob
-	s.SignatureSizes = sizes
-	return nil
-}
-
-func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
-	rc, n, _, err = s.getBlobAndLayerID(info)
-	return rc, n, err
-}
-
-func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
-	err = info.Digest.Validate()
-	if err != nil {
-		return nil, -1, "", err
-	}
-	if layerList, ok := s.Layers[info.Digest]; !ok || len(layerList) < 1 {
-		b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String())
-		if err != nil {
-			return nil, -1, "", err
-		}
-		r := bytes.NewReader(b)
-		logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
-		return ioutil.NopCloser(r), int64(r.Len()), "", nil
-	}
-	// If the blob was "put" more than once, we have multiple layer IDs
-	// which should all produce the same diff. For the sake of tests that
-	// want to make sure we created different layers each time the blob was
-	// "put", though, cycle through the layers.
-	layerList := s.Layers[info.Digest]
-	position, ok := s.LayerPosition[info.Digest]
-	if !ok {
-		position = 0
-	}
-	s.LayerPosition[info.Digest] = (position + 1) % len(layerList)
-	logrus.Debugf("exporting filesystem layer %q for blob %q", layerList[position], info.Digest)
-	rc, n, err = diffLayer(s.imageRef.transport.store, layerList[position])
-	return rc, n, layerList[position], err
-}
-
-func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64, err error) {
-	layer, err := store.Layer(layerID)
-	if err != nil {
-		return nil, -1, err
-	}
-	layerMeta := storageLayerMetadata{
-		CompressedSize: -1,
-	}
-	if layer.Metadata != "" {
-		if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
-			return nil, -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID)
-		}
-	}
-	if layerMeta.CompressedSize <= 0 {
-		n = -1
-	} else {
-		n = layerMeta.CompressedSize
-	}
-	diff, err := store.Diff("", layer.ID, nil)
-	if err != nil {
-		return nil, -1, err
-	}
-	return diff, n, nil
-}
-
-func (s *storageImageSource) GetManifest() (manifestBlob []byte, MIMEType string, err error) {
-	manifestBlob, err = s.imageRef.transport.store.ImageBigData(s.ID, "manifest")
-	return manifestBlob, manifest.GuessMIMEType(manifestBlob), err
-}
-
-func (s *storageImageSource) GetTargetManifest(digest ddigest.Digest) (manifestBlob []byte, MIMEType string, err error) {
-	return nil, "", ErrNoManifestLists
-}
-
-func (s *storageImageSource) GetSignatures(ctx context.Context) (signatures [][]byte, err error) {
-	var offset int
-	signature, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures")
-	if err != nil {
-		return nil, err
-	}
-	sigslice := [][]byte{}
-	for _, length := range s.SignatureSizes {
-		sigslice = append(sigslice, signature[offset:offset+length])
-		offset += length
-	}
-	if offset != len(signature) {
-		return nil, errors.Errorf("signatures data contained %d extra bytes", len(signature)-offset)
-	}
-	return sigslice, nil
-}
-
-func (s *storageImageSource) getSize() (int64, error) {
-	var sum int64
-	names,
err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id) - if err != nil { - return -1, errors.Wrapf(err, "error reading image %q", s.imageRef.id) - } - for _, name := range names { - bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.imageRef.id, name) - if err != nil { - return -1, errors.Wrapf(err, "error reading data blob size %q for %q", name, s.imageRef.id) - } - sum += bigSize - } - for _, sigSize := range s.SignatureSizes { - sum += int64(sigSize) - } - for _, layerList := range s.Layers { - for _, layerID := range layerList { - layer, err := s.imageRef.transport.store.Layer(layerID) - if err != nil { - return -1, err - } - layerMeta := storageLayerMetadata{ - Size: -1, - } - if layer.Metadata != "" { - if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil { - return -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID) - } - } - if layerMeta.Size < 0 { - return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) - } - sum += layerMeta.Size - } - } - return sum, nil -} - -func (s *storageImage) Size() (int64, error) { - return s.size, nil -} - -// newImage creates an image that also knows its size -func newImage(s storageReference) (types.Image, error) { - src, err := newImageSource(s) - if err != nil { - return nil, err - } - img, err := image.FromSource(src) - if err != nil { - return nil, err - } - size, err := src.getSize() - if err != nil { - return nil, err - } - return &storageImage{Image: img, size: size}, nil -} diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go deleted file mode 100644 index 674330b483e0..000000000000 --- a/vendor/github.com/containers/image/storage/storage_reference.go +++ /dev/null @@ -1,163 +0,0 @@ -package storage - -import ( - "strings" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte -// value hex-encoded into a 64-character string, and a reference to a Store -// where an image is, or would be, kept. -type storageReference struct { - transport storageTransport - reference string - id string - name reference.Named -} - -func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference { - // We take a copy of the transport, which contains a pointer to the - // store that it used for resolving this reference, so that the - // transport that we'll return from Transport() won't be affected by - // further calls to the original transport's SetStore() method. - return &storageReference{ - transport: transport, - reference: reference, - id: id, - name: name, - } -} - -// Resolve the reference's name to an image ID in the store, if there's already -// one present with the same name or ID, and return the image. 
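
// getSize above recovers each layer's uncompressed size by decoding the
// metadata JSON that putBlob attached to the layer via storageLayerMetadata,
// and diffLayer reads the compressed size the same way. A round-trip sketch
// of that metadata (layerMetadata is an illustrative copy of the struct; the
// digest value is a hypothetical placeholder):

package main

import (
	"encoding/json"
	"fmt"
)

type layerMetadata struct {
	Digest         string `json:"digest,omitempty"`
	Size           int64  `json:"size"`
	CompressedSize int64  `json:"compressed-size,omitempty"`
}

func main() {
	// What putBlob stores via SetMetadata after applying a layer:
	stored, err := json.Marshal(layerMetadata{
		Digest:         "sha256:0123", // hypothetical placeholder digest
		Size:           12345,
		CompressedSize: 6789,
	})
	if err != nil {
		panic(err)
	}
	// What getSize/diffLayer recover; a pre-seeded -1 survives when a field
	// is missing, which is why getSize treats Size < 0 as "unknown".
	meta := layerMetadata{Size: -1, CompressedSize: -1}
	if err := json.Unmarshal(stored, &meta); err != nil {
		panic(err)
	}
	fmt.Println(meta.Size, meta.CompressedSize) // 12345 6789
}
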
-func (s *storageReference) resolveImage() (*storage.Image, error) { - if s.id == "" { - image, err := s.transport.store.Image(s.reference) - if image != nil && err == nil { - s.id = image.ID - } - } - if s.id == "" { - logrus.Errorf("reference %q does not resolve to an image ID", s.StringWithinTransport()) - return nil, ErrNoSuchImage - } - img, err := s.transport.store.Image(s.id) - if err != nil { - return nil, errors.Wrapf(err, "error reading image %q", s.id) - } - if s.reference != "" { - nameMatch := false - for _, name := range img.Names { - if name == s.reference { - nameMatch = true - break - } - } - if !nameMatch { - logrus.Errorf("no image matching reference %q found", s.StringWithinTransport()) - return nil, ErrNoSuchImage - } - } - return img, nil -} - -// Return a Transport object that defaults to using the same store that we used -// to build this reference object. -func (s storageReference) Transport() types.ImageTransport { - return &storageTransport{ - store: s.transport.store, - defaultUIDMap: s.transport.defaultUIDMap, - defaultGIDMap: s.transport.defaultGIDMap, - } -} - -// Return a name with a tag, if we have a name to base them on. -func (s storageReference) DockerReference() reference.Named { - return s.name -} - -// Return a name with a tag, prefixed with the graph root and driver name, to -// disambiguate between images which may be present in multiple stores and -// share only their names. -func (s storageReference) StringWithinTransport() string { - optionsList := "" - options := s.transport.store.GraphOptions() - if len(options) > 0 { - optionsList = ":" + strings.Join(options, ",") - } - storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" - if s.name == nil { - return storeSpec + "@" + s.id - } - if s.id == "" { - return storeSpec + s.reference - } - return storeSpec + s.reference + "@" + s.id -} - -func (s storageReference) PolicyConfigurationIdentity() string { - storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" - if s.name == nil { - return storeSpec + "@" + s.id - } - if s.id == "" { - return storeSpec + s.reference - } - return storeSpec + s.reference + "@" + s.id -} - -// Also accept policy that's tied to the combination of the graph root and -// driver name, to apply to all images stored in the Store, and to just the -// graph root, in case we're using multiple drivers in the same directory for -// some reason. -func (s storageReference) PolicyConfigurationNamespaces() []string { - storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" - driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" - namespaces := []string{} - if s.name != nil { - if s.id != "" { - // The reference without the ID is also a valid namespace. 
- namespaces = append(namespaces, storeSpec+s.reference) - } - components := strings.Split(s.name.Name(), "/") - for len(components) > 0 { - namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) - components = components[:len(components)-1] - } - } - namespaces = append(namespaces, storeSpec) - namespaces = append(namespaces, driverlessStoreSpec) - return namespaces -} - -func (s storageReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - return newImage(s) -} - -func (s storageReference) DeleteImage(ctx *types.SystemContext) error { - img, err := s.resolveImage() - if err != nil { - return err - } - layers, err := s.transport.store.DeleteImage(img.ID, true) - if err == nil { - logrus.Debugf("deleted image %q", img.ID) - for _, layer := range layers { - logrus.Debugf("deleted layer %q", layer) - } - } - return err -} - -func (s storageReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - return newImageSource(s) -} - -func (s storageReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(s) -} diff --git a/vendor/github.com/containers/image/storage/storage_reference_test.go b/vendor/github.com/containers/image/storage/storage_reference_test.go deleted file mode 100644 index 37ddcf3364fd..000000000000 --- a/vendor/github.com/containers/image/storage/storage_reference_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package storage - -import ( - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestStorageReferenceTransport(t *testing.T) { - newStore(t) - ref, err := Transport.ParseReference("busybox") - require.NoError(t, err) - transport := ref.Transport() - st, ok := transport.(*storageTransport) - require.True(t, ok) - assert.Equal(t, *(Transport.(*storageTransport)), *st) -} - -func TestStorageReferenceDockerReference(t *testing.T) { - ref, err := Transport.ParseReference("busybox") - require.NoError(t, err) - dr := ref.DockerReference() - require.NotNil(t, dr) - assert.Equal(t, "docker.io/library/busybox:latest", dr.String()) - - ref, err = Transport.ParseReference("@" + sha256digestHex) - require.NoError(t, err) - - dr = ref.DockerReference() - assert.Nil(t, dr) -} - -// A common list of reference formats to test for the various ImageReference methods. 
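
// PolicyConfigurationNamespaces above widens one reference into the scopes a
// signature policy can be keyed on, from most to least specific: the named
// reference within this store, each parent path of the repository name, the
// store itself, and finally the graph root without a driver. A sketch of the
// name-prefix part of that expansion, exercised by the test cases just below
// (the storeSpec values are illustrative):

package main

import (
	"fmt"
	"strings"
)

func namespaces(storeSpec, driverlessSpec, name string) []string {
	out := []string{}
	components := strings.Split(name, "/")
	for len(components) > 0 {
		out = append(out, storeSpec+strings.Join(components, "/"))
		components = components[:len(components)-1]
	}
	return append(out, storeSpec, driverlessSpec)
}

func main() {
	for _, ns := range namespaces("[vfs@/var/lib/storage]", "[/var/lib/storage]", "example.com/myns/ns2/busybox") {
		fmt.Println(ns)
	}
	// [vfs@/var/lib/storage]example.com/myns/ns2/busybox
	// [vfs@/var/lib/storage]example.com/myns/ns2
	// [vfs@/var/lib/storage]example.com/myns
	// [vfs@/var/lib/storage]example.com
	// [vfs@/var/lib/storage]
	// [/var/lib/storage]
}
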
-var validReferenceTestCases = []struct { - input, canonical string - namespaces []string -}{ - { - "busybox", "docker.io/library/busybox:latest", - []string{"docker.io/library/busybox", "docker.io/library", "docker.io"}, - }, - { - "example.com/myns/ns2/busybox:notlatest", "example.com/myns/ns2/busybox:notlatest", - []string{"example.com/myns/ns2/busybox", "example.com/myns/ns2", "example.com/myns", "example.com"}, - }, - { - "@" + sha256digestHex, "@" + sha256digestHex, - []string{}, - }, - { - "busybox@" + sha256digestHex, "docker.io/library/busybox:latest@" + sha256digestHex, - []string{"docker.io/library/busybox:latest", "docker.io/library/busybox", "docker.io/library", "docker.io"}, - }, -} - -func TestStorageReferenceStringWithinTransport(t *testing.T) { - store := newStore(t) - optionsList := "" - options := store.GraphOptions() - if len(options) > 0 { - optionsList = ":" + strings.Join(options, ",") - } - storeSpec := fmt.Sprintf("[%s@%s+%s%s]", store.GraphDriverName(), store.GraphRoot(), store.RunRoot(), optionsList) - - for _, c := range validReferenceTestCases { - ref, err := Transport.ParseReference(c.input) - require.NoError(t, err, c.input) - assert.Equal(t, storeSpec+c.canonical, ref.StringWithinTransport(), c.input) - } -} - -func TestStorageReferencePolicyConfigurationIdentity(t *testing.T) { - store := newStore(t) - storeSpec := fmt.Sprintf("[%s@%s]", store.GraphDriverName(), store.GraphRoot()) - - for _, c := range validReferenceTestCases { - ref, err := Transport.ParseReference(c.input) - require.NoError(t, err, c.input) - assert.Equal(t, storeSpec+c.canonical, ref.PolicyConfigurationIdentity(), c.input) - } -} - -func TestStorageReferencePolicyConfigurationNamespaces(t *testing.T) { - store := newStore(t) - storeSpec := fmt.Sprintf("[%s@%s]", store.GraphDriverName(), store.GraphRoot()) - - for _, c := range validReferenceTestCases { - ref, err := Transport.ParseReference(c.input) - require.NoError(t, err, c.input) - expectedNS := []string{} - for _, ns := range c.namespaces { - expectedNS = append(expectedNS, storeSpec+ns) - } - expectedNS = append(expectedNS, storeSpec) - expectedNS = append(expectedNS, fmt.Sprintf("[%s]", store.GraphRoot())) - assert.Equal(t, expectedNS, ref.PolicyConfigurationNamespaces()) - } -} - -// NewImage, NewImageSource, NewImageDestination, DeleteImage tested in storage_test.go diff --git a/vendor/github.com/containers/image/storage/storage_test.go b/vendor/github.com/containers/image/storage/storage_test.go deleted file mode 100644 index 3f84ba086a03..000000000000 --- a/vendor/github.com/containers/image/storage/storage_test.go +++ /dev/null @@ -1,933 +0,0 @@ -package storage - -import ( - "archive/tar" - "bytes" - "context" - "crypto/rand" - "crypto/sha256" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/ioutils" - "github.com/containers/storage/pkg/reexec" - ddigest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var ( - _imgd types.ImageDestination = &storageImageDestination{} - _imgs types.ImageSource = &storageImageSource{} - _ref types.ImageReference = &storageReference{} - _transport types.ImageTransport = &storageTransport{} - topwd = "" -) - -const ( - layerSize = 12345 -) - -func TestMain(m *testing.M) { - if reexec.Init() { - 
return - } - wd, err := ioutil.TempDir("", "test.") - if err != nil { - os.Exit(1) - } - topwd = wd - debug := false - flag.BoolVar(&debug, "debug", false, "print debug statements") - flag.Parse() - if debug { - logrus.SetLevel(logrus.DebugLevel) - } - code := m.Run() - os.RemoveAll(wd) - os.Exit(code) -} - -func newStoreWithGraphDriverOptions(t *testing.T, options []string) storage.Store { - wd, err := ioutil.TempDir(topwd, "test.") - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(wd, 0700) - if err != nil { - t.Fatal(err) - } - run := filepath.Join(wd, "run") - root := filepath.Join(wd, "root") - Transport.SetDefaultUIDMap([]idtools.IDMap{{ - ContainerID: 0, - HostID: os.Getuid(), - Size: 1, - }}) - Transport.SetDefaultGIDMap([]idtools.IDMap{{ - ContainerID: 0, - HostID: os.Getgid(), - Size: 1, - }}) - store, err := storage.GetStore(storage.StoreOptions{ - RunRoot: run, - GraphRoot: root, - GraphDriverName: "vfs", - GraphDriverOptions: options, - UIDMap: Transport.DefaultUIDMap(), - GIDMap: Transport.DefaultGIDMap(), - }) - if err != nil { - t.Fatal(err) - } - Transport.SetStore(store) - return store -} - -func newStore(t *testing.T) storage.Store { - return newStoreWithGraphDriverOptions(t, []string{}) -} - -func TestParse(t *testing.T) { - store := newStore(t) - - ref, err := Transport.ParseReference("test") - if err != nil { - t.Fatalf("ParseReference(%q) returned error %v", "test", err) - } - if ref == nil { - t.Fatalf("ParseReference returned nil reference") - } - - ref, err = Transport.ParseStoreReference(store, "test") - if err != nil { - t.Fatalf("ParseStoreReference(%q) returned error %v", "test", err) - } - if ref == nil { - t.Fatalf("ParseStoreReference(%q) returned nil reference", "test") - } - - strRef := ref.StringWithinTransport() - ref, err = Transport.ParseReference(strRef) - if err != nil { - t.Fatalf("ParseReference(%q) returned error: %v", strRef, err) - } - if ref == nil { - t.Fatalf("ParseReference(%q) returned nil reference", strRef) - } - - transport := storageTransport{ - store: store, - defaultUIDMap: Transport.(*storageTransport).defaultUIDMap, - defaultGIDMap: Transport.(*storageTransport).defaultGIDMap, - } - _references := []storageReference{ - { - name: ref.(*storageReference).name, - reference: verboseName(ref.(*storageReference).name), - id: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - transport: transport, - }, - { - name: ref.(*storageReference).name, - reference: verboseName(ref.(*storageReference).name), - transport: transport, - }, - { - id: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - transport: transport, - }, - { - name: ref.DockerReference(), - reference: verboseName(ref.DockerReference()), - transport: transport, - }, - } - for _, reference := range _references { - s := reference.StringWithinTransport() - ref, err := Transport.ParseStoreReference(store, s) - if err != nil { - t.Fatalf("ParseReference(%q) returned error: %v", strRef, err) - } - if ref.id != reference.id { - t.Fatalf("ParseReference(%q) failed to extract ID", s) - } - if ref.reference != reference.reference { - t.Fatalf("ParseReference(%q) failed to extract reference (%q!=%q)", s, ref.reference, reference.reference) - } - } -} - -func TestParseWithGraphDriverOptions(t *testing.T) { - optionLists := [][]string{ - {}, - {"unused1"}, - {"unused1", "unused2"}, - {"unused1", "unused2", "unused3"}, - } - for _, optionList := range optionLists { - store := newStoreWithGraphDriverOptions(t, optionList) - ref, err := 
Transport.ParseStoreReference(store, "test")
-		if err != nil {
-			t.Fatalf("ParseStoreReference(%q, graph driver options %v) returned error %v", "test", optionList, err)
-		}
-		if ref == nil {
-			t.Fatalf("ParseStoreReference returned nil reference")
-		}
-		spec := ref.StringWithinTransport()
-		ref2, err := Transport.ParseReference(spec)
-		if err != nil {
-			t.Fatalf("ParseReference(%q) returned error %v", spec, err)
-		}
-		if ref2 == nil {
-			t.Fatalf("ParseReference returned nil reference")
-		}
-		sref, ok := ref2.(*storageReference)
-		if !ok {
-			t.Fatalf("ParseReference returned a reference from transport %s, not one of ours", ref2.Transport().Name())
-		}
-		parsedOptions := sref.transport.store.GraphOptions()
-		if len(parsedOptions) != len(optionList) {
-			t.Fatalf("Lost options between %v and %v", optionList, parsedOptions)
-		}
-		for i := range optionList {
-			if parsedOptions[i] != optionList[i] {
-				t.Fatalf("Mismatched option %d: %v and %v", i, optionList[i], parsedOptions[i])
-			}
-		}
-	}
-}
-
-func systemContext() *types.SystemContext {
-	return &types.SystemContext{}
-}
-
-func makeLayer(t *testing.T, compression archive.Compression) (ddigest.Digest, int64, int64, []byte) {
-	var cwriter io.WriteCloser
-	var uncompressed *ioutils.WriteCounter
-	var twriter *tar.Writer
-	preader, pwriter := io.Pipe()
-	tbuffer := bytes.Buffer{}
-	if compression != archive.Uncompressed {
-		compressor, err := archive.CompressStream(pwriter, compression)
-		if err != nil {
-			t.Fatalf("Error compressing layer: %v", err)
-		}
-		cwriter = compressor
-		uncompressed = ioutils.NewWriteCounter(cwriter)
-	} else {
-		uncompressed = ioutils.NewWriteCounter(pwriter)
-	}
-	twriter = tar.NewWriter(uncompressed)
-	buf := make([]byte, layerSize)
-	n, err := rand.Read(buf)
-	if err != nil {
-		t.Fatalf("Error reading tar data: %v", err)
-	}
-	if n != len(buf) {
-		t.Fatalf("Short read reading tar data: %d < %d", n, len(buf))
-	}
-	for i := 1024; i < 2048; i++ {
-		buf[i] = 0
-	}
-	go func() {
-		defer pwriter.Close()
-		if cwriter != nil {
-			defer cwriter.Close()
-		}
-		defer twriter.Close()
-		err := twriter.WriteHeader(&tar.Header{
-			Name:       "/random-single-file",
-			Mode:       0600,
-			Size:       int64(len(buf)),
-			ModTime:    time.Now(),
-			AccessTime: time.Now(),
-			ChangeTime: time.Now(),
-			Typeflag:   tar.TypeReg,
-		})
-		if err != nil {
-			t.Fatalf("Error writing tar header: %v", err)
-		}
-		n, err := twriter.Write(buf)
-		if err != nil {
-			t.Fatalf("Error writing tar content: %v", err)
-		}
-		if n != len(buf) {
-			t.Fatalf("Short write writing tar content: %d < %d", n, len(buf))
-		}
-	}()
-	_, err = io.Copy(&tbuffer, preader)
-	if err != nil {
-		t.Fatalf("Error reading layer tar: %v", err)
-	}
-	sum := ddigest.SHA256.FromBytes(tbuffer.Bytes())
-	return sum, uncompressed.Count, int64(tbuffer.Len()), tbuffer.Bytes()
-}
-
-func TestWriteRead(t *testing.T) {
-	if os.Geteuid() != 0 {
-		t.Skip("TestWriteRead requires root privileges")
-	}
-
-	config := `{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}`
-	sum := ddigest.SHA256.FromBytes([]byte(config))
-	configInfo := types.BlobInfo{
-		Digest: sum,
-		Size:   int64(len(config)),
-	}
-	manifests := []string{
-		//`{
-		//	"schemaVersion": 2,
-		//	"mediaType": "application/vnd.oci.image.manifest.v1+json",
-		//	"config": {
-		//		"mediaType": "application/vnd.oci.image.serialization.config.v1+json",
-		//		"size": %cs,
-		//		"digest": "%ch"
-		//	},
-		//	"layers": [
-		//		{
-		//			"mediaType": "application/vnd.oci.image.serialization.rootfs.tar.gzip",
-		//			"digest": "%lh",
-		//			"size": %ls
-		//		}
-		//	]
-		//}`,
-		`{
"schemaVersion": 1, - "name": "test", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "%lh" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"%li\",\"created\":\"2016-03-03T11:29:44.222098366Z\",\"container\":\"\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":%ls}" - } - ] - }`, - `{ - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "config": { - "mediaType": "application/vnd.docker.container.image.v1+json", - "size": %cs, - "digest": "%ch" - }, - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%lh", - "size": %ls - } - ] - }`, - } - signatures := [][]byte{ - []byte("Signature A"), - []byte("Signature B"), - } - newStore(t) - ref, err := Transport.ParseReference("test") - if err != nil { - t.Fatalf("ParseReference(%q) returned error %v", "test", err) - } - if ref == nil { - t.Fatalf("ParseReference returned nil reference") - } - - for _, manifestFmt := range manifests { - dest, err := ref.NewImageDestination(systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q) returned no destination", ref.StringWithinTransport()) - } - if dest.Reference().StringWithinTransport() != ref.StringWithinTransport() { - t.Fatalf("NewImageDestination(%q) changed the reference to %q", ref.StringWithinTransport(), dest.Reference().StringWithinTransport()) - } - t.Logf("supported manifest MIME types: %v", dest.SupportedManifestMIMETypes()) - if err := dest.SupportsSignatures(); err != nil { - t.Fatalf("Destination image doesn't support signatures: %v", err) - } - t.Logf("compress layers: %v", dest.ShouldCompressLayers()) - compression := archive.Uncompressed - if dest.ShouldCompressLayers() { - compression = archive.Gzip - } - digest, decompressedSize, size, blob := makeLayer(t, compression) - if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ - Size: size, - Digest: digest, - }); err != nil { - t.Fatalf("Error saving randomly-generated layer to destination: %v", err) - } - t.Logf("Wrote randomly-generated layer %q (%d/%d bytes) to destination", digest, size, decompressedSize) - if _, err := dest.PutBlob(bytes.NewBufferString(config), configInfo); err != nil { - t.Fatalf("Error saving config to destination: %v", err) - } - manifest := strings.Replace(manifestFmt, "%lh", digest.String(), -1) - manifest = strings.Replace(manifest, "%ch", 
configInfo.Digest.String(), -1) - manifest = strings.Replace(manifest, "%ls", fmt.Sprintf("%d", size), -1) - manifest = strings.Replace(manifest, "%cs", fmt.Sprintf("%d", configInfo.Size), -1) - li := digest.Hex() - manifest = strings.Replace(manifest, "%li", li, -1) - manifest = strings.Replace(manifest, "%ci", sum.Hex(), -1) - t.Logf("this manifest is %q", manifest) - if err := dest.PutManifest([]byte(manifest)); err != nil { - t.Fatalf("Error saving manifest to destination: %v", err) - } - if err := dest.PutSignatures(signatures); err != nil { - t.Fatalf("Error saving signatures to destination: %v", err) - } - if err := dest.Commit(); err != nil { - t.Fatalf("Error committing changes to destination: %v", err) - } - dest.Close() - - img, err := ref.NewImage(systemContext()) - if err != nil { - t.Fatalf("NewImage(%q) returned error %v", ref.StringWithinTransport(), err) - } - imageConfigInfo := img.ConfigInfo() - if imageConfigInfo.Digest != "" { - blob, err := img.ConfigBlob() - if err != nil { - t.Fatalf("image %q claimed there was a config blob, but couldn't produce it: %v", ref.StringWithinTransport(), err) - } - sum := ddigest.SHA256.FromBytes(blob) - if sum != configInfo.Digest { - t.Fatalf("image config blob digest for %q doesn't match", ref.StringWithinTransport()) - } - if int64(len(blob)) != configInfo.Size { - t.Fatalf("image config size for %q changed from %d to %d", ref.StringWithinTransport(), configInfo.Size, len(blob)) - } - } - layerInfos := img.LayerInfos() - if layerInfos == nil { - t.Fatalf("image for %q returned empty layer list", ref.StringWithinTransport()) - } - imageInfo, err := img.Inspect() - if err != nil { - t.Fatalf("Inspect(%q) returned error %v", ref.StringWithinTransport(), err) - } - if imageInfo.Created.IsZero() { - t.Fatalf("Image %q claims to have been created at time 0", ref.StringWithinTransport()) - } - - src, err := ref.NewImageSource(systemContext(), []string{}) - if err != nil { - t.Fatalf("NewImageSource(%q) returned error %v", ref.StringWithinTransport(), err) - } - if src == nil { - t.Fatalf("NewImageSource(%q) returned no source", ref.StringWithinTransport()) - } - if src.Reference().StringWithinTransport() != ref.StringWithinTransport() { - // As long as it's only the addition of an ID suffix, that's okay. 
- if !strings.HasPrefix(src.Reference().StringWithinTransport(), ref.StringWithinTransport()+"@") { - t.Fatalf("NewImageSource(%q) changed the reference to %q", ref.StringWithinTransport(), src.Reference().StringWithinTransport()) - } - } - retrievedManifest, manifestType, err := src.GetManifest() - if err != nil { - t.Fatalf("GetManifest(%q) returned error %v", ref.StringWithinTransport(), err) - } - t.Logf("this manifest's type appears to be %q", manifestType) - if string(retrievedManifest) != manifest { - t.Fatalf("NewImageSource(%q) changed the manifest: %q was %q", ref.StringWithinTransport(), string(retrievedManifest), manifest) - } - sum = ddigest.SHA256.FromBytes([]byte(manifest)) - _, _, err = src.GetTargetManifest(sum) - if err == nil { - t.Fatalf("GetTargetManifest(%q) is supposed to fail", ref.StringWithinTransport()) - } - sigs, err := src.GetSignatures(context.Background()) - if err != nil { - t.Fatalf("GetSignatures(%q) returned error %v", ref.StringWithinTransport(), err) - } - if len(sigs) < len(signatures) { - t.Fatalf("Lost %d signatures", len(signatures)-len(sigs)) - } - if len(sigs) > len(signatures) { - t.Fatalf("Gained %d signatures", len(sigs)-len(signatures)) - } - for i := range sigs { - if bytes.Compare(sigs[i], signatures[i]) != 0 { - t.Fatalf("Signature %d was corrupted", i) - } - } - for _, layerInfo := range layerInfos { - buf := bytes.Buffer{} - layer, size, err := src.GetBlob(layerInfo) - if err != nil { - t.Fatalf("Error reading layer %q from %q", layerInfo.Digest, ref.StringWithinTransport()) - } - t.Logf("Decompressing blob %q, blob size = %d, layerInfo.Size = %d bytes", layerInfo.Digest, size, layerInfo.Size) - hasher := sha256.New() - compressed := ioutils.NewWriteCounter(hasher) - countedLayer := io.TeeReader(layer, compressed) - decompressed, err := archive.DecompressStream(countedLayer) - if err != nil { - t.Fatalf("Error decompressing layer %q from %q", layerInfo.Digest, ref.StringWithinTransport()) - } - n, err := io.Copy(&buf, decompressed) - if layerInfo.Size >= 0 && compressed.Count != layerInfo.Size { - t.Fatalf("Blob size is different than expected: %d != %d, read %d", compressed.Count, layerInfo.Size, n) - } - if size >= 0 && compressed.Count != size { - t.Fatalf("Blob size mismatch: %d != %d, read %d", compressed.Count, size, n) - } - sum := hasher.Sum(nil) - if ddigest.NewDigestFromBytes(ddigest.SHA256, sum) != layerInfo.Digest { - t.Fatalf("Layer blob digest for %q doesn't match", ref.StringWithinTransport()) - } - } - src.Close() - img.Close() - err = ref.DeleteImage(systemContext()) - if err != nil { - t.Fatalf("DeleteImage(%q) returned error %v", ref.StringWithinTransport(), err) - } - } -} - -func TestDuplicateName(t *testing.T) { - if os.Geteuid() != 0 { - t.Skip("TestDuplicateName requires root privileges") - } - - newStore(t) - - ref, err := Transport.ParseReference("test") - if err != nil { - t.Fatalf("ParseReference(%q) returned error %v", "test", err) - } - if ref == nil { - t.Fatalf("ParseReference returned nil reference") - } - - dest, err := ref.NewImageDestination(systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q, first pass) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q, first pass) returned no destination", ref.StringWithinTransport()) - } - digest, _, size, blob := makeLayer(t, archive.Uncompressed) - if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ - Size: size, - Digest: digest, - }); err != nil { - t.Fatalf("Error 
saving randomly-generated layer to destination, first pass: %v", err) - } - if err := dest.Commit(); err != nil { - t.Fatalf("Error committing changes to destination, first pass: %v", err) - } - dest.Close() - - dest, err = ref.NewImageDestination(systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q, second pass) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q, second pass) returned no destination", ref.StringWithinTransport()) - } - digest, _, size, blob = makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ - Size: int64(size), - Digest: digest, - }); err != nil { - t.Fatalf("Error saving randomly-generated layer to destination, second pass: %v", err) - } - if err := dest.Commit(); err != nil { - t.Fatalf("Error committing changes to destination, second pass: %v", err) - } - dest.Close() -} - -func TestDuplicateID(t *testing.T) { - if os.Geteuid() != 0 { - t.Skip("TestDuplicateID requires root privileges") - } - - newStore(t) - - ref, err := Transport.ParseReference("@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - if err != nil { - t.Fatalf("ParseReference(%q) returned error %v", "test", err) - } - if ref == nil { - t.Fatalf("ParseReference returned nil reference") - } - - dest, err := ref.NewImageDestination(systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q, first pass) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q, first pass) returned no destination", ref.StringWithinTransport()) - } - digest, _, size, blob := makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ - Size: size, - Digest: digest, - }); err != nil { - t.Fatalf("Error saving randomly-generated layer to destination, first pass: %v", err) - } - if err := dest.Commit(); err != nil { - t.Fatalf("Error committing changes to destination, first pass: %v", err) - } - dest.Close() - - dest, err = ref.NewImageDestination(systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q, second pass) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q, second pass) returned no destination", ref.StringWithinTransport()) - } - digest, _, size, blob = makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ - Size: int64(size), - Digest: digest, - }); err != nil { - t.Fatalf("Error saving randomly-generated layer to destination, second pass: %v", err) - } - if err := dest.Commit(); errors.Cause(err) != storage.ErrDuplicateID { - if err != nil { - t.Fatalf("Wrong error committing changes to destination, second pass: %v", err) - } - t.Fatal("Incorrectly succeeded committing changes to destination, second pass: no error") - } - dest.Close() -} - -func TestDuplicateNameID(t *testing.T) { - if os.Geteuid() != 0 { - t.Skip("TestDuplicateNameID requires root privileges") - } - - newStore(t) - - ref, err := Transport.ParseReference("test@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - if err != nil { - t.Fatalf("ParseReference(%q) returned error %v", "test", err) - } - if ref == nil { - t.Fatalf("ParseReference returned nil reference") - } - - dest, err := ref.NewImageDestination(systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q, first pass) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - 
t.Fatalf("NewImageDestination(%q, first pass) returned no destination", ref.StringWithinTransport()) - } - digest, _, size, blob := makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ - Size: size, - Digest: digest, - }); err != nil { - t.Fatalf("Error saving randomly-generated layer to destination, first pass: %v", err) - } - if err := dest.Commit(); err != nil { - t.Fatalf("Error committing changes to destination, first pass: %v", err) - } - dest.Close() - - dest, err = ref.NewImageDestination(systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q, second pass) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q, second pass) returned no destination", ref.StringWithinTransport()) - } - digest, _, size, blob = makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ - Size: int64(size), - Digest: digest, - }); err != nil { - t.Fatalf("Error saving randomly-generated layer to destination, second pass: %v", err) - } - if err := dest.Commit(); errors.Cause(err) != storage.ErrDuplicateID { - if err != nil { - t.Fatalf("Wrong error committing changes to destination, second pass: %v", err) - } - t.Fatal("Incorrectly succeeded committing changes to destination, second pass: no error") - } - dest.Close() -} - -func TestNamespaces(t *testing.T) { - newStore(t) - - ref, err := Transport.ParseReference("test@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - if err != nil { - t.Fatalf("ParseReference(%q) returned error %v", "test", err) - } - if ref == nil { - t.Fatalf("ParseReference returned nil reference") - } - - namespaces := ref.PolicyConfigurationNamespaces() - for _, namespace := range namespaces { - t.Logf("namespace: %q", namespace) - err = Transport.ValidatePolicyConfigurationScope(namespace) - if ref == nil { - t.Fatalf("ValidatePolicyConfigurationScope(%q) returned error: %v", namespace, err) - } - } - namespace := ref.StringWithinTransport() - t.Logf("ref: %q", namespace) - err = Transport.ValidatePolicyConfigurationScope(namespace) - if err != nil { - t.Fatalf("ValidatePolicyConfigurationScope(%q) returned error: %v", namespace, err) - } - for _, namespace := range []string{ - "@beefee", - ":miracle", - ":miracle@beefee", - "@beefee:miracle", - } { - t.Logf("invalid ref: %q", namespace) - err = Transport.ValidatePolicyConfigurationScope(namespace) - if err == nil { - t.Fatalf("ValidatePolicyConfigurationScope(%q) should have failed", namespace) - } - } -} - -func TestSize(t *testing.T) { - if os.Geteuid() != 0 { - t.Skip("TestSize requires root privileges") - } - - config := `{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}` - sum := ddigest.SHA256.FromBytes([]byte(config)) - configInfo := types.BlobInfo{ - Digest: sum, - Size: int64(len(config)), - } - - newStore(t) - - ref, err := Transport.ParseReference("test") - if err != nil { - t.Fatalf("ParseReference(%q) returned error %v", "test", err) - } - if ref == nil { - t.Fatalf("ParseReference returned nil reference") - } - - dest, err := ref.NewImageDestination(systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q) returned no destination", ref.StringWithinTransport()) - } - digest1, _, size1, blob := makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ - Size: size1, - Digest: digest1, - 
}); err != nil { - t.Fatalf("Error saving randomly-generated layer 1 to destination: %v", err) - } - digest2, _, size2, blob := makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ - Size: size2, - Digest: digest2, - }); err != nil { - t.Fatalf("Error saving randomly-generated layer 2 to destination: %v", err) - } - manifest := fmt.Sprintf(` - { - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "config": { - "mediaType": "application/vnd.docker.container.image.v1+json", - "size": %d, - "digest": "%s" - }, - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - } - ] - } - `, configInfo.Size, configInfo.Digest, digest1, size1, digest2, size2) - if err := dest.PutManifest([]byte(manifest)); err != nil { - t.Fatalf("Error storing manifest to destination: %v", err) - } - if err := dest.Commit(); err != nil { - t.Fatalf("Error committing changes to destination: %v", err) - } - dest.Close() - - img, err := ref.NewImage(systemContext()) - if err != nil { - t.Fatalf("NewImage(%q) returned error %v", ref.StringWithinTransport(), err) - } - usize, err := img.Size() - if usize == -1 || err != nil { - t.Fatalf("Error calculating image size: %v", err) - } - if int(usize) != layerSize*2+len(manifest) { - t.Fatalf("Unexpected image size: %d != %d + %d + %d", usize, layerSize, layerSize, len(manifest)) - } - img.Close() -} - -func TestDuplicateBlob(t *testing.T) { - if os.Geteuid() != 0 { - t.Skip("TestDuplicateBlob requires root privileges") - } - - config := `{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}` - sum := ddigest.SHA256.FromBytes([]byte(config)) - configInfo := types.BlobInfo{ - Digest: sum, - Size: int64(len(config)), - } - - newStore(t) - - ref, err := Transport.ParseReference("test") - if err != nil { - t.Fatalf("ParseReference(%q) returned error %v", "test", err) - } - if ref == nil { - t.Fatalf("ParseReference returned nil reference") - } - - dest, err := ref.NewImageDestination(systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q) returned no destination", ref.StringWithinTransport()) - } - digest1, _, size1, blob1 := makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(bytes.NewBuffer(blob1), types.BlobInfo{ - Size: size1, - Digest: digest1, - }); err != nil { - t.Fatalf("Error saving randomly-generated layer 1 to destination (first copy): %v", err) - } - digest2, _, size2, blob2 := makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(bytes.NewBuffer(blob2), types.BlobInfo{ - Size: size2, - Digest: digest2, - }); err != nil { - t.Fatalf("Error saving randomly-generated layer 2 to destination (first copy): %v", err) - } - if _, err := dest.PutBlob(bytes.NewBuffer(blob1), types.BlobInfo{ - Size: size1, - Digest: digest1, - }); err != nil { - t.Fatalf("Error saving randomly-generated layer 1 to destination (second copy): %v", err) - } - if _, err := dest.PutBlob(bytes.NewBuffer(blob2), types.BlobInfo{ - Size: size2, - Digest: digest2, - }); err != nil { - t.Fatalf("Error saving randomly-generated layer 2 to destination (second copy): %v", err) - } - manifest := fmt.Sprintf(` - { - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "config": { - 
"mediaType": "application/vnd.docker.container.image.v1+json", - "size": %d, - "digest": "%s" - }, - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - } - ] - } - `, configInfo.Size, configInfo.Digest, digest1, size1, digest2, size2, digest1, size1, digest2, size2) - if err := dest.PutManifest([]byte(manifest)); err != nil { - t.Fatalf("Error storing manifest to destination: %v", err) - } - if err := dest.Commit(); err != nil { - t.Fatalf("Error committing changes to destination: %v", err) - } - dest.Close() - - img, err := ref.NewImage(systemContext()) - if err != nil { - t.Fatalf("NewImage(%q) returned error %v", ref.StringWithinTransport(), err) - } - src, err := ref.NewImageSource(systemContext(), nil) - if err != nil { - t.Fatalf("NewImageSource(%q) returned error %v", ref.StringWithinTransport(), err) - } - source, ok := src.(*storageImageSource) - if !ok { - t.Fatalf("ImageSource is not a storage image") - } - layers := []string{} - for _, layerInfo := range img.LayerInfos() { - rc, _, layerID, err := source.getBlobAndLayerID(layerInfo) - if err != nil { - t.Fatalf("getBlobAndLayerID(%q) returned error %v", layerInfo.Digest, err) - } - io.Copy(ioutil.Discard, rc) - rc.Close() - layers = append(layers, layerID) - } - if len(layers) != 4 { - t.Fatalf("Incorrect number of layers: %d", len(layers)) - } - for i, layerID := range layers { - for j, otherID := range layers { - if i != j && layerID == otherID { - t.Fatalf("Layer IDs are not unique: %v", layers) - } - } - } - src.Close() - img.Close() -} diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go deleted file mode 100644 index 1a0ebd040d3d..000000000000 --- a/vendor/github.com/containers/image/storage/storage_transport.go +++ /dev/null @@ -1,355 +0,0 @@ -package storage - -import ( - "path/filepath" - "strings" - - "github.com/pkg/errors" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/containers/storage/pkg/idtools" - "github.com/opencontainers/go-digest" - ddigest "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -func init() { - transports.Register(Transport) -} - -var ( - // Transport is an ImageTransport that uses either a default - // storage.Store or one that's it's explicitly told to use. - Transport StoreTransport = &storageTransport{} - // ErrInvalidReference is returned when ParseReference() is passed an - // empty reference. - ErrInvalidReference = errors.New("invalid reference") - // ErrPathNotAbsolute is returned when a graph root is not an absolute - // path name. - ErrPathNotAbsolute = errors.New("path name is not absolute") -) - -// StoreTransport is an ImageTransport that uses a storage.Store to parse -// references, either its own default or one that it's told to use. -type StoreTransport interface { - types.ImageTransport - // SetStore sets the default store for this transport. 
- SetStore(storage.Store) - // GetImage retrieves the image from the transport's store that's named - // by the reference. - GetImage(types.ImageReference) (*storage.Image, error) - // GetStoreImage retrieves the image from a specified store that's named - // by the reference. - GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error) - // ParseStoreReference parses a reference, overriding any store - // specification that it may contain. - ParseStoreReference(store storage.Store, reference string) (*storageReference, error) - // SetDefaultUIDMap sets the default UID map to use when opening stores. - SetDefaultUIDMap(idmap []idtools.IDMap) - // SetDefaultGIDMap sets the default GID map to use when opening stores. - SetDefaultGIDMap(idmap []idtools.IDMap) - // DefaultUIDMap returns the default UID map used when opening stores. - DefaultUIDMap() []idtools.IDMap - // DefaultGIDMap returns the default GID map used when opening stores. - DefaultGIDMap() []idtools.IDMap -} - -type storageTransport struct { - store storage.Store - defaultUIDMap []idtools.IDMap - defaultGIDMap []idtools.IDMap -} - -func (s *storageTransport) Name() string { - // Still haven't really settled on a name. - return "containers-storage" -} - -// SetStore sets the Store object which the Transport will use for parsing -// references when information about a Store is not directly specified as part -// of the reference. If one is not set, the library will attempt to initialize -// one with default settings when a reference needs to be parsed. Calling -// SetStore does not affect previously parsed references. -func (s *storageTransport) SetStore(store storage.Store) { - s.store = store -} - -// SetDefaultUIDMap sets the default UID map to use when opening stores. -func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) { - s.defaultUIDMap = idmap -} - -// SetDefaultGIDMap sets the default GID map to use when opening stores. -func (s *storageTransport) SetDefaultGIDMap(idmap []idtools.IDMap) { - s.defaultGIDMap = idmap -} - -// DefaultUIDMap returns the default UID map used when opening stores. -func (s *storageTransport) DefaultUIDMap() []idtools.IDMap { - return s.defaultUIDMap -} - -// DefaultGIDMap returns the default GID map used when opening stores. -func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { - return s.defaultGIDMap -} - -// ParseStoreReference takes a name or an ID, tries to figure out which it is -// relative to the given store, and returns it in a reference object. -func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { - var name reference.Named - var sum digest.Digest - var err error - if ref == "" { - return nil, ErrInvalidReference - } - if ref[0] == '[' { - // Ignore the store specifier. - closeIndex := strings.IndexRune(ref, ']') - if closeIndex < 1 { - return nil, ErrInvalidReference - } - ref = ref[closeIndex+1:] - } - refInfo := strings.SplitN(ref, "@", 2) - if len(refInfo) == 1 { - // A name. - name, err = reference.ParseNormalizedNamed(refInfo[0]) - if err != nil { - return nil, err - } - } else if len(refInfo) == 2 { - // An ID, possibly preceded by a name. 
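// Editorial aside (not part of the original file): a sketch of how the three
// accepted reference forms split under the branch logic here, using an
// illustrative 64-hex-digit ID <id>:
//
//   "busybox"       -> refInfo = {"busybox"}          (name only)
//   "@<id>"         -> refInfo = {"", "<id>"}         (ID only)
//   "busybox@<id>"  -> refInfo = {"busybox", "<id>"}  (name and ID)
//
// so an empty first component means the ID was not preceded by a name.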
- if refInfo[0] != "" { - name, err = reference.ParseNormalizedNamed(refInfo[0]) - if err != nil { - return nil, err - } - } - sum, err = digest.Parse(refInfo[1]) - if err != nil || sum.Validate() != nil { - sum, err = digest.Parse("sha256:" + refInfo[1]) - if err != nil || sum.Validate() != nil { - return nil, err - } - } - } else { // Coverage: len(refInfo) is always 1 or 2 - // Anything else: store specified in a form we don't - // recognize. - return nil, ErrInvalidReference - } - optionsList := "" - options := store.GraphOptions() - if len(options) > 0 { - optionsList = ":" + strings.Join(options, ",") - } - storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]" - id := "" - if sum.Validate() == nil { - id = sum.Hex() - } - refname := "" - if name != nil { - name = reference.TagNameOnly(name) - refname = verboseName(name) - } - if refname == "" { - logrus.Debugf("parsed reference into %q", storeSpec+"@"+id) - } else if id == "" { - logrus.Debugf("parsed reference into %q", storeSpec+refname) - } else { - logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id) - } - return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name), nil -} - -func (s *storageTransport) GetStore() (storage.Store, error) { - // Return the transport's previously-set store. If we don't have one - // of those, initialize one now. - if s.store == nil { - options := storage.DefaultStoreOptions - options.UIDMap = s.defaultUIDMap - options.GIDMap = s.defaultGIDMap - store, err := storage.GetStore(options) - if err != nil { - return nil, err - } - s.store = store - } - return s.store, nil -} - -// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"), -// possibly prefixed with a store specifier in the form "[_graphroot_]" or -// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or -// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]", -// tries to figure out which it is, and returns it in a reference object. -func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { - var store storage.Store - // Check if there's a store location prefix. If there is, then it - // needs to match a store that was previously initialized using - // storage.GetStore(), or be enough to let the storage library fill out - // the rest using knowledge that it has from elsewhere. - if reference[0] == '[' { - closeIndex := strings.IndexRune(reference, ']') - if closeIndex < 1 { - return nil, ErrInvalidReference - } - storeSpec := reference[1:closeIndex] - reference = reference[closeIndex+1:] - // Peel off a "driver@" from the start. - driverInfo := "" - driverSplit := strings.SplitN(storeSpec, "@", 2) - if len(driverSplit) != 2 { - if storeSpec == "" { - return nil, ErrInvalidReference - } - } else { - driverInfo = driverSplit[0] - if driverInfo == "" { - return nil, ErrInvalidReference - } - storeSpec = driverSplit[1] - if storeSpec == "" { - return nil, ErrInvalidReference - } - } - // Peel off a ":options" from the end. - var options []string - optionsSplit := strings.SplitN(storeSpec, ":", 2) - if len(optionsSplit) == 2 { - options = strings.Split(optionsSplit[1], ",") - storeSpec = optionsSplit[0] - } - // Peel off a "+runroot" from the new end. 
- runRootInfo := "" - runRootSplit := strings.SplitN(storeSpec, "+", 2) - if len(runRootSplit) == 2 { - runRootInfo = runRootSplit[1] - storeSpec = runRootSplit[0] - } - // The rest is our graph root. - rootInfo := storeSpec - // Check that any paths are absolute paths. - if rootInfo != "" && !filepath.IsAbs(rootInfo) { - return nil, ErrPathNotAbsolute - } - if runRootInfo != "" && !filepath.IsAbs(runRootInfo) { - return nil, ErrPathNotAbsolute - } - store2, err := storage.GetStore(storage.StoreOptions{ - GraphDriverName: driverInfo, - GraphRoot: rootInfo, - RunRoot: runRootInfo, - GraphDriverOptions: options, - UIDMap: s.defaultUIDMap, - GIDMap: s.defaultGIDMap, - }) - if err != nil { - return nil, err - } - store = store2 - } else { - // We didn't have a store spec, so use the default. - store2, err := s.GetStore() - if err != nil { - return nil, err - } - store = store2 - } - return s.ParseStoreReference(store, reference) -} - -func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { - dref := ref.DockerReference() - if dref == nil { - if sref, ok := ref.(*storageReference); ok { - if sref.id != "" { - if img, err := store.Image(sref.id); err == nil { - return img, nil - } - } - } - return nil, ErrInvalidReference - } - return store.Image(verboseName(dref)) -} - -func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) { - store, err := s.GetStore() - if err != nil { - return nil, err - } - return s.GetStoreImage(store, ref) -} - -func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { - // Check that there's a store location prefix. Values we're passed are - // expected to come from PolicyConfigurationIdentity or - // PolicyConfigurationNamespaces, so if there's no store location, - // something's wrong. - if scope[0] != '[' { - return ErrInvalidReference - } - // Parse the store location prefix. - closeIndex := strings.IndexRune(scope, ']') - if closeIndex < 1 { - return ErrInvalidReference - } - storeSpec := scope[1:closeIndex] - scope = scope[closeIndex+1:] - storeInfo := strings.SplitN(storeSpec, "@", 2) - if len(storeInfo) == 1 && storeInfo[0] != "" { - // One component: the graph root. - if !filepath.IsAbs(storeInfo[0]) { - return ErrPathNotAbsolute - } - } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" { - // Two components: the driver type and the graph root. - if !filepath.IsAbs(storeInfo[1]) { - return ErrPathNotAbsolute - } - } else { - // Anything else: scope specified in a form we don't - // recognize. - return ErrInvalidReference - } - // That might be all of it, and that's okay. - if scope == "" { - return nil - } - // But if there is anything left, it has to be a name, with or without - // a tag, with or without an ID, since we don't return namespace values - // that are just bare IDs. 
- scopeInfo := strings.SplitN(scope, "@", 2) - if len(scopeInfo) == 1 && scopeInfo[0] != "" { - _, err := reference.ParseNormalizedNamed(scopeInfo[0]) - if err != nil { - return err - } - } else if len(scopeInfo) == 2 && scopeInfo[0] != "" && scopeInfo[1] != "" { - _, err := reference.ParseNormalizedNamed(scopeInfo[0]) - if err != nil { - return err - } - _, err = ddigest.Parse("sha256:" + scopeInfo[1]) - if err != nil { - return err - } - } else { - return ErrInvalidReference - } - return nil -} - -func verboseName(name reference.Named) string { - name = reference.TagNameOnly(name) - tag := "" - if tagged, ok := name.(reference.NamedTagged); ok { - tag = ":" + tagged.Tag() - } - return name.Name() + tag -} diff --git a/vendor/github.com/containers/image/storage/storage_transport_test.go b/vendor/github.com/containers/image/storage/storage_transport_test.go deleted file mode 100644 index bcccfcf45d12..000000000000 --- a/vendor/github.com/containers/image/storage/storage_transport_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package storage - -import ( - "fmt" - "testing" - - "github.com/containers/image/docker/reference" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" -) - -func TestTransportName(t *testing.T) { - assert.Equal(t, "containers-storage", Transport.Name()) -} - -func TestTransportParseStoreReference(t *testing.T) { - for _, c := range []struct{ input, expectedRef, expectedID string }{ - {"", "", ""}, // Empty input - // Handling of the store prefix - // FIXME? Should we be silently discarding input like this? - {"[unterminated", "", ""}, // Unterminated store specifier - {"[garbage]busybox", "docker.io/library/busybox:latest", ""}, // Store specifier is overridden by the store we pass to ParseStoreReference - - {"UPPERCASEISINVALID", "", ""}, // Invalid single-component name - {"sha256:" + sha256digestHex, "docker.io/library/sha256:" + sha256digestHex, ""}, // Valid single-component name; the hex part is not an ID unless it has a "@" prefix - {sha256digestHex, "", ""}, // Invalid single-component ID; not an ID without a "@" prefix, so it's parsed as a name, but names aren't allowed to look like IDs - {"@" + sha256digestHex, "", sha256digestHex}, // Valid single-component ID - {"sha256:ab", "docker.io/library/sha256:ab", ""}, // Valid single-component name, explicit tag - {"busybox", "docker.io/library/busybox:latest", ""}, // Valid single-component name, implicit tag - {"busybox:notlatest", "docker.io/library/busybox:notlatest", ""}, // Valid single-component name, explicit tag - {"docker.io/library/busybox:notlatest", "docker.io/library/busybox:notlatest", ""}, // Valid single-component name, everything explicit - - {"UPPERCASEISINVALID@" + sha256digestHex, "", ""}, // Invalid name in name@ID - {"busybox@ab", "", ""}, // Invalid ID in name@ID - {"busybox@", "", ""}, // Empty ID in name@ID - {"busybox@sha256:" + sha256digestHex, "docker.io/library/busybox:latest", sha256digestHex}, // Valid two-component name, with ID using "sha256:" prefix - {"@" + sha256digestHex, "", sha256digestHex}, // Valid two-component name, with ID only - {"busybox@" + sha256digestHex, "docker.io/library/busybox:latest", sha256digestHex}, // Valid two-component name, implicit tag - {"busybox:notlatest@" + sha256digestHex, "docker.io/library/busybox:notlatest", sha256digestHex}, // Valid two-component name, explicit tag - {"docker.io/library/busybox:notlatest@" + 
sha256digestHex, "docker.io/library/busybox:notlatest", sha256digestHex}, // Valid two-component name, everything explicit - } { - storageRef, err := Transport.ParseStoreReference(Transport.(*storageTransport).store, c.input) - if c.expectedRef == "" && c.expectedID == "" { - assert.Error(t, err, c.input) - } else { - require.NoError(t, err, c.input) - assert.Equal(t, *(Transport.(*storageTransport)), storageRef.transport, c.input) - assert.Equal(t, c.expectedRef, storageRef.reference, c.input) - assert.Equal(t, c.expectedID, storageRef.id, c.input) - if c.expectedRef == "" { - assert.Nil(t, storageRef.name, c.input) - } else { - dockerRef, err := reference.ParseNormalizedNamed(c.expectedRef) - require.NoError(t, err) - require.NotNil(t, storageRef.name, c.input) - assert.Equal(t, dockerRef.String(), storageRef.name.String()) - } - } - } -} - -func TestTransportParseReference(t *testing.T) { - store := newStore(t) - driver := store.GraphDriverName() - root := store.GraphRoot() - - for _, c := range []struct{ prefix, expectedDriver, expectedRoot, expectedRunRoot string }{ - {"", driver, root, ""}, // Implicit store location prefix - {"[unterminated", "", "", ""}, // Unterminated store specifier - {"[]", "", "", ""}, // Empty store specifier - {"[relative/path]", "", "", ""}, // Non-absolute graph root path - {"[" + driver + "@relative/path]", "", "", ""}, // Non-absolute graph root path - {"[thisisunknown@" + root + "suffix2]", "", "", ""}, // Unknown graph driver - {"[" + root + "suffix1]", "", root + "suffix1", ""}, // A valid root path, but no run dir - {"[" + driver + "@" + root + "suffix3+" + root + "suffix4]", - driver, - root + "suffix3", - root + "suffix4"}, // A valid root@graph+run set - {"[" + driver + "@" + root + "suffix3+" + root + "suffix4:options,options,options]", - driver, - root + "suffix3", - root + "suffix4"}, // A valid root@graph+run+options set - } { - t.Logf("parsing %q", c.prefix+"busybox") - ref, err := Transport.ParseReference(c.prefix + "busybox") - if c.expectedDriver == "" { - assert.Error(t, err, c.prefix) - } else { - require.NoError(t, err, c.prefix) - storageRef, ok := ref.(*storageReference) - require.True(t, ok, c.prefix) - assert.Equal(t, c.expectedDriver, storageRef.transport.store.GraphDriverName(), c.prefix) - assert.Equal(t, c.expectedRoot, storageRef.transport.store.GraphRoot(), c.prefix) - if c.expectedRunRoot != "" { - assert.Equal(t, c.expectedRunRoot, storageRef.transport.store.RunRoot(), c.prefix) - } - } - } -} - -func TestTransportValidatePolicyConfigurationScope(t *testing.T) { - store := newStore(t) - driver := store.GraphDriverName() - root := store.GraphRoot() - storeSpec := fmt.Sprintf("[%s@%s]", driver, root) // As computed in PolicyConfigurationNamespaces - - // Valid inputs - for _, scope := range []string{ - "[" + root + "suffix1]", // driverlessStoreSpec in PolicyConfigurationNamespaces - "[" + driver + "@" + root + "suffix3]", // storeSpec in PolicyConfigurationNamespaces - storeSpec + "sha256:ab", // Valid single-component name, explicit tag - storeSpec + "sha256:" + sha256digestHex, // Valid single-component ID with a longer explicit tag - storeSpec + "busybox", // Valid single-component name, implicit tag; NOTE that this non-canonical form would be interpreted as a scope for host busybox - storeSpec + "busybox:notlatest", // Valid single-component name, explicit tag; NOTE that this non-canonical form would be interpreted as a scope for host busybox - storeSpec + "docker.io/library/busybox:notlatest", // Valid single-component 
name, everything explicit - storeSpec + "busybox@" + sha256digestHex, // Valid two-component name, implicit tag; NOTE that this non-canonical form would be interpreted as a scope for host busybox (and never match) - storeSpec + "busybox:notlatest@" + sha256digestHex, // Valid two-component name, explicit tag; NOTE that this non-canonical form would be interpreted as a scope for host busybox (and never match) - storeSpec + "docker.io/library/busybox:notlatest@" + sha256digestHex, // Valid two-component name, everything explicit - } { - err := Transport.ValidatePolicyConfigurationScope(scope) - assert.NoError(t, err, scope) - } - - // Invalid inputs - for _, scope := range []string{ - "busybox", // Unprefixed reference - "[unterminated", // Unterminated store specifier - "[]", // Empty store specifier - "[relative/path]", // Non-absolute graph root path - "[" + driver + "@relative/path]", // Non-absolute graph root path - // "[thisisunknown@" + root + "suffix2]", // Unknown graph driver FIXME: validate against storage.ListGraphDrivers() once that's available - storeSpec + sha256digestHex, // Almost a valid single-component name, but rejected because it looks like an ID that's missing its "@" prefix - storeSpec + "@", // An incomplete two-component name - storeSpec + "@" + sha256digestHex, // A valid two-component name, but ID-only, so not a valid scope - - storeSpec + "UPPERCASEISINVALID", // Invalid single-component name - storeSpec + "UPPERCASEISINVALID@" + sha256digestHex, // Invalid name in name@ID - storeSpec + "busybox@ab", // Invalid ID in name@ID - storeSpec + "busybox@", // Empty ID in name@ID - storeSpec + "busybox@sha256:" + sha256digestHex, // This (in a digested docker/docker reference format) is also invalid; this can't actually be matched by a storageReference.PolicyConfigurationIdentity, so it should be rejected - } { - err := Transport.ValidatePolicyConfigurationScope(scope) - assert.Error(t, err, scope) - } -} diff --git a/vendor/github.com/containers/image/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/transports/alltransports/alltransports.go deleted file mode 100644 index dd80b7f91612..000000000000 --- a/vendor/github.com/containers/image/transports/alltransports/alltransports.go +++ /dev/null @@ -1,33 +0,0 @@ -package alltransports - -import ( - "strings" - - // register all known transports - // NOTE: Make sure docs/policy.json.md is updated when adding or updating - // a transport. - _ "github.com/containers/image/directory" - _ "github.com/containers/image/docker" - _ "github.com/containers/image/docker/archive" - _ "github.com/containers/image/docker/daemon" - _ "github.com/containers/image/oci/layout" - _ "github.com/containers/image/openshift" - // The ostree transport is registered by ostree*.go - _ "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -// ParseImageName converts a URL-like image name to a types.ImageReference. 
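// Editorial aside (not part of the original file): a typical call, assuming
// the "docker" transport has been registered by the imports above:
//
//   ref, err := alltransports.ParseImageName("docker://busybox:latest")
//   if err != nil {
//       // handle the error
//   }
//   fmt.Println(transports.ImageName(ref)) // prints "docker://busybox:latest"
//
// Everything before the first ":" names the transport; the remainder is
// passed to that transport's ParseReference.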
-func ParseImageName(imgName string) (types.ImageReference, error) { - parts := strings.SplitN(imgName, ":", 2) - if len(parts) != 2 { - return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName) - } - transport := transports.Get(parts[0]) - if transport == nil { - return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0]) - } - return transport.ParseReference(parts[1]) -} diff --git a/vendor/github.com/containers/image/transports/alltransports/alltransports_test.go b/vendor/github.com/containers/image/transports/alltransports/alltransports_test.go deleted file mode 100644 index 2cff97852fb1..000000000000 --- a/vendor/github.com/containers/image/transports/alltransports/alltransports_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package alltransports - -import ( - "testing" - - "github.com/containers/image/transports" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestParseImageName(t *testing.T) { - // This primarily tests error handling, TestImageNameHandling is a table-driven - // test for the expected values. - for _, name := range []string{ - "", // Empty - "busybox", // No transport name - ":busybox", // Empty transport name - "docker:", // Empty transport reference - } { - _, err := ParseImageName(name) - assert.Error(t, err, name) - } -} - -// A table-driven test summarizing the various transports' behavior. -func TestImageNameHandling(t *testing.T) { - // Always registered transports - for _, c := range []struct{ transport, input, roundtrip string }{ - {"dir", "/etc", "/etc"}, - {"docker", "//busybox", "//busybox:latest"}, - {"docker", "//busybox:notlatest", "//busybox:notlatest"}, // This also tests handling of multiple ":" characters - {"docker-daemon", "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"}, - {"docker-daemon", "busybox:latest", "busybox:latest"}, - {"docker-archive", "/var/lib/oci/busybox.tar:busybox:latest", "/var/lib/oci/busybox.tar:docker.io/library/busybox:latest"}, - {"docker-archive", "busybox.tar:busybox:latest", "busybox.tar:docker.io/library/busybox:latest"}, - {"oci", "/etc:sometag", "/etc:sometag"}, - // "atomic" not tested here because it depends on per-user configuration for the default cluster. - // "containers-storage" not tested here because it needs to initialize various directories on the fs. - } { - fullInput := c.transport + ":" + c.input - ref, err := ParseImageName(fullInput) - require.NoError(t, err, fullInput) - s := transports.ImageName(ref) - assert.Equal(t, c.transport+":"+c.roundtrip, s, fullInput) - } - - // Possibly stubbed-out transports: Only verify that something is registered. 
- for _, c := range []string{"ostree"} { - transport := transports.Get(c) - assert.NotNil(t, transport, c) - } -} diff --git a/vendor/github.com/containers/image/transports/alltransports/ostree.go b/vendor/github.com/containers/image/transports/alltransports/ostree.go deleted file mode 100644 index 0fc5d7ef79f8..000000000000 --- a/vendor/github.com/containers/image/transports/alltransports/ostree.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !containers_image_ostree_stub - -package alltransports - -import ( - // Register the ostree transport - _ "github.com/containers/image/ostree" -) diff --git a/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go deleted file mode 100644 index 8b01afe7cc06..000000000000 --- a/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build containers_image_ostree_stub - -package alltransports - -import "github.com/containers/image/transports" - -func init() { - transports.Register(transports.NewStubTransport("ostree")) -} diff --git a/vendor/github.com/containers/image/transports/stub.go b/vendor/github.com/containers/image/transports/stub.go deleted file mode 100644 index 087f69b6eacd..000000000000 --- a/vendor/github.com/containers/image/transports/stub.go +++ /dev/null @@ -1,36 +0,0 @@ -package transports - -import ( - "fmt" - - "github.com/containers/image/types" -) - -// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. -type stubTransport string - -// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. -func NewStubTransport(name string) types.ImageTransport { - return stubTransport(name) -} - -// Name returns the name of the transport, which must be unique among other transports. -func (s stubTransport) Name() string { - return string(s) -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) { - return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s)) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error { - // Allowing any reference in here allows tools with some transports stubbed-out to still - // use signature verification policies which refer to these stubbed-out transports. - // See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON . 
- return nil -} diff --git a/vendor/github.com/containers/image/transports/stub_test.go b/vendor/github.com/containers/image/transports/stub_test.go deleted file mode 100644 index f181a1ace90d..000000000000 --- a/vendor/github.com/containers/image/transports/stub_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package transports - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestStubTransport(t *testing.T) { - const name = "whatever" - - s := NewStubTransport(name) - assert.Equal(t, name, s.Name()) - _, err := s.ParseReference("this is rejected regardless of content") - assert.Error(t, err) - err = s.ValidatePolicyConfigurationScope("this is accepted regardless of content") - assert.NoError(t, err) -} diff --git a/vendor/github.com/containers/image/transports/transports.go b/vendor/github.com/containers/image/transports/transports.go deleted file mode 100644 index 687d0a44e31e..000000000000 --- a/vendor/github.com/containers/image/transports/transports.go +++ /dev/null @@ -1,90 +0,0 @@ -package transports - -import ( - "fmt" - "sort" - "sync" - - "github.com/containers/image/types" -) - -// knownTransports is a registry of known ImageTransport instances. -type knownTransports struct { - transports map[string]types.ImageTransport - mu sync.Mutex -} - -func (kt *knownTransports) Get(k string) types.ImageTransport { - kt.mu.Lock() - t := kt.transports[k] - kt.mu.Unlock() - return t -} - -func (kt *knownTransports) Remove(k string) { - kt.mu.Lock() - delete(kt.transports, k) - kt.mu.Unlock() -} - -func (kt *knownTransports) Add(t types.ImageTransport) { - kt.mu.Lock() - defer kt.mu.Unlock() - name := t.Name() - if t := kt.transports[name]; t != nil { - panic(fmt.Sprintf("Duplicate image transport name %s", name)) - } - kt.transports[name] = t -} - -var kt *knownTransports - -func init() { - kt = &knownTransports{ - transports: make(map[string]types.ImageTransport), - } -} - -// Get returns the transport specified by name or nil when unavailable. -func Get(name string) types.ImageTransport { - return kt.Get(name) -} - -// Delete deletes a transport from the registered transports. -func Delete(name string) { - kt.Remove(name) -} - -// Register registers a transport. -func Register(t types.ImageTransport) { - kt.Add(t) -} - -// ImageName converts a types.ImageReference into a URL-like image name, which MUST be such that -// ParseImageName(ImageName(reference)) returns an equivalent reference. -// -// This is the generally recommended way to refer to images in the UI. -// -// NOTE: The returned string is not promised to be equal to the original input to ParseImageName; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -func ImageName(ref types.ImageReference) string { - return ref.Transport().Name() + ":" + ref.StringWithinTransport() -} - -// ListNames returns a list of non-deprecated transport names. -// Deprecated transports can be used, but are not presented to users. 
-func ListNames() []string { - kt.mu.Lock() - defer kt.mu.Unlock() - deprecated := map[string]bool{ - "atomic": true, - } - var names []string - for _, transport := range kt.transports { - if !deprecated[transport.Name()] { - names = append(names, transport.Name()) - } - } - sort.Strings(names) - return names -} diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go deleted file mode 100644 index 6bcd392f1358..000000000000 --- a/vendor/github.com/containers/image/types/types.go +++ /dev/null @@ -1,333 +0,0 @@ -package types - -import ( - "context" - "io" - "time" - - "github.com/containers/image/docker/reference" - "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ImageTransport is a top-level namespace for ways to store/load an image. -// It should generally correspond to ImageSource/ImageDestination implementations. -// -// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport. -// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS -// (or, even, IPv4 or IPv6). -// -// OTOH all images using the same transport should (apart from versions of the image format) be interoperable. -// For example, several different ImageTransport implementations may be based on local filesystem paths, -// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...) -// -// See also transports.KnownTransports. -type ImageTransport interface { - // Name returns the name of the transport, which must be unique among other transports. - Name() string - // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. - ParseReference(reference string) (ImageReference, error) - // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys - // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). - // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. - // scope passed to this function will not be "", that value is always allowed. - ValidatePolicyConfigurationScope(scope string) error -} - -// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport. -// -// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening -// within an ImageTransport.ParseReference() or equivalent API creating the reference object. -// That's also why the various identification/formatting methods of this type do not support returning errors. -// -// WARNING: While this design freezes the content of the reference within this process, it can not freeze the outside -// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on. -type ImageReference interface { - Transport() ImageTransport - // StringWithinTransport returns a string representation of the reference, which MUST be such that - // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. - // NOTE: The returned string is not promised to be equal to the original input to ParseReference; - // e.g. 
default attribute values omitted by the user may be filled in in the return value, or vice versa. - // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; - // instead, see transports.ImageName(). - StringWithinTransport() string - - // DockerReference returns a Docker reference associated with this reference - // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, - // not e.g. after redirect or alias processing), or nil if unknown/not applicable. - DockerReference() reference.Named - - // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. - // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; - // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical - // (i.e. various references with exactly the same semantics should return the same configuration identity) - // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but - // not required/guaranteed that it will be a valid input to Transport().ParseReference(). - // Returns "" if configuration identities for these references are not supported. - PolicyConfigurationIdentity() string - - // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search - // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed - // in order, terminating on first match, and an implicit "" is always checked at the end. - // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), - // and each following element to be a prefix of the element preceding it. - PolicyConfigurationNamespaces() []string - - // NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. - // The caller must call .Close() on the returned Image. - // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, - // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. - NewImage(ctx *SystemContext) (Image, error) - // NewImageSource returns a types.ImageSource for this reference, - // asking the backend to use a manifest from requestedManifestMIMETypes if possible. - // nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. - // The caller must call .Close() on the returned ImageSource. - NewImageSource(ctx *SystemContext, requestedManifestMIMETypes []string) (ImageSource, error) - // NewImageDestination returns a types.ImageDestination for this reference. - // The caller must call .Close() on the returned ImageDestination. - NewImageDestination(ctx *SystemContext) (ImageDestination, error) - - // DeleteImage deletes the named image from the registry, if supported. - DeleteImage(ctx *SystemContext) error -} - -// BlobInfo collects known information about a blob (layer/config). -// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that. -type BlobInfo struct { - Digest digest.Digest // "" if unknown. - Size int64 // -1 if unknown - URLs []string -} - -// ImageSource is a service, possibly remote (= slow), to download components of a single image. 
-// This is primarily useful for copying images around; for examining their properties, Image (below) -// is usually more useful. -// Each ImageSource should eventually be closed by calling Close(). -// -// WARNING: Various methods which return an object identified by digest generally do not -// validate that the returned data actually matches that digest; this is the caller’s responsibility. -type ImageSource interface { - // Reference returns the reference used to set up this source, _as specified by the user_ - // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. - Reference() ImageReference - // Close removes resources associated with an initialized ImageSource, if any. - Close() error - // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). - // It may use a remote (= slow) service. - GetManifest() ([]byte, string, error) - // GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest - // out of a manifest list. - GetTargetManifest(digest digest.Digest) ([]byte, string, error) - // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). - // The Digest field in BlobInfo is guaranteed to be provided; Size may be -1. - GetBlob(BlobInfo) (io.ReadCloser, int64, error) - // GetSignatures returns the image's signatures. It may use a remote (= slow) service. - GetSignatures(context.Context) ([][]byte, error) -} - -// ImageDestination is a service, possibly remote (= slow), to store components of a single image. -// -// There is a specific required order for some of the calls: -// PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time) -// ReapplyBlob, if used, MUST only be called if HasBlob returned true for the same blob digest -// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents) -// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist. -// -// Each ImageDestination should eventually be closed by calling Close(). -type ImageDestination interface { - // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, - // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. - Reference() ImageReference - // Close removes resources associated with an initialized ImageDestination, if any. - Close() error - - // SupportedManifestMIMETypes tells which manifest mime types the destination supports. - // If an empty slice or nil is returned, then any mime type can be tried for upload. - SupportedManifestMIMETypes() []string - // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. - // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. - SupportsSignatures() error - // ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. - ShouldCompressLayers() bool - // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should actually be - // uploaded to the image destination, true otherwise. 
- AcceptsForeignLayerURLs() bool - // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. - MustMatchRuntimeOS() bool - // PutBlob writes contents of stream and returns data representing the result (with all data filled in). - // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. - // inputInfo.Size is the expected length of stream, if known. - // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available - // to any other readers for download using the supplied digest. - // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. - PutBlob(stream io.Reader, inputInfo BlobInfo) (BlobInfo, error) - // HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. - // Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. - // If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); - // it returns a non-nil error only on an unexpected failure. - HasBlob(info BlobInfo) (bool, int64, error) - // ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree. - ReapplyBlob(info BlobInfo) (BlobInfo, error) - // PutManifest writes manifest to the destination. - // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. - // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), - // but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. - PutManifest(manifest []byte) error - PutSignatures(signatures [][]byte) error - // Commit marks the process of storing the image as successful and asks for the image to be persisted. - // WARNING: This does not have any transactional semantics: - // - Uploaded data MAY be visible to others before Commit() is called - // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) - Commit() error -} - -// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available, -// refuses specifically this manifest type, but may accept a different manifest type. -type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise. - Err error -} - -func (e ManifestTypeRejectedError) Error() string { - return e.Err.Error() -} - -// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs. -// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them, -// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else. 
-// This also makes the UnparsedImage→Image conversion an explicitly visible step. -// Each UnparsedImage should eventually be closed by calling Close(). -type UnparsedImage interface { - // Reference returns the reference used to set up this source, _as specified by the user_ - // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. - Reference() ImageReference - // Close removes resources associated with an initialized UnparsedImage, if any. - Close() error - // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. - Manifest() ([]byte, string, error) - // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. - Signatures(ctx context.Context) ([][]byte, error) -} - -// Image is the primary API for inspecting properties of images. -// Each Image should eventually be closed by calling Close(). -type Image interface { - // Note that Reference may return nil in the return value of UpdatedImage! - UnparsedImage - // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. - // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. - ConfigInfo() BlobInfo - // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. - // The result is cached; it is OK to call this however often you need. - ConfigBlob() ([]byte, error) - // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about - // layers in the resulting configuration isn't guaranteed to be returned due to how - // old image manifests work (docker v2s1 especially). - OCIConfig() (*v1.Image, error) - // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided; Size may be -1. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfos() []BlobInfo - // EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. - // It returns false if the manifest does not embed a Docker reference. - // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) - EmbeddedDockerReferenceConflicts(ref reference.Named) bool - // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. - Inspect() (*ImageInspectInfo, error) - // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. - // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive - // (most importantly it forces us to download the full layers even if they are already present at the destination). - UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool - // UpdatedImage returns a types.Image modified according to options. - // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired. - // This does not change the state of the original Image object. 
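// Editorial aside (not part of the original file): a hypothetical
// manifest-conversion call, assuming img is a types.Image held by a caller:
//
//   updated, err := img.UpdatedImage(types.ManifestUpdateOptions{
//       ManifestMIMEType: "application/vnd.docker.distribution.manifest.v2+json",
//   })
//
// img is left unchanged; updated is a new Image reflecting the requested
// manifest type.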
- UpdatedImage(options ManifestUpdateOptions) (Image, error) - // IsMultiImage returns true if the image's manifest is a list of images, false otherwise. - IsMultiImage() bool - // Size returns an approximation of the amount of disk space which is consumed by the image in its current - // location. If the size is not known, -1 will be returned. - Size() (int64, error) -} - -// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest -type ManifestUpdateOptions struct { - LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls) which should replace the originals, in order (the root layer first, and then successive layered layers) - EmbeddedDockerReference reference.Named - ManifestMIMEType string - // The values below are NOT requests to modify the image; they provide optional context which may or may not be used. - InformationOnly ManifestUpdateInformation -} - -// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here -// only to make writing struct literals possible. -type ManifestUpdateInformation struct { - Destination ImageDestination // and yes, UpdatedManifest may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go) - LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers) - LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order. -} - -// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration. -// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported -// for other manifest types. -type ImageInspectInfo struct { - Tag string - Created time.Time - DockerVersion string - Labels map[string]string - Architecture string - Os string - Layers []string -} - -// DockerAuthConfig contains authorization information for connecting to a registry. -type DockerAuthConfig struct { - Username string - Password string -} - -// SystemContext allows parametrizing access to implicitly-accessed resources, -// like configuration files in /etc and users' login state in their home directory. -// Various components can share the same field only if their semantics is exactly -// the same; if in doubt, add a new field. -// It is always OK to pass nil instead of a SystemContext. -type SystemContext struct { - // If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/). - // Not used for any of the more specific path overrides available in this struct. - // Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it). - // NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement, just set RootForImplicitAbsolutePaths . - // and there is no need to worry about the environment.) - // NOTE: This does NOT affect paths starting by $HOME. - RootForImplicitAbsolutePaths string - - // === Global configuration overrides === - // If not "", overrides the system's default path for signature.Policy configuration. 
-	SignaturePolicyPath string
-	// If not "", overrides the system's default path for registries.d (Docker signature storage configuration)
-	RegistriesDirPath string
-
-	// === docker.Transport overrides ===
-	// If not "", a directory containing a CA certificate (ending with ".crt"),
-	// a client certificate (ending with ".cert") and a client certificate key
-	// (ending with ".key") used when talking to a Docker Registry.
-	DockerCertPath string
-	// If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
-	// Ignored if DockerCertPath is non-empty.
-	DockerPerHostCertDirPath string
-	DockerInsecureSkipTLSVerify bool // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
-	// if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
-	DockerAuthConfig *DockerAuthConfig
-	// if not "", a User-Agent header is added to each request when contacting a registry.
-	DockerRegistryUserAgent string
-	// if true, a V1 ping attempt isn't done to give users a better error. Default is false.
-	// Note that this field is used mainly to integrate containers/image into projectatomic/docker
-	// in order to not break any existing docker's integration tests.
-	DockerDisableV1Ping bool
-	// Directory to use for OSTree temporary files
-	OSTreeTmpDirPath string
-}
-
-// ProgressProperties is used to pass information from the copy code to a monitor which
-// can use the real-time information to produce output or react to changes.
-type ProgressProperties struct {
-	Artifact BlobInfo
-	Offset   uint64
-}
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf
deleted file mode 100644
index d4725e12161a..000000000000
--- a/vendor/github.com/containers/image/vendor.conf
+++ /dev/null
@@ -1,37 +0,0 @@
-github.com/sirupsen/logrus v1.0.0
-github.com/containers/storage 5d8c2f87387fa5be9fa526ae39fbd79b8bdf27be
-github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
-github.com/docker/distribution df5327f76fb6468b84a87771e361762b8be23fdb
-github.com/docker/docker 75843d36aa5c3eaade50da005f9e0ff2602f3d5e
-github.com/docker/go-connections 7da10c8c50cad14494ec818dcdfb6506265c0086
-github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
-github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
-github.com/ghodss/yaml 04f313413ffd65ce25f2541bfd2b2ceec5c0908c
-github.com/gorilla/context 08b5f424b9271eedf6f9f0ce86cb9396ed337a42
-github.com/gorilla/mux 94e7d24fd285520f3d12ae998f7fdd6b5393d453
-github.com/imdario/mergo 50d4dbd4eb0e84778abe37cefef140271d96fade
-github.com/mattn/go-runewidth 14207d285c6c197daabb5c9793d63e7af9ab2d50
-github.com/mattn/go-shellwords 005a0944d84452842197c2108bd9168ced206f78
-github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
-github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
-github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc
-github.com/opencontainers/image-spec v1.0.0
-github.com/opencontainers/runc 6b1d0e76f239ffb435445e5ae316d2676c07c6e3
-github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
-github.com/pkg/errors 248dadf4e9068a0b3e79f02ed0a610d935de5302
-github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
-github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
-github.com/vbatts/tar-split 
bd4c5d64c3e9297f410025a3b1bd0c58f659e721
-golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8
-golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe
-golang.org/x/sys 075e574b89e4c2d22f2286a7e2b919519c6f3547
-gopkg.in/cheggaaa/pb.v1 d7e6ca3010b6f084d8056847f55d7f572f180678
-gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a
-k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6
-github.com/xeipuuv/gojsonschema master
-github.com/xeipuuv/gojsonreference master
-github.com/xeipuuv/gojsonpointer master
-github.com/tchap/go-patricia v2.2.6
-github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
-github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
-github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
diff --git a/vendor/github.com/containers/image/version/version.go b/vendor/github.com/containers/image/version/version.go
deleted file mode 100644
index 6644bcff3b2e..000000000000
--- a/vendor/github.com/containers/image/version/version.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package version
-
-import "fmt"
-
-const (
-	// VersionMajor is for API-incompatible changes
-	VersionMajor = 0
-	// VersionMinor is for adding functionality in a backwards-compatible manner
-	VersionMinor = 1
-	// VersionPatch is for backwards-compatible bug fixes
-	VersionPatch = 0
-
-	// VersionDev indicates development branch. Releases will be empty string.
-	VersionDev = "-dev"
-)
-
-// Version is the specification version that the package types support.
-var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/docker/go-metrics/CONTRIBUTING.md b/vendor/github.com/docker/go-metrics/CONTRIBUTING.md
deleted file mode 100644
index b8a512c36654..000000000000
--- a/vendor/github.com/docker/go-metrics/CONTRIBUTING.md
+++ /dev/null
@@ -1,55 +0,0 @@
-# Contributing
-
-## Sign your work
-
-The sign-off is a simple line at the end of the explanation for the patch. Your
-signature certifies that you wrote the patch or otherwise have the right to pass
-it on as an open-source patch. The rules are pretty simple: if you can certify
-the below (from [developercertificate.org](http://developercertificate.org/)):
-
-```
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
-    have the right to submit it under the open source license
-    indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
-    of my knowledge, is covered under an appropriate open source
-    license and I have the right under that license to submit that
-    work with modifications, whether created in whole or in part
-    by me, under the same open source license (unless I am
-    permitted to submit under a different license), as indicated
-    in the file; or
-
-(c) The contribution was provided directly to me by some other
-    person who certified (a), (b) or (c) and I have not modified
-    it. 
-
-(d) I understand and agree that this project and the contribution
-    are public and that a record of the contribution (including all
-    personal information I submit with it, including my sign-off) is
-    maintained indefinitely and may be redistributed consistent with
-    this project or the open source license(s) involved.
-```
-
-Then you just add a line to every git commit message:
-
-    Signed-off-by: Joe Smith <joe.smith@email.com>
-
-Use your real name (sorry, no pseudonyms or anonymous contributions.)
-
-If you set your `user.name` and `user.email` git configs, you can sign your
-commit automatically with `git commit -s`.
diff --git a/vendor/github.com/docker/go-metrics/LICENSE b/vendor/github.com/docker/go-metrics/LICENSE
deleted file mode 100644
index 8f3fee627a45..000000000000
--- a/vendor/github.com/docker/go-metrics/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        https://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2016 Docker, Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/go-metrics/LICENSE.docs b/vendor/github.com/docker/go-metrics/LICENSE.docs deleted file mode 100644 index e26cd4fc8ed9..000000000000 --- a/vendor/github.com/docker/go-metrics/LICENSE.docs +++ /dev/null @@ -1,425 +0,0 @@ -Attribution-ShareAlike 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. - - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. 
- Although not required by our licenses, you are encouraged to - respect those requests where reasonable. More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution-ShareAlike 4.0 International Public -License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution-ShareAlike 4.0 International Public License ("Public -License"). To the extent this Public License may be interpreted as a -contract, You are granted the Licensed Rights in consideration of Your -acceptance of these terms and conditions, and the Licensor grants You -such rights in consideration of benefits the Licensor receives from -making the Licensed Material available under these terms and -conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. BY-SA Compatible License means a license listed at - creativecommons.org/compatiblelicenses, approved by Creative - Commons as essentially the equivalent of this Public License. - - d. Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - e. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - f. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - g. License Elements means the license attributes listed in the name - of a Creative Commons Public License. The License Elements of this - Public License are Attribution and ShareAlike. - - h. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - i. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - j. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - k. 
Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - l. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - m. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. Additional offer from the Licensor -- Adapted Material. - Every recipient of Adapted Material from You - automatically receives an offer from the Licensor to - exercise the Licensed Rights in the Adapted Material - under the conditions of the Adapter's License You apply. - - c. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. 
Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - b. ShareAlike. - - In addition to the conditions in Section 3(a), if You Share - Adapted Material You produce, the following conditions also apply. - - 1. The Adapter's License You apply must be a Creative Commons - license with the same License Elements, this version or - later, or a BY-SA Compatible License. - - 2. You must include the text of, or the URI or hyperlink to, the - Adapter's License You apply. You may satisfy this condition - in any reasonable manner based on the medium, means, and - context in which You Share Adapted Material. - - 3. You may not offer or impose any additional or different terms - or conditions on, or apply any Effective Technological - Measures to, Adapted Material that restrict exercise of the - rights granted under the Adapter's License You apply. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. 
if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material, - - including for purposes of Section 3(b); and - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. 
Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public licenses. -Notwithstanding, Creative Commons may elect to apply one of its public -licenses to material it publishes and in those instances will be -considered the "Licensor." Except for the limited purpose of indicating -that material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the public -licenses. - -Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/docker/go-metrics/NOTICE b/vendor/github.com/docker/go-metrics/NOTICE deleted file mode 100644 index 8915f02773f5..000000000000 --- a/vendor/github.com/docker/go-metrics/NOTICE +++ /dev/null @@ -1,16 +0,0 @@ -Docker -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
diff --git a/vendor/github.com/docker/go-metrics/README.md b/vendor/github.com/docker/go-metrics/README.md
deleted file mode 100644
index a9e947cb566b..000000000000
--- a/vendor/github.com/docker/go-metrics/README.md
+++ /dev/null
@@ -1,91 +0,0 @@
-# go-metrics [![GoDoc](https://godoc.org/github.com/docker/go-metrics?status.svg)](https://godoc.org/github.com/docker/go-metrics) ![Badge Badge](http://doyouevenbadge.com/github.com/docker/go-metrics)
-
-This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.
-
-## Best Practices
-
-This package is meant to be used for collecting metrics in Docker projects.
-It is not meant to be used as a replacement for the prometheus client but to help enforce consistent naming across metrics collected.
-If you have not already read the prometheus best practices around naming and labels you can read the page [here](https://prometheus.io/docs/practices/naming/).
-
-The following are a few Docker specific rules that will help you name and work with metrics in your project.
-
-1. Namespace and Subsystem
-
-This package provides you with a namespace type that allows you to specify the same namespace and subsystem for your metrics.
-
-```go
-ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{
-	"version": dockerversion.Version,
-	"commit":  dockerversion.GitCommit,
-})
-```
-
-In the example above we are creating metrics for the Docker engine's daemon package.
-`engine` would be the namespace in this example where `daemon` is the subsystem or package where we are collecting the metrics.
-
-A namespace also allows you to attach constant labels to the metrics such as the git commit and version that it is collecting.
-
-2. Declaring your Metrics
-
-Try to keep all your metric declarations in one file.
-This makes it easy for others to see what constant labels are defined on the namespace and what labels are defined on the metrics when they are created.
-
-3. Use labels instead of multiple metrics
-
-Labels allow you to define one metric such as the time it takes to perform a certain action on an object.
-If we wanted to collect timings on various container actions such as create, start, and delete then we could define one metric called `container_actions` and use labels to specify the type of action.
-
-```go
-containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action")
-```
-
-The last parameter is the label name or key.
-When adding a data point to the metric you will use the `WithValues` function to specify the `action` that you are collecting for.
-
-```go
-containerActions.WithValues("create").UpdateSince(start)
-```
-
-4. Always use a unit
-
-The metric name should describe what you are measuring but you also need to provide the unit that it is being measured with.
-For a timer, the standard unit is seconds and a counter's standard unit is a total.
-For gauges you must provide the unit.
-This package provides a standard set of units for use within the Docker projects.
-
-```go
-Nanoseconds Unit = "nanoseconds"
-Seconds     Unit = "seconds"
-Bytes       Unit = "bytes"
-Total       Unit = "total"
-```
-
-If you need to use a unit but it is not defined in the package please open a PR to add it but first try to see if one of the already created units will work for your metric, i.e. seconds or nanoseconds vs adding milliseconds.
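Taken together, the conventions above can be exercised end to end. What follows is a minimal, self-contained sketch against the vendored `github.com/docker/go-metrics` API being removed in this diff; the version label value, the timed action, and the listen address are illustrative placeholders, not part of the original README.

```go
package main

import (
	"net/http"
	"time"

	metrics "github.com/docker/go-metrics"
)

func main() {
	// Namespace "engine"/"daemon" with constant labels, per rule 1 above.
	ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{
		"version": "0.0.0", // placeholder; the README uses dockerversion.Version
	})

	// One labeled timer covers every container action (rule 3); the seconds
	// suffix from rule 4 makes the full metric name
	// engine_daemon_container_actions_seconds.
	containerActions := ns.NewLabeledTimer(
		"container_actions",
		"The number of seconds it takes to process each container action",
		"action",
	)

	metrics.Register(ns)

	start := time.Now()
	// ... perform the "create" action here ...
	containerActions.WithValues("create").UpdateSince(start)

	// Expose everything registered above on the standard endpoint.
	http.Handle("/metrics", metrics.Handler())
	_ = http.ListenAndServe(":9090", nil)
}
```

Registering the namespace once is enough: every metric later added to `ns` is collected at scrape time and exported through the same handler.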
-
-## Docs
-
-Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics).
-
-## HTTP Metrics
-
-To instrument an HTTP handler, you can wrap the code like this:
-
-```go
-namespace := metrics.NewNamespace("docker_distribution", "http", metrics.Labels{"handler": "your_http_handler_name"})
-httpMetrics := namespace.NewDefaultHttpMetrics("your_http_handler_name")
-metrics.Register(namespace)
-instrumentedHandler = metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler)
-```
-Note: The `handler` label must be provided when a new namespace is created.
-
-## Additional Metrics
-
-Additional metrics are also defined here that are not available in the prometheus client.
-If you need a custom metric and it is generic enough to be used by multiple projects, define it here.
-
-
-## Copyright and license
-
-Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/vendor/github.com/docker/go-metrics/counter.go b/vendor/github.com/docker/go-metrics/counter.go
deleted file mode 100644
index fe36316a45c0..000000000000
--- a/vendor/github.com/docker/go-metrics/counter.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package metrics
-
-import "github.com/prometheus/client_golang/prometheus"
-
-// Counter is a metric that can only increment its current count
-type Counter interface {
-	// Inc adds Sum(vs) to the counter. Sum(vs) must be positive.
-	//
-	// If len(vs) == 0, increments the counter by 1.
-	Inc(vs ...float64)
-}
-
-// LabeledCounter is a counter that must have labels populated before use.
-type LabeledCounter interface {
-	WithValues(vs ...string) Counter
-}
-
-type labeledCounter struct {
-	pc *prometheus.CounterVec
-}
-
-func (lc *labeledCounter) WithValues(vs ...string) Counter {
-	return &counter{pc: lc.pc.WithLabelValues(vs...)}
-}
-
-func (lc *labeledCounter) Describe(ch chan<- *prometheus.Desc) {
-	lc.pc.Describe(ch)
-}
-
-func (lc *labeledCounter) Collect(ch chan<- prometheus.Metric) {
-	lc.pc.Collect(ch)
-}
-
-type counter struct {
-	pc prometheus.Counter
-}
-
-func (c *counter) Inc(vs ...float64) {
-	if len(vs) == 0 {
-		c.pc.Inc()
-	}
-
-	c.pc.Add(sumFloat64(vs...))
-}
-
-func (c *counter) Describe(ch chan<- *prometheus.Desc) {
-	c.pc.Describe(ch)
-}
-
-func (c *counter) Collect(ch chan<- prometheus.Metric) {
-	c.pc.Collect(ch)
-}
diff --git a/vendor/github.com/docker/go-metrics/docs.go b/vendor/github.com/docker/go-metrics/docs.go
deleted file mode 100644
index 8fbdfc697d5b..000000000000
--- a/vendor/github.com/docker/go-metrics/docs.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects. 
-
-package metrics
diff --git a/vendor/github.com/docker/go-metrics/gauge.go b/vendor/github.com/docker/go-metrics/gauge.go
deleted file mode 100644
index 74296e87740b..000000000000
--- a/vendor/github.com/docker/go-metrics/gauge.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package metrics
-
-import "github.com/prometheus/client_golang/prometheus"
-
-// Gauge is a metric that allows incrementing and decrementing a value
-type Gauge interface {
-	Inc(...float64)
-	Dec(...float64)
-
-	// Add adds the provided value to the gauge's current value
-	Add(float64)
-
-	// Set replaces the gauge's current value with the provided value
-	Set(float64)
-}
-
-// LabeledGauge describes a gauge that must have values populated before use.
-type LabeledGauge interface {
-	WithValues(labels ...string) Gauge
-}
-
-type labeledGauge struct {
-	pg *prometheus.GaugeVec
-}
-
-func (lg *labeledGauge) WithValues(labels ...string) Gauge {
-	return &gauge{pg: lg.pg.WithLabelValues(labels...)}
-}
-
-func (lg *labeledGauge) Describe(c chan<- *prometheus.Desc) {
-	lg.pg.Describe(c)
-}
-
-func (lg *labeledGauge) Collect(c chan<- prometheus.Metric) {
-	lg.pg.Collect(c)
-}
-
-type gauge struct {
-	pg prometheus.Gauge
-}
-
-func (g *gauge) Inc(vs ...float64) {
-	if len(vs) == 0 {
-		g.pg.Inc()
-	}
-
-	g.Add(sumFloat64(vs...))
-}
-
-func (g *gauge) Dec(vs ...float64) {
-	if len(vs) == 0 {
-		g.pg.Dec()
-	}
-
-	g.Add(-sumFloat64(vs...))
-}
-
-func (g *gauge) Add(v float64) {
-	g.pg.Add(v)
-}
-
-func (g *gauge) Set(v float64) {
-	g.pg.Set(v)
-}
-
-func (g *gauge) Describe(c chan<- *prometheus.Desc) {
-	g.pg.Describe(c)
-}
-
-func (g *gauge) Collect(c chan<- prometheus.Metric) {
-	g.pg.Collect(c)
-}
diff --git a/vendor/github.com/docker/go-metrics/handler.go b/vendor/github.com/docker/go-metrics/handler.go
deleted file mode 100644
index 05601e9ecd28..000000000000
--- a/vendor/github.com/docker/go-metrics/handler.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package metrics
-
-import (
-	"net/http"
-
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promhttp"
-)
-
-// HTTPHandlerOpts describes a set of configurable options for HTTP metrics
-type HTTPHandlerOpts struct {
-	DurationBuckets     []float64
-	RequestSizeBuckets  []float64
-	ResponseSizeBuckets []float64
-}
-
-const (
-	InstrumentHandlerResponseSize = iota
-	InstrumentHandlerRequestSize
-	InstrumentHandlerDuration
-	InstrumentHandlerCounter
-	InstrumentHandlerInFlight
-)
-
-type HTTPMetric struct {
-	prometheus.Collector
-	handlerType int
-}
-
-var (
-	defaultDurationBuckets     = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60}
-	defaultRequestSizeBuckets  = prometheus.ExponentialBuckets(1024, 2, 22) // 1K to 4G
-	defaultResponseSizeBuckets = defaultRequestSizeBuckets
-)
-
-// Handler returns the global http.Handler that provides the prometheus
-// metrics format on GET requests. This handler is no longer instrumented. 
-func Handler() http.Handler {
-	return promhttp.Handler()
-}
-
-func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc {
-	return InstrumentHandlerFunc(metrics, handler.ServeHTTP)
-}
-
-func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc {
-	var handler http.Handler
-	handler = http.HandlerFunc(handlerFunc)
-	for _, metric := range metrics {
-		switch metric.handlerType {
-		case InstrumentHandlerResponseSize:
-			if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
-				handler = promhttp.InstrumentHandlerResponseSize(collector, handler)
-			}
-		case InstrumentHandlerRequestSize:
-			if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
-				handler = promhttp.InstrumentHandlerRequestSize(collector, handler)
-			}
-		case InstrumentHandlerDuration:
-			if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
-				handler = promhttp.InstrumentHandlerDuration(collector, handler)
-			}
-		case InstrumentHandlerCounter:
-			if collector, ok := metric.Collector.(*prometheus.CounterVec); ok {
-				handler = promhttp.InstrumentHandlerCounter(collector, handler)
-			}
-		case InstrumentHandlerInFlight:
-			if collector, ok := metric.Collector.(prometheus.Gauge); ok {
-				handler = promhttp.InstrumentHandlerInFlight(collector, handler)
-			}
-		}
-	}
-	return handler.ServeHTTP
-}
diff --git a/vendor/github.com/docker/go-metrics/helpers.go b/vendor/github.com/docker/go-metrics/helpers.go
deleted file mode 100644
index 68b7f51b3383..000000000000
--- a/vendor/github.com/docker/go-metrics/helpers.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package metrics
-
-func sumFloat64(vs ...float64) float64 {
-	var sum float64
-	for _, v := range vs {
-		sum += v
-	}
-
-	return sum
-}
diff --git a/vendor/github.com/docker/go-metrics/namespace.go b/vendor/github.com/docker/go-metrics/namespace.go
deleted file mode 100644
index 798315451a7d..000000000000
--- a/vendor/github.com/docker/go-metrics/namespace.go
+++ /dev/null
@@ -1,315 +0,0 @@
-package metrics
-
-import (
-	"fmt"
-	"sync"
-
-	"github.com/prometheus/client_golang/prometheus"
-)
-
-type Labels map[string]string
-
-// NewNamespace returns a namespace that is responsible for managing a collection of
-// metrics for a particular namespace and subsystem
-//
-// labels allows const labels to be added to all metrics created in this namespace
-// and are commonly used for data like application version and git commit
-func NewNamespace(name, subsystem string, labels Labels) *Namespace {
-	if labels == nil {
-		labels = make(map[string]string)
-	}
-	return &Namespace{
-		name:      name,
-		subsystem: subsystem,
-		labels:    labels,
-	}
-}
-
-// Namespace describes a set of metrics that share a namespace and subsystem.
-type Namespace struct {
-	name      string
-	subsystem string
-	labels    Labels
-	mu        sync.Mutex
-	metrics   []prometheus.Collector
-}
-
-// WithConstLabels returns a namespace with the provided set of labels merged
-// with the existing constant labels on the namespace.
-//
-// Only metrics created with the returned namespace will get the new constant
-// labels. The returned namespace must be registered separately. 
-func (n *Namespace) WithConstLabels(labels Labels) *Namespace { - n.mu.Lock() - ns := &Namespace{ - name: n.name, - subsystem: n.subsystem, - labels: mergeLabels(n.labels, labels), - } - n.mu.Unlock() - return ns -} - -func (n *Namespace) NewCounter(name, help string) Counter { - c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))} - n.Add(c) - return c -} - -func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter { - c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)} - n.Add(c) - return c -} - -func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts { - return prometheus.CounterOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: makeName(name, Total), - Help: help, - ConstLabels: prometheus.Labels(n.labels), - } -} - -func (n *Namespace) NewTimer(name, help string) Timer { - t := &timer{ - m: prometheus.NewHistogram(n.newTimerOpts(name, help)), - } - n.Add(t) - return t -} - -func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) LabeledTimer { - t := &labeledTimer{ - m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels), - } - n.Add(t) - return t -} - -func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts { - return prometheus.HistogramOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: makeName(name, Seconds), - Help: help, - ConstLabels: prometheus.Labels(n.labels), - } -} - -func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge { - g := &gauge{ - pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)), - } - n.Add(g) - return g -} - -func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...string) LabeledGauge { - g := &labeledGauge{ - pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels), - } - n.Add(g) - return g -} - -func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeOpts { - return prometheus.GaugeOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: makeName(name, unit), - Help: help, - ConstLabels: prometheus.Labels(n.labels), - } -} - -func (n *Namespace) Describe(ch chan<- *prometheus.Desc) { - n.mu.Lock() - defer n.mu.Unlock() - - for _, metric := range n.metrics { - metric.Describe(ch) - } -} - -func (n *Namespace) Collect(ch chan<- prometheus.Metric) { - n.mu.Lock() - defer n.mu.Unlock() - - for _, metric := range n.metrics { - metric.Collect(ch) - } -} - -func (n *Namespace) Add(collector prometheus.Collector) { - n.mu.Lock() - n.metrics = append(n.metrics, collector) - n.mu.Unlock() -} - -func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc { - name = makeName(name, unit) - namespace := n.name - if n.subsystem != "" { - namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem) - } - name = fmt.Sprintf("%s_%s", namespace, name) - return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels)) -} - -// mergeLabels merges two or more labels objects into a single map, favoring -// the later labels. 
-func mergeLabels(lbs ...Labels) Labels { - merged := make(Labels) - - for _, target := range lbs { - for k, v := range target { - merged[k] = v - } - } - - return merged -} - -func makeName(name string, unit Unit) string { - if unit == "" { - return name - } - - return fmt.Sprintf("%s_%s", name, unit) -} - -func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric { - return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{ - DurationBuckets: defaultDurationBuckets, - RequestSizeBuckets: defaultResponseSizeBuckets, - ResponseSizeBuckets: defaultResponseSizeBuckets, - }) -} - -func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric { - return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{ - DurationBuckets: durationBuckets, - RequestSizeBuckets: requestSizeBuckets, - ResponseSizeBuckets: responseSizeBuckets, - }) -} - -func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric { - var httpMetrics []*HTTPMetric - inFlightMetric := n.NewInFlightGaugeMetric(handlerName) - requestTotalMetric := n.NewRequestTotalMetric(handlerName) - requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets) - requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets) - responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets) - httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric) - return httpMetrics -} - -func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric { - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - metric := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "in_flight_requests", - Help: "The in-flight HTTP requests", - ConstLabels: prometheus.Labels(labels), - }) - httpMetric := &HTTPMetric{ - Collector: metric, - handlerType: InstrumentHandlerInFlight, - } - n.Add(httpMetric) - return httpMetric -} - -func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric { - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - metric := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "requests_total", - Help: "Total number of HTTP requests made.", - ConstLabels: prometheus.Labels(labels), - }, - []string{"code", "method"}, - ) - httpMetric := &HTTPMetric{ - Collector: metric, - handlerType: InstrumentHandlerCounter, - } - n.Add(httpMetric) - return httpMetric -} -func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric { - if len(buckets) == 0 { - panic("DurationBuckets must be provided") - } - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - opts := prometheus.HistogramOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "request_duration_seconds", - Help: "The HTTP request latencies in seconds.", - Buckets: buckets, - ConstLabels: prometheus.Labels(labels), - } - metric := prometheus.NewHistogramVec(opts, []string{"method"}) - httpMetric := &HTTPMetric{ - Collector: metric, - handlerType: InstrumentHandlerDuration, - } - n.Add(httpMetric) - return httpMetric -} - -func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric { - if len(buckets) == 0 { - panic("RequestSizeBuckets must be provided") - } - labels := 
prometheus.Labels(n.labels) - labels["handler"] = handlerName - opts := prometheus.HistogramOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "request_size_bytes", - Help: "The HTTP request sizes in bytes.", - Buckets: buckets, - ConstLabels: prometheus.Labels(labels), - } - metric := prometheus.NewHistogramVec(opts, []string{}) - httpMetric := &HTTPMetric{ - Collector: metric, - handlerType: InstrumentHandlerRequestSize, - } - n.Add(httpMetric) - return httpMetric -} - -func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric { - if len(buckets) == 0 { - panic("ResponseSizeBuckets must be provided") - } - labels := prometheus.Labels(n.labels) - labels["handler"] = handlerName - opts := prometheus.HistogramOpts{ - Namespace: n.name, - Subsystem: n.subsystem, - Name: "response_size_bytes", - Help: "The HTTP response sizes in bytes.", - Buckets: buckets, - ConstLabels: prometheus.Labels(labels), - } - metrics := prometheus.NewHistogramVec(opts, []string{}) - httpMetric := &HTTPMetric{ - Collector: metrics, - handlerType: InstrumentHandlerResponseSize, - } - n.Add(httpMetric) - return httpMetric -} diff --git a/vendor/github.com/docker/go-metrics/register.go b/vendor/github.com/docker/go-metrics/register.go deleted file mode 100644 index 708358df01d6..000000000000 --- a/vendor/github.com/docker/go-metrics/register.go +++ /dev/null @@ -1,15 +0,0 @@ -package metrics - -import "github.com/prometheus/client_golang/prometheus" - -// Register adds all the metrics in the provided namespace to the global -// metrics registry -func Register(n *Namespace) { - prometheus.MustRegister(n) -} - -// Deregister removes all the metrics in the provided namespace from the -// global metrics registry -func Deregister(n *Namespace) { - prometheus.Unregister(n) -} diff --git a/vendor/github.com/docker/go-metrics/timer.go b/vendor/github.com/docker/go-metrics/timer.go deleted file mode 100644 index 824c98739cf5..000000000000 --- a/vendor/github.com/docker/go-metrics/timer.go +++ /dev/null @@ -1,85 +0,0 @@ -package metrics - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -// StartTimer begins a timer observation at the callsite. When the target -// operation is completed, the caller should call the return done func(). -func StartTimer(timer Timer) (done func()) { - start := time.Now() - return func() { - timer.Update(time.Since(start)) - } -} - -// Timer is a metric that allows collecting the duration of an action in seconds -type Timer interface { - // Update records an observation, duration, and converts to the target - // units. - Update(duration time.Duration) - - // UpdateSince will add the duration from the provided starting time to the - // timer's summary with the precisions that was used in creation of the timer - UpdateSince(time.Time) -} - -// LabeledTimer is a timer that must have label values populated before use. 
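The deleted register.go and the StartTimer helper shown just above combine into a simple timing idiom. A sketch, again assuming the NewNamespace constructor from outside this hunk:

```go
package main

import (
	"time"

	metrics "github.com/docker/go-metrics"
)

func main() {
	ns := metrics.NewNamespace("engine", "daemon", nil) // assumed constructor
	pull := ns.NewTimer("image_pull", "Time taken to pull an image")
	metrics.Register(ns) // MustRegister the namespace with the global registry

	done := metrics.StartTimer(pull)
	time.Sleep(25 * time.Millisecond) // stand-in for the operation being timed
	done()                            // observes the elapsed time, in seconds
}
```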
-type LabeledTimer interface { - WithValues(labels ...string) *labeledTimerObserver -} - -type labeledTimer struct { - m *prometheus.HistogramVec -} - -type labeledTimerObserver struct { - m prometheus.Observer -} - -func (lbo *labeledTimerObserver) Update(duration time.Duration) { - lbo.m.Observe(duration.Seconds()) -} - -func (lbo *labeledTimerObserver) UpdateSince(since time.Time) { - lbo.m.Observe(time.Since(since).Seconds()) -} - -func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver { - return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)} -} - -func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) { - lt.m.Describe(c) -} - -func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) { - lt.m.Collect(c) -} - -type timer struct { - m prometheus.Observer -} - -func (t *timer) Update(duration time.Duration) { - t.m.Observe(duration.Seconds()) -} - -func (t *timer) UpdateSince(since time.Time) { - t.m.Observe(time.Since(since).Seconds()) -} - -func (t *timer) Describe(c chan<- *prometheus.Desc) { - c <- t.m.(prometheus.Metric).Desc() -} - -func (t *timer) Collect(c chan<- prometheus.Metric) { - // Are there any observers that don't implement Collector? It is really - // unclear what the point of the upstream change was, but we'll let this - // panic if we get an observer that doesn't implement collector. In this - // case, we should almost always see metricVec objects, so this should - // never panic. - t.m.(prometheus.Collector).Collect(c) -} diff --git a/vendor/github.com/docker/go-metrics/unit.go b/vendor/github.com/docker/go-metrics/unit.go deleted file mode 100644 index c96622f9031d..000000000000 --- a/vendor/github.com/docker/go-metrics/unit.go +++ /dev/null @@ -1,12 +0,0 @@ -package metrics - -// Unit represents the type or precision of a metric that is appended to -// the metrics fully qualified name -type Unit string - -const ( - Nanoseconds Unit = "nanoseconds" - Seconds Unit = "seconds" - Bytes Unit = "bytes" - Total Unit = "total" -) diff --git a/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md index dcffb31ae4a7..8e7db38186e6 100644 --- a/vendor/github.com/docker/libtrust/README.md +++ b/vendor/github.com/docker/libtrust/README.md @@ -1,9 +1,5 @@ # libtrust -> **WARNING** this library is no longer actively developed, and will be integrated -> in the [docker/distribution][https://www.github.com/docker/distribution] -> repository in future. - Libtrust is library for managing authentication and authorization using public key cryptography. Authentication is handled using the identity attached to the public key. diff --git a/vendor/github.com/docker/libtrust/util.go b/vendor/github.com/docker/libtrust/util.go index a5a101d3f117..d88176cc3d59 100644 --- a/vendor/github.com/docker/libtrust/util.go +++ b/vendor/github.com/docker/libtrust/util.go @@ -152,7 +152,7 @@ func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, ro } // joseBase64UrlEncode encodes the given data using the standard base64 url -// encoding format but with all trailing '=' characters omitted in accordance +// encoding format but with all trailing '=' characters ommitted in accordance // with the jose specification. 
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 func joseBase64UrlEncode(b []byte) string { diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml index 0e9d6edc010a..930860e0a808 100644 --- a/vendor/github.com/ghodss/yaml/.travis.yml +++ b/vendor/github.com/ghodss/yaml/.travis.yml @@ -1,7 +1,8 @@ language: go go: - - 1.3 - - 1.4 + - "1.3" + - "1.4" + - "1.10" script: - go test - go build diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md index f8f7e369549c..0200f75b4d12 100644 --- a/vendor/github.com/ghodss/yaml/README.md +++ b/vendor/github.com/ghodss/yaml/README.md @@ -4,13 +4,13 @@ ## Introduction -A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). ## Compatibility -This package uses [go-yaml v2](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). +This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). ## Caveats @@ -44,6 +44,8 @@ import "github.com/ghodss/yaml" Usage is very similar to the JSON library: ```go +package main + import ( "fmt" @@ -51,8 +53,8 @@ import ( ) type Person struct { - Name string `json:"name"` // Affects YAML field names too. - Age int `json:"name"` + Name string `json:"name"` // Affects YAML field names too. + Age int `json:"age"` } func main() { @@ -65,13 +67,13 @@ func main() { } fmt.Println(string(y)) /* Output: - name: John age: 30 + name: John */ // Unmarshal the YAML back into a Person struct. 
var p2 Person - err := yaml.Unmarshal(y, &p2) + err = yaml.Unmarshal(y, &p2) if err != nil { fmt.Printf("err: %v\n", err) return @@ -86,11 +88,14 @@ func main() { `yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: ```go +package main + import ( "fmt" "github.com/ghodss/yaml" ) + func main() { j := []byte(`{"name": "John", "age": 30}`) y, err := yaml.JSONToYAML(j) diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go index 0bd3c2b46267..58600740266c 100644 --- a/vendor/github.com/ghodss/yaml/fields.go +++ b/vendor/github.com/ghodss/yaml/fields.go @@ -45,7 +45,11 @@ func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.Te break } if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) + if v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } else { + v = reflect.New(v.Type().Elem()) + } } if v.Type().NumMethod() > 0 { if u, ok := v.Interface().(json.Unmarshaler); ok { diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go index c02beacb9a41..6e7f14fc7fb7 100644 --- a/vendor/github.com/ghodss/yaml/yaml.go +++ b/vendor/github.com/ghodss/yaml/yaml.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "io" "reflect" "strconv" @@ -15,26 +16,30 @@ import ( func Marshal(o interface{}) ([]byte, error) { j, err := json.Marshal(o) if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: ", err) + return nil, fmt.Errorf("error marshaling into JSON: %v", err) } y, err := JSONToYAML(j) if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: ", err) + return nil, fmt.Errorf("error converting JSON to YAML: %v", err) } return y, nil } -// Converts YAML to JSON then uses JSON to unmarshal into an object. -func Unmarshal(y []byte, o interface{}) error { +// JSONOpt is a decoding option for decoding from JSON format. +type JSONOpt func(*json.Decoder) *json.Decoder + +// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, +// optionally configuring the behavior of the JSON unmarshal. +func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo) + j, err := yamlToJSON(y, &vo, yaml.Unmarshal) if err != nil { return fmt.Errorf("error converting YAML to JSON: %v", err) } - err = json.Unmarshal(j, o) + err = jsonUnmarshal(bytes.NewReader(j), o, opts...) if err != nil { return fmt.Errorf("error unmarshaling JSON: %v", err) } @@ -42,13 +47,28 @@ func Unmarshal(y []byte, o interface{}) error { return nil } +// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the +// object, optionally applying decoder options prior to decoding. We are not +// using json.Unmarshal directly as we want the chance to pass in non-default +// options. +func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { + d := json.NewDecoder(r) + for _, opt := range opts { + d = opt(d) + } + if err := d.Decode(&o); err != nil { + return fmt.Errorf("while decoding JSON: %v", err) + } + return nil +} + // Convert JSON to YAML. func JSONToYAML(j []byte) ([]byte, error) { // Convert the JSON to an object. var jsonObj interface{} // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshling to interface{}, it just picks float64 + // etc.) when unmarshalling to interface{}, it just picks float64 // universally. 
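The JSONOpt hook introduced above means any func(*json.Decoder) *json.Decoder can tune decoding. As a sketch, a hypothetical caller-defined option (useNumber is not part of the package) that keeps numbers as json.Number on the JSON side:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ghodss/yaml"
)

// useNumber is a hypothetical caller-defined JSONOpt.
func useNumber(d *json.Decoder) *json.Decoder {
	d.UseNumber() // decode numbers as json.Number rather than float64
	return d
}

func main() {
	var out map[string]interface{}
	if err := yaml.Unmarshal([]byte("answer: 42"), &out, useNumber); err != nil {
		fmt.Println("err:", err)
		return
	}
	fmt.Printf("%T %v\n", out["answer"], out["answer"]) // json.Number 42
}
```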
go-yaml does go through the effort of picking the right // number type, so we can preserve number type throughout this process. err := yaml.Unmarshal(j, &jsonObj) @@ -60,8 +80,8 @@ func JSONToYAML(j []byte) ([]byte, error) { return yaml.Marshal(jsonObj) } -// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through -// this method should be a no-op. +// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, +// passing JSON through this method should be a no-op. // // Things YAML can do that are not supported by JSON: // * In YAML you can have binary and null keys in your maps. These are invalid @@ -70,14 +90,22 @@ func JSONToYAML(j []byte) ([]byte, error) { // use binary data with this library, encode the data as base64 as usual but do // not use the !!binary tag in your YAML. This will ensure the original base64 // encoded data makes it all the way through to the JSON. +// +// For strict decoding of YAML, use YAMLToJSONStrict. func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil) + return yamlToJSON(y, nil, yaml.Unmarshal) +} + +// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, +// returning an error on any duplicate field names. +func YAMLToJSONStrict(y []byte) ([]byte, error) { + return yamlToJSON(y, nil, yaml.UnmarshalStrict) } -func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { +func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { // Convert the YAML to an object. var yamlObj interface{} - err := yaml.Unmarshal(y, &yamlObj) + err := yamlUnmarshal(y, &yamlObj) if err != nil { return nil, err } diff --git a/vendor/github.com/ghodss/yaml/yaml_go110.go b/vendor/github.com/ghodss/yaml/yaml_go110.go new file mode 100644 index 000000000000..ab3e06a222a6 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/yaml_go110.go @@ -0,0 +1,14 @@ +// This file contains changes that are only compatible with go 1.10 and onwards. + +// +build go1.10 + +package yaml + +import "encoding/json" + +// DisallowUnknownFields configures the JSON decoder to error out if unknown +// fields come along, instead of dropping them by default. 
+func DisallowUnknownFields(d *json.Decoder) *json.Decoder { + d.DisallowUnknownFields() + return d +} diff --git a/vendor/github.com/ghodss/yaml/yaml_go110_test.go b/vendor/github.com/ghodss/yaml/yaml_go110_test.go new file mode 100644 index 000000000000..b7767b7c4ff6 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/yaml_go110_test.go @@ -0,0 +1,46 @@ +// +build go1.10 + +package yaml + +import ( + "fmt" + "testing" +) + +func TestUnmarshalWithTags(t *testing.T) { + type WithTaggedField struct { + Field string `json:"field"` + } + + t.Run("Known tagged field", func(t *testing.T) { + y := []byte(`field: "hello"`) + v := WithTaggedField{} + if err := Unmarshal(y, &v, DisallowUnknownFields); err != nil { + t.Errorf("unexpected error: %v", err) + } + if v.Field != "hello" { + t.Errorf("v.Field=%v, want 'hello'", v.Field) + } + + }) + t.Run("With unknown tagged field", func(t *testing.T) { + y := []byte(`unknown: "hello"`) + v := WithTaggedField{} + err := Unmarshal(y, &v, DisallowUnknownFields) + if err == nil { + t.Errorf("want error because of unknown field, got : v=%#v", v) + } + }) + +} + +func ExampleUnknown() { + type WithTaggedField struct { + Field string `json:"field"` + } + y := []byte(`unknown: "hello"`) + v := WithTaggedField{} + fmt.Printf("%v\n", Unmarshal(y, &v, DisallowUnknownFields)) + // Ouptut: + // unmarshaling JSON: while decoding JSON: json: unknown field "unknown" +} diff --git a/vendor/github.com/ghodss/yaml/yaml_test.go b/vendor/github.com/ghodss/yaml/yaml_test.go index 0ae0954e9013..9250cf242aa5 100644 --- a/vendor/github.com/ghodss/yaml/yaml_test.go +++ b/vendor/github.com/ghodss/yaml/yaml_test.go @@ -88,10 +88,26 @@ func TestUnmarshal(t *testing.T) { s4 := UnmarshalStringMap{} e4 := UnmarshalStringMap{map[string]string{"b": "1"}} unmarshal(t, y, &s4, &e4) + + y = []byte(` +a: + name: TestA +b: + name: TestB +`) + type NamedThing struct { + Name string `json:"name"` + } + s5 := map[string]*NamedThing{} + e5 := map[string]*NamedThing{ + "a": &NamedThing{Name: "TestA"}, + "b": &NamedThing{Name: "TestB"}, + } + unmarshal(t, y, &s5, &e5) } -func unmarshal(t *testing.T, y []byte, s, e interface{}) { - err := Unmarshal(y, s) +func unmarshal(t *testing.T, y []byte, s, e interface{}, opts ...JSONOpt) { + err := Unmarshal(y, s, opts...) if err != nil { t.Errorf("error unmarshaling YAML: %v", err) } @@ -269,3 +285,16 @@ func runCases(t *testing.T, runType RunType, cases []Case) { func strPtr(s string) *string { return &s } + +func TestYAMLToJSONStrict(t *testing.T) { + const data = ` +foo: bar +foo: baz +` + if _, err := YAMLToJSON([]byte(data)); err != nil { + t.Error("expected YAMLtoJSON to pass on duplicate field names") + } + if _, err := YAMLToJSONStrict([]byte(data)); err == nil { + t.Error("expected YAMLtoJSONStrict to fail on duplicate field names") + } +} diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go index 6ff062f9bb4e..fc5aaaa13aa9 100644 --- a/vendor/github.com/google/btree/btree.go +++ b/vendor/github.com/google/btree/btree.go @@ -22,7 +22,7 @@ // See some discussion on the matter here: // http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html // Note, though, that this project is in no way related to the C++ B-Tree -// implementation written about there. +// implmentation written about there. // // Within this tree, each node contains a slice of items and a (possibly nil) // slice of children. 
For basic numeric values or raw structs, this can cause @@ -44,7 +44,7 @@ // widely used ordered tree implementation in the Go ecosystem currently. // Its functions, therefore, exactly mirror those of // llrb.LLRB where possible. Unlike gollrb, though, we currently don't -// support storing multiple equivalent values. +// support storing multiple equivalent values or backwards iteration. package btree import ( @@ -52,7 +52,6 @@ import ( "io" "sort" "strings" - "sync" ) // Item represents a single object in the tree. @@ -69,17 +68,11 @@ const ( DefaultFreeListSize = 32 ) -var ( - nilItems = make(items, 16) - nilChildren = make(children, 16) -) - // FreeList represents a free list of btree nodes. By default each // BTree has its own FreeList, but multiple BTrees can share the same // FreeList. -// Two Btrees using the same freelist are safe for concurrent write access. +// Two Btrees using the same freelist are not safe for concurrent write access. type FreeList struct { - mu sync.Mutex freelist []*node } @@ -90,29 +83,18 @@ func NewFreeList(size int) *FreeList { } func (f *FreeList) newNode() (n *node) { - f.mu.Lock() index := len(f.freelist) - 1 if index < 0 { - f.mu.Unlock() return new(node) } - n = f.freelist[index] - f.freelist[index] = nil - f.freelist = f.freelist[:index] - f.mu.Unlock() + f.freelist, n = f.freelist[:index], f.freelist[index] return } -// freeNode adds the given node to the list, returning true if it was added -// and false if it was discarded. -func (f *FreeList) freeNode(n *node) (out bool) { - f.mu.Lock() +func (f *FreeList) freeNode(n *node) { if len(f.freelist) < cap(f.freelist) { f.freelist = append(f.freelist, n) - out = true } - f.mu.Unlock() - return } // ItemIterator allows callers of Ascend* to iterate in-order over portions of @@ -134,8 +116,8 @@ func NewWithFreeList(degree int, f *FreeList) *BTree { panic("bad degree") } return &BTree{ - degree: degree, - cow: ©OnWriteContext{freelist: f}, + degree: degree, + freelist: f, } } @@ -156,8 +138,8 @@ func (s *items) insertAt(index int, item Item) { // back. func (s *items) removeAt(index int) Item { item := (*s)[index] + (*s)[index] = nil copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil *s = (*s)[:len(*s)-1] return item } @@ -171,16 +153,6 @@ func (s *items) pop() (out Item) { return } -// truncate truncates this instance at index so that it contains only the -// first index items. index must be less than or equal to length. -func (s *items) truncate(index int) { - var toClear items - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilItems):] - } -} - // find returns the index where the given item should be inserted into this // list. 'found' is true if the item already exists in the list at the given // index. @@ -211,8 +183,8 @@ func (s *children) insertAt(index int, n *node) { // back. func (s *children) removeAt(index int) *node { n := (*s)[index] + (*s)[index] = nil copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil *s = (*s)[:len(*s)-1] return n } @@ -226,16 +198,6 @@ func (s *children) pop() (out *node) { return } -// truncate truncates this instance at index so that it contains only the -// first index children. index must be less than or equal to length. -func (s *children) truncate(index int) { - var toClear children - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilChildren):] - } -} - // node is an internal node in a tree. 
// // It must at all times maintain the invariant that either @@ -244,34 +206,7 @@ func (s *children) truncate(index int) { type node struct { items items children children - cow *copyOnWriteContext -} - -func (n *node) mutableFor(cow *copyOnWriteContext) *node { - if n.cow == cow { - return n - } - out := cow.newNode() - if cap(out.items) >= len(n.items) { - out.items = out.items[:len(n.items)] - } else { - out.items = make(items, len(n.items), cap(n.items)) - } - copy(out.items, n.items) - // Copy children - if cap(out.children) >= len(n.children) { - out.children = out.children[:len(n.children)] - } else { - out.children = make(children, len(n.children), cap(n.children)) - } - copy(out.children, n.children) - return out -} - -func (n *node) mutableChild(i int) *node { - c := n.children[i].mutableFor(n.cow) - n.children[i] = c - return c + t *BTree } // split splits the given node at the given index. The current node shrinks, @@ -279,12 +214,12 @@ func (n *node) mutableChild(i int) *node { // containing all items/children after it. func (n *node) split(i int) (Item, *node) { item := n.items[i] - next := n.cow.newNode() + next := n.t.newNode() next.items = append(next.items, n.items[i+1:]...) - n.items.truncate(i) + n.items = n.items[:i] if len(n.children) > 0 { next.children = append(next.children, n.children[i+1:]...) - n.children.truncate(i + 1) + n.children = n.children[:i+1] } return item, next } @@ -295,7 +230,7 @@ func (n *node) maybeSplitChild(i, maxItems int) bool { if len(n.children[i].items) < maxItems { return false } - first := n.mutableChild(i) + first := n.children[i] item, second := first.split(maxItems / 2) n.items.insertAt(i, item) n.children.insertAt(i+1, second) @@ -329,7 +264,7 @@ func (n *node) insert(item Item, maxItems int) Item { return out } } - return n.mutableChild(i).insert(item, maxItems) + return n.children[i].insert(item, maxItems) } // get finds the given key in the subtree and returns it. @@ -407,10 +342,10 @@ func (n *node) remove(item Item, minItems int, typ toRemove) Item { panic("invalid type") } // If we get to here, we have children. - if len(n.children[i].items) <= minItems { + child := n.children[i] + if len(child.items) <= minItems { return n.growChildAndRemove(i, item, minItems, typ) } - child := n.mutableChild(i) // Either we had enough items to begin with, or we've done some // merging/stealing, because we've got enough now and we're ready to return // stuff. @@ -449,10 +384,10 @@ func (n *node) remove(item Item, minItems int, typ toRemove) Item { // whether we're in case 1 or 2), we'll have enough items and can guarantee // that we hit case A. 
func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { + child := n.children[i] if i > 0 && len(n.children[i-1].items) > minItems { // Steal from left child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i - 1) + stealFrom := n.children[i-1] stolenItem := stealFrom.items.pop() child.items.insertAt(0, n.items[i-1]) n.items[i-1] = stolenItem @@ -461,8 +396,7 @@ func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) } } else if i < len(n.items) && len(n.children[i+1].items) > minItems { // steal from right child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i + 1) + stealFrom := n.children[i+1] stolenItem := stealFrom.items.removeAt(0) child.items = append(child.items, n.items[i]) n.items[i] = stolenItem @@ -472,99 +406,47 @@ func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) } else { if i >= len(n.items) { i-- + child = n.children[i] } - child := n.mutableChild(i) // merge with right child mergeItem := n.items.removeAt(i) mergeChild := n.children.removeAt(i + 1) child.items = append(child.items, mergeItem) child.items = append(child.items, mergeChild.items...) child.children = append(child.children, mergeChild.children...) - n.cow.freeNode(mergeChild) + n.t.freeNode(mergeChild) } return n.remove(item, minItems, typ) } -type direction int - -const ( - descend = direction(-1) - ascend = direction(+1) -) - // iterate provides a simple method for iterating over elements in the tree. +// It could probably use some work to be extra-efficient (it calls from() a +// little more than it should), but it works pretty well for now. // -// When ascending, the 'start' should be less than 'stop' and when descending, -// the 'start' should be greater than 'stop'. Setting 'includeStart' to true -// will force the iterator to include the first item when it equals 'start', -// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a -// "greaterThan" or "lessThan" queries. -func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { - var ok, found bool - var index int - switch dir { - case ascend: - if start != nil { - index, _ = n.items.find(start) +// It requires that 'from' and 'to' both return true for values we should hit +// with the iterator. It should also be the case that 'from' returns true for +// values less than or equal to values 'to' returns true for, and 'to' +// returns true for values greater than or equal to those that 'from' +// does. 
+func (n *node) iterate(from, to func(Item) bool, iter ItemIterator) bool { + for i, item := range n.items { + if !from(item) { + continue } - for i := index; i < len(n.items); i++ { - if len(n.children) > 0 { - if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { - hit = true - continue - } - hit = true - if stop != nil && !n.items[i].Less(stop) { - return hit, false - } - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - case descend: - if start != nil { - index, found = n.items.find(start) - if !found { - index = index - 1 - } - } else { - index = len(n.items) - 1 + if len(n.children) > 0 && !n.children[i].iterate(from, to, iter) { + return false } - for i := index; i >= 0; i-- { - if start != nil && !n.items[i].Less(start) { - if !includeStart || hit || start.Less(n.items[i]) { - continue - } - } - if len(n.children) > 0 { - if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if stop != nil && !stop.Less(n.items[i]) { - return hit, false // continue - } - hit = true - if !iter(n.items[i]) { - return hit, false - } + if !to(item) { + return false } - if len(n.children) > 0 { - if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } + if !iter(item) { + return false } } - return hit, true + if len(n.children) > 0 { + return n.children[len(n.children)-1].iterate(from, to, iter) + } + return true } // Used for testing/debugging purposes. @@ -583,54 +465,12 @@ func (n *node) print(w io.Writer, level int) { // Write operations are not safe for concurrent mutation by multiple // goroutines, but Read operations are. type BTree struct { - degree int - length int - root *node - cow *copyOnWriteContext -} - -// copyOnWriteContext pointers determine node ownership... a tree with a write -// context equivalent to a node's write context is allowed to modify that node. -// A tree whose write context does not match a node's is not allowed to modify -// it, and must create a new, writable copy (IE: it's a Clone). -// -// When doing any write operation, we maintain the invariant that the current -// node's context is equal to the context of the tree that requested the write. -// We do this by, before we descend into any node, creating a copy with the -// correct context if the contexts don't match. -// -// Since the node we're currently visiting on any write has the requesting -// tree's context, that node is modifiable in place. Children of that node may -// not share context, but before we descend into them, we'll make a mutable -// copy. -type copyOnWriteContext struct { + degree int + length int + root *node freelist *FreeList } -// Clone clones the btree, lazily. Clone should not be called concurrently, -// but the original tree (t) and the new tree (t2) can be used concurrently -// once the Clone call completes. -// -// The internal tree structure of b is marked read-only and shared between t and -// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes -// whenever one of b's original nodes would have been modified. Read operations -// should have no performance degredation. 
Write operations for both t and t2 -// will initially experience minor slow-downs caused by additional allocs and -// copies due to the aforementioned copy-on-write logic, but should converge to -// the original performance characteristics of the original tree. -func (t *BTree) Clone() (t2 *BTree) { - // Create two entirely new copy-on-write contexts. - // This operation effectively creates three trees: - // the original, shared nodes (old b.cow) - // the new b.cow nodes - // the new out.cow nodes - cow1, cow2 := *t.cow, *t.cow - out := *t - t.cow = &cow1 - out.cow = &cow2 - return &out -} - // maxItems returns the max number of items to allow per node. func (t *BTree) maxItems() int { return t.degree*2 - 1 @@ -642,37 +482,23 @@ func (t *BTree) minItems() int { return t.degree - 1 } -func (c *copyOnWriteContext) newNode() (n *node) { - n = c.freelist.newNode() - n.cow = c +func (t *BTree) newNode() (n *node) { + n = t.freelist.newNode() + n.t = t return } -type freeType int - -const ( - ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) - ftStored // node was stored in the freelist for later use - ftNotOwned // node was ignored by COW, since it's owned by another one -) - -// freeNode frees a node within a given COW context, if it's owned by that -// context. It returns what happened to the node (see freeType const -// documentation). -func (c *copyOnWriteContext) freeNode(n *node) freeType { - if n.cow == c { - // clear to allow GC - n.items.truncate(0) - n.children.truncate(0) - n.cow = nil - if c.freelist.freeNode(n) { - return ftStored - } else { - return ftFreelistFull - } - } else { - return ftNotOwned +func (t *BTree) freeNode(n *node) { + for i := range n.items { + n.items[i] = nil // clear to allow GC + } + n.items = n.items[:0] + for i := range n.children { + n.children[i] = nil // clear to allow GC } + n.children = n.children[:0] + n.t = nil // clear to allow GC + t.freelist.freeNode(n) } // ReplaceOrInsert adds the given item to the tree. 
If an item in the tree @@ -685,19 +511,16 @@ func (t *BTree) ReplaceOrInsert(item Item) Item { panic("nil item being added to BTree") } if t.root == nil { - t.root = t.cow.newNode() + t.root = t.newNode() t.root.items = append(t.root.items, item) t.length++ return nil - } else { - t.root = t.root.mutableFor(t.cow) - if len(t.root.items) >= t.maxItems() { - item2, second := t.root.split(t.maxItems() / 2) - oldroot := t.root - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item2) - t.root.children = append(t.root.children, oldroot, second) - } + } else if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) } out := t.root.insert(item, t.maxItems()) if out == nil { @@ -728,12 +551,11 @@ func (t *BTree) deleteItem(item Item, typ toRemove) Item { if t.root == nil || len(t.root.items) == 0 { return nil } - t.root = t.root.mutableFor(t.cow) out := t.root.remove(item, t.minItems(), typ) if len(t.root.items) == 0 && len(t.root.children) > 0 { oldroot := t.root t.root = t.root.children[0] - t.cow.freeNode(oldroot) + t.freeNode(oldroot) } if out != nil { t.length-- @@ -747,7 +569,10 @@ func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator if t.root == nil { return } - t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) + t.root.iterate( + func(a Item) bool { return !a.Less(greaterOrEqual) }, + func(a Item) bool { return a.Less(lessThan) }, + iterator) } // AscendLessThan calls the iterator for every value in the tree within the range @@ -756,7 +581,10 @@ func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { if t.root == nil { return } - t.root.iterate(ascend, nil, pivot, false, false, iterator) + t.root.iterate( + func(a Item) bool { return true }, + func(a Item) bool { return a.Less(pivot) }, + iterator) } // AscendGreaterOrEqual calls the iterator for every value in the tree within @@ -765,7 +593,10 @@ func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { if t.root == nil { return } - t.root.iterate(ascend, pivot, nil, true, false, iterator) + t.root.iterate( + func(a Item) bool { return !a.Less(pivot) }, + func(a Item) bool { return true }, + iterator) } // Ascend calls the iterator for every value in the tree within the range @@ -774,43 +605,10 @@ func (t *BTree) Ascend(iterator ItemIterator) { if t.root == nil { return } - t.root.iterate(ascend, nil, nil, false, false, iterator) -} - -// DescendRange calls the iterator for every value in the tree within the range -// [lessOrEqual, greaterThan), until iterator returns false. -func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) -} - -// DescendLessOrEqual calls the iterator for every value in the tree within the range -// [pivot, first], until iterator returns false. -func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, pivot, nil, true, false, iterator) -} - -// DescendGreaterThan calls the iterator for every value in the tree within -// the range (pivot, last], until iterator returns false. 
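The ascending iterators retained by the revert cover the common read patterns; a minimal usage sketch of that surviving API:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(2) // minimum degree 2: nodes hold one to three items
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}
	// Visit [3, 7); returning false from the iterator stops early.
	tr.AscendRange(btree.Int(3), btree.Int(7), func(i btree.Item) bool {
		fmt.Println(i)
		return true
	})
	fmt.Println(tr.Get(btree.Int(5)), tr.Len()) // 5 10
}
```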
-func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, pivot, false, false, iterator) -} - -// Descend calls the iterator for every value in the tree within the range -// [last, first], until iterator returns false. -func (t *BTree) Descend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, nil, false, false, iterator) + t.root.iterate( + func(a Item) bool { return true }, + func(a Item) bool { return true }, + iterator) } // Get looks for the key item in the tree, returning it. It returns nil if @@ -842,45 +640,6 @@ func (t *BTree) Len() int { return t.length } -// Clear removes all items from the btree. If addNodesToFreelist is true, -// t's nodes are added to its freelist as part of this call, until the freelist -// is full. Otherwise, the root node is simply dereferenced and the subtree -// left to Go's normal GC processes. -// -// This can be much faster -// than calling Delete on all elements, because that requires finding/removing -// each element in the tree and updating the tree accordingly. It also is -// somewhat faster than creating a new tree to replace the old one, because -// nodes from the old tree are reclaimed into the freelist for use by the new -// one, instead of being lost to the garbage collector. -// -// This call takes: -// O(1): when addNodesToFreelist is false, this is a single operation. -// O(1): when the freelist is already full, it breaks out immediately -// O(freelist size): when the freelist is empty and the nodes are all owned -// by this tree, nodes are added to the freelist until full. -// O(tree size): when all nodes are owned by another tree, all nodes are -// iterated over looking for nodes to add to the freelist, and due to -// ownership, none are. -func (t *BTree) Clear(addNodesToFreelist bool) { - if t.root != nil && addNodesToFreelist { - t.root.reset(t.cow) - } - t.root, t.length = nil, 0 -} - -// reset returns a subtree to the freelist. It breaks out immediately if the -// freelist is full, since the only benefit of iterating is to fill that -// freelist up. Returns true if parent reset call should continue. -func (n *node) reset(c *copyOnWriteContext) bool { - for _, child := range n.children { - if !child.reset(c) { - return false - } - } - return c.freeNode(n) != ftFreelistFull -} - // Int implements the Item interface for integers. type Int int diff --git a/vendor/github.com/google/btree/btree_test.go b/vendor/github.com/google/btree/btree_test.go index 78a90cd8a6c8..0a2fdde112b8 100644 --- a/vendor/github.com/google/btree/btree_test.go +++ b/vendor/github.com/google/btree/btree_test.go @@ -19,8 +19,6 @@ import ( "fmt" "math/rand" "reflect" - "sort" - "sync" "testing" "time" ) @@ -56,23 +54,6 @@ func all(t *BTree) (out []Item) { return } -// rangerev returns a reversed ordered list of Int items in the range [0, n). -func rangrev(n int) (out []Item) { - for i := n - 1; i >= 0; i-- { - out = append(out, Int(i)) - } - return -} - -// allrev extracts all items from a tree in reverse order as a slice. 
-func allrev(t *BTree) (out []Item) { - t.Descend(func(a Item) bool { - out = append(out, a) - return true - }) - return -} - var btreeDegree = flag.Int("degree", 32, "B-Tree degree") func TestBTree(t *testing.T) { @@ -106,13 +87,6 @@ func TestBTree(t *testing.T) { if !reflect.DeepEqual(got, want) { t.Fatalf("mismatch:\n got: %v\nwant: %v", got, want) } - - gotrev := allrev(tr) - wantrev := rangrev(treeSize) - if !reflect.DeepEqual(gotrev, wantrev) { - t.Fatalf("mismatch:\n got: %v\nwant: %v", got, want) - } - for _, item := range perm(treeSize) { if x := tr.Delete(item); x == nil { t.Fatalf("didn't find %v", item) @@ -214,31 +188,6 @@ func TestAscendRange(t *testing.T) { } } -func TestDescendRange(t *testing.T) { - tr := New(2) - for _, v := range perm(100) { - tr.ReplaceOrInsert(v) - } - var got []Item - tr.DescendRange(Int(60), Int(40), func(a Item) bool { - got = append(got, a) - return true - }) - if want := rangrev(100)[39:59]; !reflect.DeepEqual(got, want) { - t.Fatalf("descendrange:\n got: %v\nwant: %v", got, want) - } - got = got[:0] - tr.DescendRange(Int(60), Int(40), func(a Item) bool { - if a.(Int) < 50 { - return false - } - got = append(got, a) - return true - }) - if want := rangrev(100)[39:50]; !reflect.DeepEqual(got, want) { - t.Fatalf("descendrange:\n got: %v\nwant: %v", got, want) - } -} func TestAscendLessThan(t *testing.T) { tr := New(*btreeDegree) for _, v := range perm(100) { @@ -265,31 +214,6 @@ func TestAscendLessThan(t *testing.T) { } } -func TestDescendLessOrEqual(t *testing.T) { - tr := New(*btreeDegree) - for _, v := range perm(100) { - tr.ReplaceOrInsert(v) - } - var got []Item - tr.DescendLessOrEqual(Int(40), func(a Item) bool { - got = append(got, a) - return true - }) - if want := rangrev(100)[59:]; !reflect.DeepEqual(got, want) { - t.Fatalf("descendlessorequal:\n got: %v\nwant: %v", got, want) - } - got = got[:0] - tr.DescendLessOrEqual(Int(60), func(a Item) bool { - if a.(Int) < 50 { - return false - } - got = append(got, a) - return true - }) - if want := rangrev(100)[39:50]; !reflect.DeepEqual(got, want) { - t.Fatalf("descendlessorequal:\n got: %v\nwant: %v", got, want) - } -} func TestAscendGreaterOrEqual(t *testing.T) { tr := New(*btreeDegree) for _, v := range perm(100) { @@ -316,32 +240,6 @@ func TestAscendGreaterOrEqual(t *testing.T) { } } -func TestDescendGreaterThan(t *testing.T) { - tr := New(*btreeDegree) - for _, v := range perm(100) { - tr.ReplaceOrInsert(v) - } - var got []Item - tr.DescendGreaterThan(Int(40), func(a Item) bool { - got = append(got, a) - return true - }) - if want := rangrev(100)[:59]; !reflect.DeepEqual(got, want) { - t.Fatalf("descendgreaterthan:\n got: %v\nwant: %v", got, want) - } - got = got[:0] - tr.DescendGreaterThan(Int(40), func(a Item) bool { - if a.(Int) < 50 { - return false - } - got = append(got, a) - return true - }) - if want := rangrev(100)[:50]; !reflect.DeepEqual(got, want) { - t.Fatalf("descendgreaterthan:\n got: %v\nwant: %v", got, want) - } -} - const benchmarkTreeSize = 10000 func BenchmarkInsert(b *testing.B) { @@ -361,65 +259,6 @@ func BenchmarkInsert(b *testing.B) { } } -func BenchmarkSeek(b *testing.B) { - b.StopTimer() - size := 100000 - insertP := perm(size) - tr := New(*btreeDegree) - for _, item := range insertP { - tr.ReplaceOrInsert(item) - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - tr.AscendGreaterOrEqual(Int(i%size), func(i Item) bool { return false }) - } -} - -func BenchmarkDeleteInsert(b *testing.B) { - b.StopTimer() - insertP := perm(benchmarkTreeSize) - tr := New(*btreeDegree) 
- for _, item := range insertP { - tr.ReplaceOrInsert(item) - } - b.StartTimer() - for i := 0; i < b.N; i++ { - tr.Delete(insertP[i%benchmarkTreeSize]) - tr.ReplaceOrInsert(insertP[i%benchmarkTreeSize]) - } -} - -func BenchmarkDeleteInsertCloneOnce(b *testing.B) { - b.StopTimer() - insertP := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, item := range insertP { - tr.ReplaceOrInsert(item) - } - tr = tr.Clone() - b.StartTimer() - for i := 0; i < b.N; i++ { - tr.Delete(insertP[i%benchmarkTreeSize]) - tr.ReplaceOrInsert(insertP[i%benchmarkTreeSize]) - } -} - -func BenchmarkDeleteInsertCloneEachTime(b *testing.B) { - b.StopTimer() - insertP := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, item := range insertP { - tr.ReplaceOrInsert(item) - } - b.StartTimer() - for i := 0; i < b.N; i++ { - tr = tr.Clone() - tr.Delete(insertP[i%benchmarkTreeSize]) - tr.ReplaceOrInsert(insertP[i%benchmarkTreeSize]) - } -} - func BenchmarkDelete(b *testing.B) { b.StopTimer() insertP := perm(benchmarkTreeSize) @@ -468,318 +307,3 @@ func BenchmarkGet(b *testing.B) { } } } - -func BenchmarkGetCloneEachTime(b *testing.B) { - b.StopTimer() - insertP := perm(benchmarkTreeSize) - removeP := perm(benchmarkTreeSize) - b.StartTimer() - i := 0 - for i < b.N { - b.StopTimer() - tr := New(*btreeDegree) - for _, v := range insertP { - tr.ReplaceOrInsert(v) - } - b.StartTimer() - for _, item := range removeP { - tr = tr.Clone() - tr.Get(item) - i++ - if i >= b.N { - return - } - } - } -} - -type byInts []Item - -func (a byInts) Len() int { - return len(a) -} - -func (a byInts) Less(i, j int) bool { - return a[i].(Int) < a[j].(Int) -} - -func (a byInts) Swap(i, j int) { - a[i], a[j] = a[j], a[i] -} - -func BenchmarkAscend(b *testing.B) { - arr := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, v := range arr { - tr.ReplaceOrInsert(v) - } - sort.Sort(byInts(arr)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - j := 0 - tr.Ascend(func(item Item) bool { - if item.(Int) != arr[j].(Int) { - b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) - } - j++ - return true - }) - } -} - -func BenchmarkDescend(b *testing.B) { - arr := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, v := range arr { - tr.ReplaceOrInsert(v) - } - sort.Sort(byInts(arr)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - j := len(arr) - 1 - tr.Descend(func(item Item) bool { - if item.(Int) != arr[j].(Int) { - b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) - } - j-- - return true - }) - } -} -func BenchmarkAscendRange(b *testing.B) { - arr := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, v := range arr { - tr.ReplaceOrInsert(v) - } - sort.Sort(byInts(arr)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - j := 100 - tr.AscendRange(Int(100), arr[len(arr)-100], func(item Item) bool { - if item.(Int) != arr[j].(Int) { - b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) - } - j++ - return true - }) - if j != len(arr)-100 { - b.Fatalf("expected: %v, got %v", len(arr)-100, j) - } - } -} - -func BenchmarkDescendRange(b *testing.B) { - arr := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, v := range arr { - tr.ReplaceOrInsert(v) - } - sort.Sort(byInts(arr)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - j := len(arr) - 100 - tr.DescendRange(arr[len(arr)-100], Int(100), func(item Item) bool { - if item.(Int) != arr[j].(Int) { - b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) - } - j-- - return true - }) - if j != 100 { - 
b.Fatalf("expected: %v, got %v", len(arr)-100, j) - } - } -} -func BenchmarkAscendGreaterOrEqual(b *testing.B) { - arr := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, v := range arr { - tr.ReplaceOrInsert(v) - } - sort.Sort(byInts(arr)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - j := 100 - k := 0 - tr.AscendGreaterOrEqual(Int(100), func(item Item) bool { - if item.(Int) != arr[j].(Int) { - b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) - } - j++ - k++ - return true - }) - if j != len(arr) { - b.Fatalf("expected: %v, got %v", len(arr), j) - } - if k != len(arr)-100 { - b.Fatalf("expected: %v, got %v", len(arr)-100, k) - } - } -} -func BenchmarkDescendLessOrEqual(b *testing.B) { - arr := perm(benchmarkTreeSize) - tr := New(*btreeDegree) - for _, v := range arr { - tr.ReplaceOrInsert(v) - } - sort.Sort(byInts(arr)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - j := len(arr) - 100 - k := len(arr) - tr.DescendLessOrEqual(arr[len(arr)-100], func(item Item) bool { - if item.(Int) != arr[j].(Int) { - b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) - } - j-- - k-- - return true - }) - if j != -1 { - b.Fatalf("expected: %v, got %v", -1, j) - } - if k != 99 { - b.Fatalf("expected: %v, got %v", 99, k) - } - } -} - -const cloneTestSize = 10000 - -func cloneTest(t *testing.T, b *BTree, start int, p []Item, wg *sync.WaitGroup, trees *[]*BTree) { - t.Logf("Starting new clone at %v", start) - *trees = append(*trees, b) - for i := start; i < cloneTestSize; i++ { - b.ReplaceOrInsert(p[i]) - if i%(cloneTestSize/5) == 0 { - wg.Add(1) - go cloneTest(t, b.Clone(), i+1, p, wg, trees) - } - } - wg.Done() -} - -func TestCloneConcurrentOperations(t *testing.T) { - b := New(*btreeDegree) - trees := []*BTree{} - p := perm(cloneTestSize) - var wg sync.WaitGroup - wg.Add(1) - go cloneTest(t, b, 0, p, &wg, &trees) - wg.Wait() - want := rang(cloneTestSize) - t.Logf("Starting equality checks on %d trees", len(trees)) - for i, tree := range trees { - if !reflect.DeepEqual(want, all(tree)) { - t.Errorf("tree %v mismatch", i) - } - } - t.Log("Removing half from first half") - toRemove := rang(cloneTestSize)[cloneTestSize/2:] - for i := 0; i < len(trees)/2; i++ { - tree := trees[i] - wg.Add(1) - go func() { - for _, item := range toRemove { - tree.Delete(item) - } - wg.Done() - }() - } - wg.Wait() - t.Log("Checking all values again") - for i, tree := range trees { - var wantpart []Item - if i < len(trees)/2 { - wantpart = want[:cloneTestSize/2] - } else { - wantpart = want - } - if got := all(tree); !reflect.DeepEqual(wantpart, got) { - t.Errorf("tree %v mismatch, want %v got %v", i, len(want), len(got)) - } - } -} - -func BenchmarkDeleteAndRestore(b *testing.B) { - items := perm(16392) - b.ResetTimer() - b.Run(`CopyBigFreeList`, func(b *testing.B) { - fl := NewFreeList(16392) - tr := NewWithFreeList(*btreeDegree, fl) - for _, v := range items { - tr.ReplaceOrInsert(v) - } - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - dels := make([]Item, 0, tr.Len()) - tr.Ascend(ItemIterator(func(b Item) bool { - dels = append(dels, b) - return true - })) - for _, del := range dels { - tr.Delete(del) - } - // tr is now empty, we make a new empty copy of it. 
- tr = NewWithFreeList(*btreeDegree, fl) - for _, v := range items { - tr.ReplaceOrInsert(v) - } - } - }) - b.Run(`Copy`, func(b *testing.B) { - tr := New(*btreeDegree) - for _, v := range items { - tr.ReplaceOrInsert(v) - } - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - dels := make([]Item, 0, tr.Len()) - tr.Ascend(ItemIterator(func(b Item) bool { - dels = append(dels, b) - return true - })) - for _, del := range dels { - tr.Delete(del) - } - // tr is now empty, we make a new empty copy of it. - tr = New(*btreeDegree) - for _, v := range items { - tr.ReplaceOrInsert(v) - } - } - }) - b.Run(`ClearBigFreelist`, func(b *testing.B) { - fl := NewFreeList(16392) - tr := NewWithFreeList(*btreeDegree, fl) - for _, v := range items { - tr.ReplaceOrInsert(v) - } - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - tr.Clear(true) - for _, v := range items { - tr.ReplaceOrInsert(v) - } - } - }) - b.Run(`Clear`, func(b *testing.B) { - tr := New(*btreeDegree) - for _, v := range items { - tr.ReplaceOrInsert(v) - } - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - tr.Clear(true) - for _, v := range items { - tr.ReplaceOrInsert(v) - } - } - }) -} diff --git a/vendor/github.com/google/btree/go.mod b/vendor/github.com/google/btree/go.mod deleted file mode 100644 index fe4d5ca17b37..000000000000 --- a/vendor/github.com/google/btree/go.mod +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2014 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -module github.com/google/btree - -go 1.12 diff --git a/vendor/github.com/gorilla/mux/.circleci/config.yml b/vendor/github.com/gorilla/mux/.circleci/config.yml deleted file mode 100644 index 536bc119f682..000000000000 --- a/vendor/github.com/gorilla/mux/.circleci/config.yml +++ /dev/null @@ -1,87 +0,0 @@ -version: 2.0 - -jobs: - # Base test configuration for Go library tests Each distinct version should - # inherit this base, and override (at least) the container image used. - "test": &test - docker: - - image: circleci/golang:latest - working_directory: /go/src/github.com/gorilla/mux - steps: &steps - # Our build steps: we checkout the repo, fetch our deps, lint, and finally - # run "go test" on the package. - - checkout - # Logs the version in our build logs, for posterity - - run: go version - - run: - name: "Fetch dependencies" - command: > - go get -t -v ./... - # Only run gofmt, vet & lint against the latest Go version - - run: - name: "Run golint" - command: > - if [ "${LATEST}" = true ] && [ -z "${SKIP_GOLINT}" ]; then - go get -u golang.org/x/lint/golint - golint ./... - fi - - run: - name: "Run gofmt" - command: > - if [[ "${LATEST}" = true ]]; then - diff -u <(echo -n) <(gofmt -d -e .) - fi - - run: - name: "Run go vet" - command: > - if [[ "${LATEST}" = true ]]; then - go vet -v ./... - fi - - run: go test -v -race ./... 
- - "latest": - <<: *test - environment: - LATEST: true - - "1.12": - <<: *test - docker: - - image: circleci/golang:1.12 - - "1.11": - <<: *test - docker: - - image: circleci/golang:1.11 - - "1.10": - <<: *test - docker: - - image: circleci/golang:1.10 - - "1.9": - <<: *test - docker: - - image: circleci/golang:1.9 - - "1.8": - <<: *test - docker: - - image: circleci/golang:1.8 - - "1.7": - <<: *test - docker: - - image: circleci/golang:1.7 - -workflows: - version: 2 - build: - jobs: - - "latest" - - "1.12" - - "1.11" - - "1.10" - - "1.9" - - "1.8" - - "1.7" diff --git a/vendor/github.com/gorilla/mux/.github/release-drafter.yml b/vendor/github.com/gorilla/mux/.github/release-drafter.yml deleted file mode 100644 index 2db2e1397ec7..000000000000 --- a/vendor/github.com/gorilla/mux/.github/release-drafter.yml +++ /dev/null @@ -1,8 +0,0 @@ -# Config for https://github.com/apps/release-drafter -template: | - - - - ## CHANGELOG - - $CHANGES diff --git a/vendor/github.com/gorilla/mux/.github/stale.yml b/vendor/github.com/gorilla/mux/.github/stale.yml deleted file mode 100644 index f4b12d30baf3..000000000000 --- a/vendor/github.com/gorilla/mux/.github/stale.yml +++ /dev/null @@ -1,12 +0,0 @@ -daysUntilStale: 75 -daysUntilClose: 14 -# Issues with these labels will never be considered stale -exemptLabels: - - proposal - - needs review - - build system -staleLabel: stale -markComment: > - This issue has been automatically marked as stale because it hasn't seen - a recent update. It'll be automatically closed in a few days. -closeComment: false diff --git a/vendor/github.com/gorilla/mux/AUTHORS b/vendor/github.com/gorilla/mux/AUTHORS deleted file mode 100644 index b722392ee592..000000000000 --- a/vendor/github.com/gorilla/mux/AUTHORS +++ /dev/null @@ -1,8 +0,0 @@ -# This is the official list of gorilla/mux authors for copyright purposes. -# -# Please keep the list sorted. - -Google LLC (https://opensource.google.com/) -Kamil Kisielk -Matt Silverlock -Rodrigo Moraes (https://github.com/moraes) diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE deleted file mode 100644 index 6903df6386e9..000000000000 --- a/vendor/github.com/gorilla/mux/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md deleted file mode 100644 index 28df9a9f2ed5..000000000000 --- a/vendor/github.com/gorilla/mux/README.md +++ /dev/null @@ -1,805 +0,0 @@ -# gorilla/mux - -[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) -[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux) -[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) - -![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png) - -https://www.gorillatoolkit.org/pkg/mux - -Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to -their respective handler. - -The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: - -* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. -* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. -* URL hosts, paths and query values can have variables with an optional regular expression. -* Registered URLs can be built, or "reversed", which helps maintaining references to resources. -* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. - ---- - -* [Install](#install) -* [Examples](#examples) -* [Matching Routes](#matching-routes) -* [Static Files](#static-files) -* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.) -* [Registered URLs](#registered-urls) -* [Walking Routes](#walking-routes) -* [Graceful Shutdown](#graceful-shutdown) -* [Middleware](#middleware) -* [Handling CORS Requests](#handling-cors-requests) -* [Testing Handlers](#testing-handlers) -* [Full Example](#full-example) - ---- - -## Install - -With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain: - -```sh -go get -u github.com/gorilla/mux -``` - -## Examples - -Let's start registering a couple of URL paths and handlers: - -```go -func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) -} -``` - -Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. 
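The deleted README's feature list mentions that registered URLs can be built, or "reversed"; the section demonstrating it falls outside this hunk, but the pattern is roughly the following sketch, using mux's Name, Get, and URL:

```go
r := mux.NewRouter()
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
	Name("article")

// Build ("reverse") the URL for the named route.
url, err := r.Get("article").URL("category", "technology", "id", "42")
if err == nil {
	fmt.Println(url.String()) // /articles/technology/42
}
```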
- -Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example: - -```go -r := mux.NewRouter() -r.HandleFunc("/products/{key}", ProductHandler) -r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) -r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -``` - -The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`: - -```go -func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, "Category: %v\n", vars["category"]) -} -``` - -And this is all you need to know about the basic usage. More advanced options are explained below. - -### Matching Routes - -Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: - -```go -r := mux.NewRouter() -// Only matches if domain is "www.example.com". -r.Host("www.example.com") -// Matches a dynamic subdomain. -r.Host("{subdomain:[a-z]+}.example.com") -``` - -There are several other matchers that can be added. To match path prefixes: - -```go -r.PathPrefix("/products/") -``` - -...or HTTP methods: - -```go -r.Methods("GET", "POST") -``` - -...or URL schemes: - -```go -r.Schemes("https") -``` - -...or header values: - -```go -r.Headers("X-Requested-With", "XMLHttpRequest") -``` - -...or query values: - -```go -r.Queries("key", "value") -``` - -...or to use a custom matcher function: - -```go -r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 -}) -``` - -...and finally, it is possible to combine several matchers in a single route: - -```go -r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") -``` - -Routes are tested in the order they were added to the router. If two routes match, the first one wins: - -```go -r := mux.NewRouter() -r.HandleFunc("/specific", specificHandler) -r.PathPrefix("/").Handler(catchAllHandler) -``` - -Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". - -For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it: - -```go -r := mux.NewRouter() -s := r.Host("www.example.com").Subrouter() -``` - -Then register routes in the subrouter: - -```go -s.HandleFunc("/products/", ProductsHandler) -s.HandleFunc("/products/{key}", ProductHandler) -s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -``` - -The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter. - -There's one more thing about subroutes. 
When a subrouter has a path prefix, the inner routes use it as base for their paths: - -```go -r := mux.NewRouter() -s := r.PathPrefix("/products").Subrouter() -// "/products/" -s.HandleFunc("/", ProductsHandler) -// "/products/{key}/" -s.HandleFunc("/{key}/", ProductHandler) -// "/products/{key}/details" -s.HandleFunc("/{key}/details", ProductDetailsHandler) -``` - - -### Static Files - -Note that the path provided to `PathPrefix()` represents a "wildcard": calling -`PathPrefix("/static/").Handler(...)` means that the handler will be passed any -request that matches "/static/\*". This makes it easy to serve static files with mux: - -```go -func main() { - var dir string - - flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") - flag.Parse() - r := mux.NewRouter() - - // This will serve files under http://localhost:8000/static/ - r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) - - srv := &http.Server{ - Handler: r, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! - WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) -} -``` - -### Serving Single Page Applications - -Most of the time it makes sense to serve your SPA on a separate web server from your API, -but sometimes it's desirable to serve them both from one place. It's possible to write a simple -handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage -mux's powerful routing for your API endpoints. - -```go -package main - -import ( - "encoding/json" - "log" - "net/http" - "os" - "path/filepath" - "time" - - "github.com/gorilla/mux" -) - -// spaHandler implements the http.Handler interface, so we can use it -// to respond to HTTP requests. The path to the static directory and -// path to the index file within that static directory are used to -// serve the SPA in the given static directory. -type spaHandler struct { - staticPath string - indexPath string -} - -// ServeHTTP inspects the URL path to locate a file within the static dir -// on the SPA handler. If a file is found, it will be served. If not, the -// file located at the index path on the SPA handler will be served. This -// is suitable behavior for serving an SPA (single page application). 
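// Note: request URL paths are rooted (they begin with "/"), so filepath.Abs
// below effectively just cleans the path, collapsing any ".." segments; the
// subsequent Join therefore cannot escape staticPath.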
-func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // get the absolute path to prevent directory traversal - path, err := filepath.Abs(r.URL.Path) - if err != nil { - // if we failed to get the absolute path respond with a 400 bad request - // and stop - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - // prepend the path with the path to the static directory - path = filepath.Join(h.staticPath, path) - - // check whether a file exists at the given path - _, err = os.Stat(path) - if os.IsNotExist(err) { - // file does not exist, serve index.html - http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) - return - } else if err != nil { - // if we got an error (that wasn't that the file doesn't exist) stating the - // file, return a 500 internal server error and stop - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - // otherwise, use http.FileServer to serve the static dir - http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) -} - -func main() { - router := mux.NewRouter() - - router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) { - // an example API handler - json.NewEncoder(w).Encode(map[string]bool{"ok": true}) - }) - - spa := spaHandler{staticPath: "build", indexPath: "index.html"} - router.PathPrefix("/").Handler(spa) - - srv := &http.Server{ - Handler: router, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! - WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) -} -``` - -### Registered URLs - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example: - -```go -r := mux.NewRouter() -r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") -``` - -To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do: - -```go -url, err := r.Get("article").URL("category", "technology", "id", "42") -``` - -...and the result will be a `url.URL` with the following path: - -``` -"/articles/technology/42" -``` - -This also works for host and query value variables: - -```go -r := mux.NewRouter() -r.Host("{subdomain}.example.com"). - Path("/articles/{category}/{id:[0-9]+}"). - Queries("filter", "{filter}"). - HandlerFunc(ArticleHandler). - Name("article") - -// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla" -url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42", - "filter", "gorilla") -``` - -All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - -```go -r.HeadersRegexp("Content-Type", "application/(text|json)") -``` - -...and the route will match both requests with a Content-Type of `application/json` as well as `application/text` - -There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. 
For the previous route, we would do: - -```go -// "http://news.example.com/" -host, err := r.Get("article").URLHost("subdomain", "news") - -// "/articles/technology/42" -path, err := r.Get("article").URLPath("category", "technology", "id", "42") -``` - -And if you use subrouters, host and path defined separately can be built as well: - -```go -r := mux.NewRouter() -s := r.Host("{subdomain}.example.com").Subrouter() -s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - -// "http://news.example.com/articles/technology/42" -url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") -``` - -### Walking Routes - -The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example, -the following prints all of the registered routes: - -```go -package main - -import ( - "fmt" - "net/http" - "strings" - - "github.com/gorilla/mux" -) - -func handler(w http.ResponseWriter, r *http.Request) { - return -} - -func main() { - r := mux.NewRouter() - r.HandleFunc("/", handler) - r.HandleFunc("/products", handler).Methods("POST") - r.HandleFunc("/articles", handler).Methods("GET") - r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT") - r.HandleFunc("/authors", handler).Queries("surname", "{surname}") - err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { - pathTemplate, err := route.GetPathTemplate() - if err == nil { - fmt.Println("ROUTE:", pathTemplate) - } - pathRegexp, err := route.GetPathRegexp() - if err == nil { - fmt.Println("Path regexp:", pathRegexp) - } - queriesTemplates, err := route.GetQueriesTemplates() - if err == nil { - fmt.Println("Queries templates:", strings.Join(queriesTemplates, ",")) - } - queriesRegexps, err := route.GetQueriesRegexp() - if err == nil { - fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ",")) - } - methods, err := route.GetMethods() - if err == nil { - fmt.Println("Methods:", strings.Join(methods, ",")) - } - fmt.Println() - return nil - }) - - if err != nil { - fmt.Println(err) - } - - http.Handle("/", r) -} -``` - -### Graceful Shutdown - -Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`: - -```go -package main - -import ( - "context" - "flag" - "log" - "net/http" - "os" - "os/signal" - "time" - - "github.com/gorilla/mux" -) - -func main() { - var wait time.Duration - flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m") - flag.Parse() - - r := mux.NewRouter() - // Add your routes as needed - - srv := &http.Server{ - Addr: "0.0.0.0:8080", - // Good practice to set timeouts to avoid Slowloris attacks. - WriteTimeout: time.Second * 15, - ReadTimeout: time.Second * 15, - IdleTimeout: time.Second * 60, - Handler: r, // Pass our instance of gorilla/mux in. - } - - // Run our server in a goroutine so that it doesn't block. - go func() { - if err := srv.ListenAndServe(); err != nil { - log.Println(err) - } - }() - - c := make(chan os.Signal, 1) - // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C) - // SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught. - signal.Notify(c, os.Interrupt) - - // Block until we receive our signal. - <-c - - // Create a deadline to wait for. 
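	// ("wait" is the -graceful-timeout flag parsed above; Shutdown returns as
	// soon as all active connections have drained, or when this deadline hits.)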
- ctx, cancel := context.WithTimeout(context.Background(), wait) - defer cancel() - // Doesn't block if no connections, but will otherwise wait - // until the timeout deadline. - srv.Shutdown(ctx) - // Optionally, you could run srv.Shutdown in a goroutine and block on - // <-ctx.Done() if your application should wait for other services - // to finalize based on context cancellation. - log.Println("shutting down") - os.Exit(0) -} -``` - -### Middleware - -Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters. -Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking. - -Mux middlewares are defined using the de facto standard type: - -```go -type MiddlewareFunc func(http.Handler) http.Handler -``` - -Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able access variables from the context where they are created, while retaining the signature enforced by the receivers. - -A very basic middleware which logs the URI of the request being handled could be written as: - -```go -func loggingMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Do stuff here - log.Println(r.RequestURI) - // Call the next handler, which can be another middleware in the chain, or the final handler. - next.ServeHTTP(w, r) - }) -} -``` - -Middlewares can be added to a router using `Router.Use()`: - -```go -r := mux.NewRouter() -r.HandleFunc("/", handler) -r.Use(loggingMiddleware) -``` - -A more complex authentication middleware, which maps session token to users, could be written as: - -```go -// Define our struct -type authenticationMiddleware struct { - tokenUsers map[string]string -} - -// Initialize it somewhere -func (amw *authenticationMiddleware) Populate() { - amw.tokenUsers["00000000"] = "user0" - amw.tokenUsers["aaaaaaaa"] = "userA" - amw.tokenUsers["05f717e5"] = "randomUser" - amw.tokenUsers["deadbeef"] = "user0" -} - -// Middleware function, which will be called for each request -func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get("X-Session-Token") - - if user, found := amw.tokenUsers[token]; found { - // We found the token in our map - log.Printf("Authenticated user %s\n", user) - // Pass down the request to the next middleware (or final handler) - next.ServeHTTP(w, r) - } else { - // Write an error and stop the handler chain - http.Error(w, "Forbidden", http.StatusForbidden) - } - }) -} -``` - -```go -r := mux.NewRouter() -r.HandleFunc("/", handler) - -amw := authenticationMiddleware{} -amw.Populate() - -r.Use(amw.Middleware) -``` - -Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. 
Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it. - -### Handling CORS Requests - -[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header. - -* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin` -* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route -* If you do not specify any methods, then: -> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers. - -Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers: - -```go -package main - -import ( - "net/http" - "github.com/gorilla/mux" -) - -func main() { - r := mux.NewRouter() - - // IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers - r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions) - r.Use(mux.CORSMethodMiddleware(r)) - - http.ListenAndServe(":8080", r) -} - -func fooHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - if r.Method == http.MethodOptions { - return - } - - w.Write([]byte("foo")) -} -``` - -And an request to `/foo` using something like: - -```bash -curl localhost:8080/foo -v -``` - -Would look like: - -```bash -* Trying ::1... -* TCP_NODELAY set -* Connected to localhost (::1) port 8080 (#0) -> GET /foo HTTP/1.1 -> Host: localhost:8080 -> User-Agent: curl/7.59.0 -> Accept: */* -> -< HTTP/1.1 200 OK -< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS -< Access-Control-Allow-Origin: * -< Date: Fri, 28 Jun 2019 20:13:30 GMT -< Content-Length: 3 -< Content-Type: text/plain; charset=utf-8 -< -* Connection #0 to host localhost left intact -foo -``` - -### Testing Handlers - -Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_. - -First, our simple HTTP handler: - -```go -// endpoints.go -package main - -func HealthCheckHandler(w http.ResponseWriter, r *http.Request) { - // A very simple health check. - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - - // In the future we could report back on the status of our DB, or our cache - // (e.g. Redis) by performing a simple PING, and include them in the response. - io.WriteString(w, `{"alive": true}`) -} - -func main() { - r := mux.NewRouter() - r.HandleFunc("/health", HealthCheckHandler) - - log.Fatal(http.ListenAndServe("localhost:8080", r)) -} -``` - -Our test code: - -```go -// endpoints_test.go -package main - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -func TestHealthCheckHandler(t *testing.T) { - // Create a request to pass to our handler. We don't have any query parameters for now, so we'll - // pass 'nil' as the third parameter. - req, err := http.NewRequest("GET", "/health", nil) - if err != nil { - t.Fatal(err) - } - - // We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response. 
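	// (No network listener is involved: the recorder simply captures the
	// status code, headers and body that the handler writes.)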
- rr := httptest.NewRecorder() - handler := http.HandlerFunc(HealthCheckHandler) - - // Our handlers satisfy http.Handler, so we can call their ServeHTTP method - // directly and pass in our Request and ResponseRecorder. - handler.ServeHTTP(rr, req) - - // Check the status code is what we expect. - if status := rr.Code; status != http.StatusOK { - t.Errorf("handler returned wrong status code: got %v want %v", - status, http.StatusOK) - } - - // Check the response body is what we expect. - expected := `{"alive": true}` - if rr.Body.String() != expected { - t.Errorf("handler returned unexpected body: got %v want %v", - rr.Body.String(), expected) - } -} -``` - -In the case that our routes have [variables](#examples), we can pass those in the request. We could write -[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple -possible route variables as needed. - -```go -// endpoints.go -func main() { - r := mux.NewRouter() - // A route with a route variable: - r.HandleFunc("/metrics/{type}", MetricsHandler) - - log.Fatal(http.ListenAndServe("localhost:8080", r)) -} -``` - -Our test file, with a table-driven test of `routeVariables`: - -```go -// endpoints_test.go -func TestMetricsHandler(t *testing.T) { - tt := []struct{ - routeVariable string - shouldPass bool - }{ - {"goroutines", true}, - {"heap", true}, - {"counters", true}, - {"queries", true}, - {"adhadaeqm3k", false}, - } - - for _, tc := range tt { - path := fmt.Sprintf("/metrics/%s", tc.routeVariable) - req, err := http.NewRequest("GET", path, nil) - if err != nil { - t.Fatal(err) - } - - rr := httptest.NewRecorder() - - // Need to create a router that we can pass the request through so that the vars will be added to the context - router := mux.NewRouter() - router.HandleFunc("/metrics/{type}", MetricsHandler) - router.ServeHTTP(rr, req) - - // In this case, our MetricsHandler returns a non-200 response - // for a route variable it doesn't know about. - if rr.Code == http.StatusOK && !tc.shouldPass { - t.Errorf("handler should have failed on routeVariable %s: got %v want %v", - tc.routeVariable, rr.Code, http.StatusOK) - } - } -} -``` - -## Full Example - -Here's a complete, runnable example of a small `mux` based server: - -```go -package main - -import ( - "net/http" - "log" - "github.com/gorilla/mux" -) - -func YourHandler(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("Gorilla!\n")) -} - -func main() { - r := mux.NewRouter() - // Routes consist of a path and a handler function. - r.HandleFunc("/", YourHandler) - - // Bind to a port and pass our router in - log.Fatal(http.ListenAndServe(":8000", r)) -} -``` - -## License - -BSD licensed. See the LICENSE file for details. diff --git a/vendor/github.com/gorilla/mux/bench_test.go b/vendor/github.com/gorilla/mux/bench_test.go deleted file mode 100644 index 522156dccff2..000000000000 --- a/vendor/github.com/gorilla/mux/bench_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
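// The benchmarks below measure raw route-matching overhead: the handlers do
// nothing, and ServeHTTP is driven directly with pre-built requests.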
- -package mux - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -func BenchmarkMux(b *testing.B) { - router := new(Router) - handler := func(w http.ResponseWriter, r *http.Request) {} - router.HandleFunc("/v1/{v1}", handler) - - request, _ := http.NewRequest("GET", "/v1/anything", nil) - for i := 0; i < b.N; i++ { - router.ServeHTTP(nil, request) - } -} - -func BenchmarkMuxAlternativeInRegexp(b *testing.B) { - router := new(Router) - handler := func(w http.ResponseWriter, r *http.Request) {} - router.HandleFunc("/v1/{v1:(?:a|b)}", handler) - - requestA, _ := http.NewRequest("GET", "/v1/a", nil) - requestB, _ := http.NewRequest("GET", "/v1/b", nil) - for i := 0; i < b.N; i++ { - router.ServeHTTP(nil, requestA) - router.ServeHTTP(nil, requestB) - } -} - -func BenchmarkManyPathVariables(b *testing.B) { - router := new(Router) - handler := func(w http.ResponseWriter, r *http.Request) {} - router.HandleFunc("/v1/{v1}/{v2}/{v3}/{v4}/{v5}", handler) - - matchingRequest, _ := http.NewRequest("GET", "/v1/1/2/3/4/5", nil) - notMatchingRequest, _ := http.NewRequest("GET", "/v1/1/2/3/4", nil) - recorder := httptest.NewRecorder() - for i := 0; i < b.N; i++ { - router.ServeHTTP(nil, matchingRequest) - router.ServeHTTP(recorder, notMatchingRequest) - } -} diff --git a/vendor/github.com/gorilla/mux/context.go b/vendor/github.com/gorilla/mux/context.go deleted file mode 100644 index 665940a2682e..000000000000 --- a/vendor/github.com/gorilla/mux/context.go +++ /dev/null @@ -1,18 +0,0 @@ -package mux - -import ( - "context" - "net/http" -) - -func contextGet(r *http.Request, key interface{}) interface{} { - return r.Context().Value(key) -} - -func contextSet(r *http.Request, key, val interface{}) *http.Request { - if val == nil { - return r - } - - return r.WithContext(context.WithValue(r.Context(), key, val)) -} diff --git a/vendor/github.com/gorilla/mux/context_test.go b/vendor/github.com/gorilla/mux/context_test.go deleted file mode 100644 index d8a56b422f91..000000000000 --- a/vendor/github.com/gorilla/mux/context_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package mux - -import ( - "context" - "net/http" - "testing" - "time" -) - -func TestNativeContextMiddleware(t *testing.T) { - withTimeout := func(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx, cancel := context.WithTimeout(r.Context(), time.Minute) - defer cancel() - h.ServeHTTP(w, r.WithContext(ctx)) - }) - } - - r := NewRouter() - r.Handle("/path/{foo}", withTimeout(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - vars := Vars(r) - if vars["foo"] != "bar" { - t.Fatal("Expected foo var to be set") - } - }))) - - rec := NewRecorder() - req := newRequest("GET", "/path/bar") - r.ServeHTTP(rec, req) -} diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go deleted file mode 100644 index bd5a38b55d82..000000000000 --- a/vendor/github.com/gorilla/mux/doc.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. 
The main features are: - - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts, paths and query values can have variables with an optional - regular expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. - -Let's start registering a couple of URL paths and handlers: - - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } - -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. - -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. For example: - - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -Groups can be used inside patterns, as long as they are non-capturing (?:re). For example: - - r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler) - -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): - - vars := mux.Vars(request) - category := vars["category"] - -Note that if any capturing groups are present, mux will panic() during parsing. To prevent -this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to -"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably -when capturing groups were present. - -And this is all you need to know about the basic usage. More advanced options -are explained below. - -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: - - r := mux.NewRouter() - // Only matches if domain is "www.example.com". - r.Host("www.example.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") - -There are several other matchers that can be added. To match path prefixes: - - r.PathPrefix("/products/") - -...or HTTP methods: - - r.Methods("GET", "POST") - -...or URL schemes: - - r.Schemes("https") - -...or header values: - - r.Headers("X-Requested-With", "XMLHttpRequest") - -...or query values: - - r.Queries("key", "value") - -...or to use a custom matcher function: - - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) - -...and finally, it is possible to combine several matchers in a single route: - - r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). 
- Schemes("http") - -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". - -For example, let's say we have several URLs that should only match when the -host is "www.example.com". Create a route for that host and get a "subrouter" -from it: - - r := mux.NewRouter() - s := r.Host("www.example.com").Subrouter() - -Then register routes in the subrouter: - - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) - -The three URL paths we registered above will only be tested if the domain is -"www.example.com", because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register its -paths relatively to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, -the inner routes use it as base for their paths: - - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) - -Note that the path provided to PathPrefix() represents a "wildcard": calling -PathPrefix("/static/").Handler(...) means that the handler will be passed any -request that matches "/static/*". This makes it easy to serve static files with mux: - - func main() { - var dir string - - flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") - flag.Parse() - r := mux.NewRouter() - - // This will serve files under http://localhost:8000/static/ - r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) - - srv := &http.Server{ - Handler: r, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! - WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) - } - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name calling Name() on a route. For example: - - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") - -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. For the previous route, we would do: - - url, err := r.Get("article").URL("category", "technology", "id", "42") - -...and the result will be a url.URL with the following path: - - "/articles/technology/42" - -This also works for host and query value variables: - - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). - Queries("filter", "{filter}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42", - "filter", "gorilla") - -All variables defined in the route are required, and their values must -conform to the corresponding patterns. 
These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - - r.HeadersRegexp("Content-Type", "application/(text|json)") - -...and the route will match both requests with a Content-Type of `application/json` as well as -`application/text` - -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. For the previous route, -we would do: - - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") - - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") - -And if you use subrouters, host and path defined separately can be built -as well: - - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking. - - type MiddlewareFunc func(http.Handler) http.Handler - -Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created). - -A very basic middleware which logs the URI of the request being handled could be written as: - - func simpleMw(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Do stuff here - log.Println(r.RequestURI) - // Call the next handler, which can be another middleware in the chain, or the final handler. 
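	// (Skipping this call would end the chain here and abort the request,
	// as the note at the end of this overview explains.)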
- next.ServeHTTP(w, r) - }) - } - -Middlewares can be added to a router using `Router.Use()`: - - r := mux.NewRouter() - r.HandleFunc("/", handler) - r.Use(simpleMw) - -A more complex authentication middleware, which maps session token to users, could be written as: - - // Define our struct - type authenticationMiddleware struct { - tokenUsers map[string]string - } - - // Initialize it somewhere - func (amw *authenticationMiddleware) Populate() { - amw.tokenUsers["00000000"] = "user0" - amw.tokenUsers["aaaaaaaa"] = "userA" - amw.tokenUsers["05f717e5"] = "randomUser" - amw.tokenUsers["deadbeef"] = "user0" - } - - // Middleware function, which will be called for each request - func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get("X-Session-Token") - - if user, found := amw.tokenUsers[token]; found { - // We found the token in our map - log.Printf("Authenticated user %s\n", user) - next.ServeHTTP(w, r) - } else { - http.Error(w, "Forbidden", http.StatusForbidden) - } - }) - } - - r := mux.NewRouter() - r.HandleFunc("/", handler) - - amw := authenticationMiddleware{tokenUsers: make(map[string]string)} - amw.Populate() - - r.Use(amw.Middleware) - -Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. - -*/ -package mux diff --git a/vendor/github.com/gorilla/mux/example_authentication_middleware_test.go b/vendor/github.com/gorilla/mux/example_authentication_middleware_test.go deleted file mode 100644 index 6f2ea86ca708..000000000000 --- a/vendor/github.com/gorilla/mux/example_authentication_middleware_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package mux_test - -import ( - "log" - "net/http" - - "github.com/gorilla/mux" -) - -// Define our struct -type authenticationMiddleware struct { - tokenUsers map[string]string -} - -// Initialize it somewhere -func (amw *authenticationMiddleware) Populate() { - amw.tokenUsers["00000000"] = "user0" - amw.tokenUsers["aaaaaaaa"] = "userA" - amw.tokenUsers["05f717e5"] = "randomUser" - amw.tokenUsers["deadbeef"] = "user0" -} - -// Middleware function, which will be called for each request -func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get("X-Session-Token") - - if user, found := amw.tokenUsers[token]; found { - // We found the token in our map - log.Printf("Authenticated user %s\n", user) - next.ServeHTTP(w, r) - } else { - http.Error(w, "Forbidden", http.StatusForbidden) - } - }) -} - -func Example_authenticationMiddleware() { - r := mux.NewRouter() - r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - // Do something here - }) - amw := authenticationMiddleware{make(map[string]string)} - amw.Populate() - r.Use(amw.Middleware) -} diff --git a/vendor/github.com/gorilla/mux/example_cors_method_middleware_test.go b/vendor/github.com/gorilla/mux/example_cors_method_middleware_test.go deleted file mode 100644 index 00929fcee5ba..000000000000 --- a/vendor/github.com/gorilla/mux/example_cors_method_middleware_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package mux_test - -import ( - "fmt" - "net/http" - "net/http/httptest" - - "github.com/gorilla/mux" -) - -func ExampleCORSMethodMiddleware() { - r := mux.NewRouter() - - r.HandleFunc("/foo", func(w http.ResponseWriter, 
r *http.Request) { - // Handle the request - }).Methods(http.MethodGet, http.MethodPut, http.MethodPatch) - r.HandleFunc("/foo", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "http://example.com") - w.Header().Set("Access-Control-Max-Age", "86400") - }).Methods(http.MethodOptions) - - r.Use(mux.CORSMethodMiddleware(r)) - - rw := httptest.NewRecorder() - req, _ := http.NewRequest("OPTIONS", "/foo", nil) // needs to be OPTIONS - req.Header.Set("Access-Control-Request-Method", "POST") // needs to be non-empty - req.Header.Set("Access-Control-Request-Headers", "Authorization") // needs to be non-empty - req.Header.Set("Origin", "http://example.com") // needs to be non-empty - - r.ServeHTTP(rw, req) - - fmt.Println(rw.Header().Get("Access-Control-Allow-Methods")) - fmt.Println(rw.Header().Get("Access-Control-Allow-Origin")) - // Output: - // GET,PUT,PATCH,OPTIONS - // http://example.com -} diff --git a/vendor/github.com/gorilla/mux/example_route_test.go b/vendor/github.com/gorilla/mux/example_route_test.go deleted file mode 100644 index 1125570713e6..000000000000 --- a/vendor/github.com/gorilla/mux/example_route_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package mux_test - -import ( - "fmt" - "net/http" - - "github.com/gorilla/mux" -) - -// This example demonstrates setting a regular expression matcher for -// the header value. A plain word will match any value that contains a -// matching substring as if the pattern was wrapped with `.*`. -func ExampleRoute_HeadersRegexp() { - r := mux.NewRouter() - route := r.NewRoute().HeadersRegexp("Accept", "html") - - req1, _ := http.NewRequest("GET", "example.com", nil) - req1.Header.Add("Accept", "text/plain") - req1.Header.Add("Accept", "text/html") - - req2, _ := http.NewRequest("GET", "example.com", nil) - req2.Header.Set("Accept", "application/xhtml+xml") - - matchInfo := &mux.RouteMatch{} - fmt.Printf("Match: %v %q\n", route.Match(req1, matchInfo), req1.Header["Accept"]) - fmt.Printf("Match: %v %q\n", route.Match(req2, matchInfo), req2.Header["Accept"]) - // Output: - // Match: true ["text/plain" "text/html"] - // Match: true ["application/xhtml+xml"] -} - -// This example demonstrates setting a strict regular expression matcher -// for the header value. Using the start and end of string anchors, the -// value must be an exact match. 
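// Without the "^" and "$" anchors, the pattern would match any substring of
// the header value, as the example above demonstrates.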
-func ExampleRoute_HeadersRegexp_exactMatch() { - r := mux.NewRouter() - route := r.NewRoute().HeadersRegexp("Origin", "^https://example.co$") - - yes, _ := http.NewRequest("GET", "example.co", nil) - yes.Header.Set("Origin", "https://example.co") - - no, _ := http.NewRequest("GET", "example.co.uk", nil) - no.Header.Set("Origin", "https://example.co.uk") - - matchInfo := &mux.RouteMatch{} - fmt.Printf("Match: %v %q\n", route.Match(yes, matchInfo), yes.Header["Origin"]) - fmt.Printf("Match: %v %q\n", route.Match(no, matchInfo), no.Header["Origin"]) - // Output: - // Match: true ["https://example.co"] - // Match: false ["https://example.co.uk"] -} diff --git a/vendor/github.com/gorilla/mux/go.mod b/vendor/github.com/gorilla/mux/go.mod deleted file mode 100644 index df170a399403..000000000000 --- a/vendor/github.com/gorilla/mux/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/gorilla/mux - -go 1.12 diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go deleted file mode 100644 index cf2b26dc037a..000000000000 --- a/vendor/github.com/gorilla/mux/middleware.go +++ /dev/null @@ -1,79 +0,0 @@ -package mux - -import ( - "net/http" - "strings" -) - -// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler. -// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed -// to it, and then calls the handler passed as parameter to the MiddlewareFunc. -type MiddlewareFunc func(http.Handler) http.Handler - -// middleware interface is anything which implements a MiddlewareFunc named Middleware. -type middleware interface { - Middleware(handler http.Handler) http.Handler -} - -// Middleware allows MiddlewareFunc to implement the middleware interface. -func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler { - return mw(handler) -} - -// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. -func (r *Router) Use(mwf ...MiddlewareFunc) { - for _, fn := range mwf { - r.middlewares = append(r.middlewares, fn) - } -} - -// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. -func (r *Router) useInterface(mw middleware) { - r.middlewares = append(r.middlewares, mw) -} - -// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header -// on requests for routes that have an OPTIONS method matcher to all the method matchers on -// the route. Routes that do not explicitly handle OPTIONS requests will not be processed -// by the middleware. See examples for usage. -func CORSMethodMiddleware(r *Router) MiddlewareFunc { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - allMethods, err := getAllMethodsForRoute(r, req) - if err == nil { - for _, v := range allMethods { - if v == http.MethodOptions { - w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ",")) - } - } - } - - next.ServeHTTP(w, req) - }) - } -} - -// getAllMethodsForRoute returns all the methods from method matchers matching a given -// request. 
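// Only each route's first regexp matcher (normally the path) is consulted,
// and method matchers are deliberately ignored, so every verb registered on a
// matching path is collected.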
-func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) { - var allMethods []string - - err := r.Walk(func(route *Route, _ *Router, _ []*Route) error { - for _, m := range route.matchers { - if _, ok := m.(*routeRegexp); ok { - if m.Match(req, &RouteMatch{}) { - methods, err := route.GetMethods() - if err != nil { - return err - } - - allMethods = append(allMethods, methods...) - } - break - } - } - return nil - }) - - return allMethods, err -} diff --git a/vendor/github.com/gorilla/mux/middleware_test.go b/vendor/github.com/gorilla/mux/middleware_test.go deleted file mode 100644 index 27647afe4305..000000000000 --- a/vendor/github.com/gorilla/mux/middleware_test.go +++ /dev/null @@ -1,545 +0,0 @@ -package mux - -import ( - "bytes" - "net/http" - "testing" -) - -type testMiddleware struct { - timesCalled uint -} - -func (tm *testMiddleware) Middleware(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - tm.timesCalled++ - h.ServeHTTP(w, r) - }) -} - -func dummyHandler(w http.ResponseWriter, r *http.Request) {} - -func TestMiddlewareAdd(t *testing.T) { - router := NewRouter() - router.HandleFunc("/", dummyHandler).Methods("GET") - - mw := &testMiddleware{} - - router.useInterface(mw) - if len(router.middlewares) != 1 || router.middlewares[0] != mw { - t.Fatal("Middleware interface was not added correctly") - } - - router.Use(mw.Middleware) - if len(router.middlewares) != 2 { - t.Fatal("Middleware method was not added correctly") - } - - banalMw := func(handler http.Handler) http.Handler { - return handler - } - router.Use(banalMw) - if len(router.middlewares) != 3 { - t.Fatal("Middleware function was not added correctly") - } -} - -func TestMiddleware(t *testing.T) { - router := NewRouter() - router.HandleFunc("/", dummyHandler).Methods("GET") - - mw := &testMiddleware{} - router.useInterface(mw) - - rw := NewRecorder() - req := newRequest("GET", "/") - - t.Run("regular middleware call", func(t *testing.T) { - router.ServeHTTP(rw, req) - if mw.timesCalled != 1 { - t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled) - } - }) - - t.Run("not called for 404", func(t *testing.T) { - req = newRequest("GET", "/not/found") - router.ServeHTTP(rw, req) - if mw.timesCalled != 1 { - t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled) - } - }) - - t.Run("not called for method mismatch", func(t *testing.T) { - req = newRequest("POST", "/") - router.ServeHTTP(rw, req) - if mw.timesCalled != 1 { - t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled) - } - }) - - t.Run("regular call using function middleware", func(t *testing.T) { - router.Use(mw.Middleware) - req = newRequest("GET", "/") - router.ServeHTTP(rw, req) - if mw.timesCalled != 3 { - t.Fatalf("Expected %d calls, but got only %d", 3, mw.timesCalled) - } - }) -} - -func TestMiddlewareSubrouter(t *testing.T) { - router := NewRouter() - router.HandleFunc("/", dummyHandler).Methods("GET") - - subrouter := router.PathPrefix("/sub").Subrouter() - subrouter.HandleFunc("/x", dummyHandler).Methods("GET") - - mw := &testMiddleware{} - subrouter.useInterface(mw) - - rw := NewRecorder() - req := newRequest("GET", "/") - - t.Run("not called for route outside subrouter", func(t *testing.T) { - router.ServeHTTP(rw, req) - if mw.timesCalled != 0 { - t.Fatalf("Expected %d calls, but got only %d", 0, mw.timesCalled) - } - }) - - t.Run("not called for subrouter root 404", func(t *testing.T) { - req = newRequest("GET", "/sub/") - router.ServeHTTP(rw, req) 
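		// (The subrouter only registered "/sub/x", so "/sub/" is a 404 and the
		// middleware must not have run.)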
- if mw.timesCalled != 0 { - t.Fatalf("Expected %d calls, but got only %d", 0, mw.timesCalled) - } - }) - - t.Run("called once for route inside subrouter", func(t *testing.T) { - req = newRequest("GET", "/sub/x") - router.ServeHTTP(rw, req) - if mw.timesCalled != 1 { - t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled) - } - }) - - t.Run("not called for 404 inside subrouter", func(t *testing.T) { - req = newRequest("GET", "/sub/not/found") - router.ServeHTTP(rw, req) - if mw.timesCalled != 1 { - t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled) - } - }) - - t.Run("middleware added to router", func(t *testing.T) { - router.useInterface(mw) - - t.Run("called once for route outside subrouter", func(t *testing.T) { - req = newRequest("GET", "/") - router.ServeHTTP(rw, req) - if mw.timesCalled != 2 { - t.Fatalf("Expected %d calls, but got only %d", 2, mw.timesCalled) - } - }) - - t.Run("called twice for route inside subrouter", func(t *testing.T) { - req = newRequest("GET", "/sub/x") - router.ServeHTTP(rw, req) - if mw.timesCalled != 4 { - t.Fatalf("Expected %d calls, but got only %d", 4, mw.timesCalled) - } - }) - }) -} - -func TestMiddlewareExecution(t *testing.T) { - mwStr := []byte("Middleware\n") - handlerStr := []byte("Logic\n") - - router := NewRouter() - router.HandleFunc("/", func(w http.ResponseWriter, e *http.Request) { - w.Write(handlerStr) - }) - - t.Run("responds normally without middleware", func(t *testing.T) { - rw := NewRecorder() - req := newRequest("GET", "/") - - router.ServeHTTP(rw, req) - - if !bytes.Equal(rw.Body.Bytes(), handlerStr) { - t.Fatal("Handler response is not what it should be") - } - }) - - t.Run("responds with handler and middleware response", func(t *testing.T) { - rw := NewRecorder() - req := newRequest("GET", "/") - - router.Use(func(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write(mwStr) - h.ServeHTTP(w, r) - }) - }) - - router.ServeHTTP(rw, req) - if !bytes.Equal(rw.Body.Bytes(), append(mwStr, handlerStr...)) { - t.Fatal("Middleware + handler response is not what it should be") - } - }) -} - -func TestMiddlewareNotFound(t *testing.T) { - mwStr := []byte("Middleware\n") - handlerStr := []byte("Logic\n") - - router := NewRouter() - router.HandleFunc("/", func(w http.ResponseWriter, e *http.Request) { - w.Write(handlerStr) - }) - router.Use(func(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write(mwStr) - h.ServeHTTP(w, r) - }) - }) - - // Test not found call with default handler - t.Run("not called", func(t *testing.T) { - rw := NewRecorder() - req := newRequest("GET", "/notfound") - - router.ServeHTTP(rw, req) - if bytes.Contains(rw.Body.Bytes(), mwStr) { - t.Fatal("Middleware was called for a 404") - } - }) - - t.Run("not called with custom not found handler", func(t *testing.T) { - rw := NewRecorder() - req := newRequest("GET", "/notfound") - - router.NotFoundHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.Write([]byte("Custom 404 handler")) - }) - router.ServeHTTP(rw, req) - - if bytes.Contains(rw.Body.Bytes(), mwStr) { - t.Fatal("Middleware was called for a custom 404") - } - }) -} - -func TestMiddlewareMethodMismatch(t *testing.T) { - mwStr := []byte("Middleware\n") - handlerStr := []byte("Logic\n") - - router := NewRouter() - router.HandleFunc("/", func(w http.ResponseWriter, e *http.Request) { - w.Write(handlerStr) - }).Methods("GET") - - router.Use(func(h 
http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write(mwStr) - h.ServeHTTP(w, r) - }) - }) - - t.Run("not called", func(t *testing.T) { - rw := NewRecorder() - req := newRequest("POST", "/") - - router.ServeHTTP(rw, req) - if bytes.Contains(rw.Body.Bytes(), mwStr) { - t.Fatal("Middleware was called for a method mismatch") - } - }) - - t.Run("not called with custom method not allowed handler", func(t *testing.T) { - rw := NewRecorder() - req := newRequest("POST", "/") - - router.MethodNotAllowedHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.Write([]byte("Method not allowed")) - }) - router.ServeHTTP(rw, req) - - if bytes.Contains(rw.Body.Bytes(), mwStr) { - t.Fatal("Middleware was called for a method mismatch") - } - }) -} - -func TestMiddlewareNotFoundSubrouter(t *testing.T) { - mwStr := []byte("Middleware\n") - handlerStr := []byte("Logic\n") - - router := NewRouter() - router.HandleFunc("/", func(w http.ResponseWriter, e *http.Request) { - w.Write(handlerStr) - }) - - subrouter := router.PathPrefix("/sub/").Subrouter() - subrouter.HandleFunc("/", func(w http.ResponseWriter, e *http.Request) { - w.Write(handlerStr) - }) - - router.Use(func(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write(mwStr) - h.ServeHTTP(w, r) - }) - }) - - t.Run("not called", func(t *testing.T) { - rw := NewRecorder() - req := newRequest("GET", "/sub/notfound") - - router.ServeHTTP(rw, req) - if bytes.Contains(rw.Body.Bytes(), mwStr) { - t.Fatal("Middleware was called for a 404") - } - }) - - t.Run("not called with custom not found handler", func(t *testing.T) { - rw := NewRecorder() - req := newRequest("GET", "/sub/notfound") - - subrouter.NotFoundHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.Write([]byte("Custom 404 handler")) - }) - router.ServeHTTP(rw, req) - - if bytes.Contains(rw.Body.Bytes(), mwStr) { - t.Fatal("Middleware was called for a custom 404") - } - }) -} - -func TestMiddlewareMethodMismatchSubrouter(t *testing.T) { - mwStr := []byte("Middleware\n") - handlerStr := []byte("Logic\n") - - router := NewRouter() - router.HandleFunc("/", func(w http.ResponseWriter, e *http.Request) { - w.Write(handlerStr) - }) - - subrouter := router.PathPrefix("/sub/").Subrouter() - subrouter.HandleFunc("/", func(w http.ResponseWriter, e *http.Request) { - w.Write(handlerStr) - }).Methods("GET") - - router.Use(func(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write(mwStr) - h.ServeHTTP(w, r) - }) - }) - - t.Run("not called", func(t *testing.T) { - rw := NewRecorder() - req := newRequest("POST", "/sub/") - - router.ServeHTTP(rw, req) - if bytes.Contains(rw.Body.Bytes(), mwStr) { - t.Fatal("Middleware was called for a method mismatch") - } - }) - - t.Run("not called with custom method not allowed handler", func(t *testing.T) { - rw := NewRecorder() - req := newRequest("POST", "/sub/") - - router.MethodNotAllowedHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.Write([]byte("Method not allowed")) - }) - router.ServeHTTP(rw, req) - - if bytes.Contains(rw.Body.Bytes(), mwStr) { - t.Fatal("Middleware was called for a method mismatch") - } - }) -} - -func TestCORSMethodMiddleware(t *testing.T) { - testCases := []struct { - name string - registerRoutes func(r *Router) - requestHeader http.Header - requestMethod string - requestPath string - 
expectedAccessControlAllowMethodsHeader string - expectedResponse string - }{ - { - name: "does not set without OPTIONS matcher", - registerRoutes: func(r *Router) { - r.HandleFunc("/foo", stringHandler("a")).Methods(http.MethodGet, http.MethodPut, http.MethodPatch) - }, - requestMethod: "GET", - requestPath: "/foo", - expectedAccessControlAllowMethodsHeader: "", - expectedResponse: "a", - }, - { - name: "sets on non OPTIONS", - registerRoutes: func(r *Router) { - r.HandleFunc("/foo", stringHandler("a")).Methods(http.MethodGet, http.MethodPut, http.MethodPatch) - r.HandleFunc("/foo", stringHandler("b")).Methods(http.MethodOptions) - }, - requestMethod: "GET", - requestPath: "/foo", - expectedAccessControlAllowMethodsHeader: "GET,PUT,PATCH,OPTIONS", - expectedResponse: "a", - }, - { - name: "sets without preflight headers", - registerRoutes: func(r *Router) { - r.HandleFunc("/foo", stringHandler("a")).Methods(http.MethodGet, http.MethodPut, http.MethodPatch) - r.HandleFunc("/foo", stringHandler("b")).Methods(http.MethodOptions) - }, - requestMethod: "OPTIONS", - requestPath: "/foo", - expectedAccessControlAllowMethodsHeader: "GET,PUT,PATCH,OPTIONS", - expectedResponse: "b", - }, - { - name: "does not set on error", - registerRoutes: func(r *Router) { - r.HandleFunc("/foo", stringHandler("a")) - }, - requestMethod: "OPTIONS", - requestPath: "/foo", - expectedAccessControlAllowMethodsHeader: "", - expectedResponse: "a", - }, - { - name: "sets header on valid preflight", - registerRoutes: func(r *Router) { - r.HandleFunc("/foo", stringHandler("a")).Methods(http.MethodGet, http.MethodPut, http.MethodPatch) - r.HandleFunc("/foo", stringHandler("b")).Methods(http.MethodOptions) - }, - requestMethod: "OPTIONS", - requestPath: "/foo", - requestHeader: http.Header{ - "Access-Control-Request-Method": []string{"GET"}, - "Access-Control-Request-Headers": []string{"Authorization"}, - "Origin": []string{"http://example.com"}, - }, - expectedAccessControlAllowMethodsHeader: "GET,PUT,PATCH,OPTIONS", - expectedResponse: "b", - }, - { - name: "does not set methods from unmatching routes", - registerRoutes: func(r *Router) { - r.HandleFunc("/foo", stringHandler("c")).Methods(http.MethodDelete) - r.HandleFunc("/foo/bar", stringHandler("a")).Methods(http.MethodGet, http.MethodPut, http.MethodPatch) - r.HandleFunc("/foo/bar", stringHandler("b")).Methods(http.MethodOptions) - }, - requestMethod: "OPTIONS", - requestPath: "/foo/bar", - requestHeader: http.Header{ - "Access-Control-Request-Method": []string{"GET"}, - "Access-Control-Request-Headers": []string{"Authorization"}, - "Origin": []string{"http://example.com"}, - }, - expectedAccessControlAllowMethodsHeader: "GET,PUT,PATCH,OPTIONS", - expectedResponse: "b", - }, - } - - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - router := NewRouter() - - tt.registerRoutes(router) - - router.Use(CORSMethodMiddleware(router)) - - rw := NewRecorder() - req := newRequest(tt.requestMethod, tt.requestPath) - req.Header = tt.requestHeader - - router.ServeHTTP(rw, req) - - actualMethodsHeader := rw.Header().Get("Access-Control-Allow-Methods") - if actualMethodsHeader != tt.expectedAccessControlAllowMethodsHeader { - t.Fatalf("Expected Access-Control-Allow-Methods to equal %s but got %s", tt.expectedAccessControlAllowMethodsHeader, actualMethodsHeader) - } - - actualResponse := rw.Body.String() - if actualResponse != tt.expectedResponse { - t.Fatalf("Expected response to equal %s but got %s", tt.expectedResponse, actualResponse) - } - }) - } -} - -func 
TestMiddlewareOnMultiSubrouter(t *testing.T) { - first := "first" - second := "second" - notFound := "404 not found" - - router := NewRouter() - firstSubRouter := router.PathPrefix("/").Subrouter() - secondSubRouter := router.PathPrefix("/").Subrouter() - - router.NotFoundHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.Write([]byte(notFound)) - }) - - firstSubRouter.HandleFunc("/first", func(w http.ResponseWriter, r *http.Request) { - - }) - - secondSubRouter.HandleFunc("/second", func(w http.ResponseWriter, r *http.Request) { - - }) - - firstSubRouter.Use(func(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(first)) - h.ServeHTTP(w, r) - }) - }) - - secondSubRouter.Use(func(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(second)) - h.ServeHTTP(w, r) - }) - }) - - t.Run("/first uses first middleware", func(t *testing.T) { - rw := NewRecorder() - req := newRequest("GET", "/first") - - router.ServeHTTP(rw, req) - if rw.Body.String() != first { - t.Fatalf("Middleware did not run: expected %s middleware to write a response (got %s)", first, rw.Body.String()) - } - }) - - t.Run("/second uses second middleware", func(t *testing.T) { - rw := NewRecorder() - req := newRequest("GET", "/second") - - router.ServeHTTP(rw, req) - if rw.Body.String() != second { - t.Fatalf("Middleware did not run: expected %s middleware to write a response (got %s)", second, rw.Body.String()) - } - }) - - t.Run("uses not found handler", func(t *testing.T) { - rw := NewRecorder() - req := newRequest("GET", "/second/not-exist") - - router.ServeHTTP(rw, req) - if rw.Body.String() != notFound { - t.Fatalf("Notfound handler did not run: expected %s for not-exist, (got %s)", notFound, rw.Body.String()) - } - }) -} diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go deleted file mode 100644 index 26f9582ac844..000000000000 --- a/vendor/github.com/gorilla/mux/mux.go +++ /dev/null @@ -1,605 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "path" - "regexp" -) - -var ( - // ErrMethodMismatch is returned when the method in the request does not match - // the method defined against the route. - ErrMethodMismatch = errors.New("method is not allowed") - // ErrNotFound is returned when no route match is found. - ErrNotFound = errors.New("no matching route was found") -) - -// NewRouter returns a new router instance. -func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route)} -} - -// Router registers routes to be matched and dispatches a handler. -// -// It implements the http.Handler interface, so it can be registered to serve -// requests: -// -// var router = mux.NewRouter() -// -// func main() { -// http.Handle("/", router) -// } -// -// Or, for Google App Engine, register it in a init() function: -// -// func init() { -// http.Handle("/", router) -// } -// -// This will send all incoming requests to the router. -type Router struct { - // Configurable Handler to be used when no route matches. - NotFoundHandler http.Handler - - // Configurable Handler to be used when the request method does not match the route. - MethodNotAllowedHandler http.Handler - - // Routes to be matched, in order. 
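// TestMiddlewareOnMultiSubrouter above shows that Use is scoped: middleware
// attached to one subrouter never fires for requests matched by a sibling.
// A minimal sketch of the same layout (the "a:"/"b:" tags are illustrative):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func tag(s string) mux.MiddlewareFunc {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte(s)) // mark which middleware ran
			next.ServeHTTP(w, r)
		})
	}
}

func main() {
	root := mux.NewRouter()
	a := root.PathPrefix("/a").Subrouter()
	b := root.PathPrefix("/b").Subrouter()
	a.HandleFunc("/x", func(http.ResponseWriter, *http.Request) {})
	b.HandleFunc("/x", func(http.ResponseWriter, *http.Request) {})
	a.Use(tag("a:"))
	b.Use(tag("b:"))

	rw := httptest.NewRecorder()
	root.ServeHTTP(rw, httptest.NewRequest("GET", "/a/x", nil))
	fmt.Println(rw.Body.String()) // "a:" only; b's middleware never runs
}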
- routes []*Route - - // Routes by name for URL building. - namedRoutes map[string]*Route - - // If true, do not clear the request context after handling the request. - // - // Deprecated: No effect when go1.7+ is used, since the context is stored - // on the request itself. - KeepContext bool - - // Slice of middlewares to be called after a match is found - middlewares []middleware - - // configuration shared with `Route` - routeConf -} - -// common route configuration shared between `Router` and `Route` -type routeConf struct { - // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" - useEncodedPath bool - - // If true, when the path pattern is "/path/", accessing "/path" will - // redirect to the former and vice versa. - strictSlash bool - - // If true, when the path pattern is "/path//to", accessing "/path//to" - // will not redirect - skipClean bool - - // Manager for the variables from host and path. - regexp routeRegexpGroup - - // List of matchers. - matchers []matcher - - // The scheme used when building URLs. - buildScheme string - - buildVarsFunc BuildVarsFunc -} - -// returns an effective deep copy of `routeConf` -func copyRouteConf(r routeConf) routeConf { - c := r - - if r.regexp.path != nil { - c.regexp.path = copyRouteRegexp(r.regexp.path) - } - - if r.regexp.host != nil { - c.regexp.host = copyRouteRegexp(r.regexp.host) - } - - c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries)) - for _, q := range r.regexp.queries { - c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q)) - } - - c.matchers = make([]matcher, len(r.matchers)) - copy(c.matchers, r.matchers) - - return c -} - -func copyRouteRegexp(r *routeRegexp) *routeRegexp { - c := *r - return &c -} - -// Match attempts to match the given request against the router's registered routes. -// -// If the request matches a route of this router or one of its subrouters the Route, -// Handler, and Vars fields of the the match argument are filled and this function -// returns true. -// -// If the request does not match any of this router's or its subrouters' routes -// then this function returns false. If available, a reason for the match failure -// will be filled in the match argument's MatchErr field. If the match failure type -// (eg: not found) has a registered handler, the handler is assigned to the Handler -// field of the match argument. -func (r *Router) Match(req *http.Request, match *RouteMatch) bool { - for _, route := range r.routes { - if route.Match(req, match) { - // Build middleware chain if no error was found - if match.MatchErr == nil { - for i := len(r.middlewares) - 1; i >= 0; i-- { - match.Handler = r.middlewares[i].Middleware(match.Handler) - } - } - return true - } - } - - if match.MatchErr == ErrMethodMismatch { - if r.MethodNotAllowedHandler != nil { - match.Handler = r.MethodNotAllowedHandler - return true - } - - return false - } - - // Closest match for a router (includes sub-routers) - if r.NotFoundHandler != nil { - match.Handler = r.NotFoundHandler - match.MatchErr = ErrNotFound - return true - } - - match.MatchErr = ErrNotFound - return false -} - -// ServeHTTP dispatches the handler registered in the matched route. -// -// When there is a match, the route variables can be retrieved calling -// mux.Vars(request). -func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if !r.skipClean { - path := req.URL.Path - if r.useEncodedPath { - path = req.URL.EscapedPath() - } - // Clean path to canonical form and redirect. 
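// Note the direction of the loop in Match above: middlewares are applied from
// the end of the slice backwards, so the first-registered middleware ends up
// outermost and executes first at request time. A standalone sketch of the
// same fold (all names here are illustrative):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	var h http.Handler = http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
		fmt.Println("handler")
	})
	mws := []func(http.Handler) http.Handler{
		func(next http.Handler) http.Handler { // registered first
			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				fmt.Println("first")
				next.ServeHTTP(w, r)
			})
		},
		func(next http.Handler) http.Handler { // registered second
			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				fmt.Println("second")
				next.ServeHTTP(w, r)
			})
		},
	}
	// Same iteration order as Router.Match: wrap back to front.
	for i := len(mws) - 1; i >= 0; i-- {
		h = mws[i](h)
	}
	h.ServeHTTP(httptest.NewRecorder(), nil) // prints: first, second, handler
}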
- if p := cleanPath(path); p != path { - - // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() - - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return - } - } - var match RouteMatch - var handler http.Handler - if r.Match(req, &match) { - handler = match.Handler - req = setVars(req, match.Vars) - req = setCurrentRoute(req, match.Route) - } - - if handler == nil && match.MatchErr == ErrMethodMismatch { - handler = methodNotAllowedHandler() - } - - if handler == nil { - handler = http.NotFoundHandler() - } - - handler.ServeHTTP(w, req) -} - -// Get returns a route registered with the given name. -func (r *Router) Get(name string) *Route { - return r.namedRoutes[name] -} - -// GetRoute returns a route registered with the given name. This method -// was renamed to Get() and remains here for backwards compatibility. -func (r *Router) GetRoute(name string) *Route { - return r.namedRoutes[name] -} - -// StrictSlash defines the trailing slash behavior for new routes. The initial -// value is false. -// -// When true, if the route path is "/path/", accessing "/path" will perform a redirect -// to the former and vice versa. In other words, your application will always -// see the path as specified in the route. -// -// When false, if the route path is "/path", accessing "/path/" will not match -// this route and vice versa. -// -// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for -// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed -// request will be made as a GET by most clients. Use middleware or client settings -// to modify this behaviour as needed. -// -// Special case: when a route sets a path prefix using the PathPrefix() method, -// strict slash is ignored for that route because the redirect behavior can't -// be determined from a prefix alone. However, any subrouters created from that -// route inherit the original StrictSlash setting. -func (r *Router) StrictSlash(value bool) *Router { - r.strictSlash = value - return r -} - -// SkipClean defines the path cleaning behaviour for new routes. The initial -// value is false. Users should be careful about which routes are not cleaned -// -// When true, if the route path is "/path//to", it will remain with the double -// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/ -// -// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will -// become /fetch/http/xkcd.com/534 -func (r *Router) SkipClean(value bool) *Router { - r.skipClean = value - return r -} - -// UseEncodedPath tells the router to match the encoded original path -// to the routes. -// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to". -// -// If not called, the router will match the unencoded path to the routes. -// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to" -func (r *Router) UseEncodedPath() *Router { - r.useEncodedPath = true - return r -} - -// ---------------------------------------------------------------------------- -// Route factories -// ---------------------------------------------------------------------------- - -// NewRoute registers an empty route. 
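// StrictSlash, SkipClean and UseEncodedPath above are independent opt-ins and
// apply to routes registered after the call. A configuration sketch (all
// three shown on one router for brevity):

package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.StrictSlash(true) // "/users" <-> "/users/" via a 301 redirect
	r.SkipClean(true)   // keep "//" runs, e.g. /fetch/http://xkcd.com/534/
	r.UseEncodedPath()  // match %2F and friends in their encoded form

	r.HandleFunc("/users/", func(w http.ResponseWriter, _ *http.Request) {
		w.Write([]byte("users"))
	})
	// With StrictSlash(true), GET /users answers 301 -> /users/.
	log.Fatal(http.ListenAndServe(":8080", r))
}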
-func (r *Router) NewRoute() *Route { - // initialize a route with a copy of the parent router's configuration - route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} - r.routes = append(r.routes, route) - return route -} - -// Name registers a new route with a name. -// See Route.Name(). -func (r *Router) Name(name string) *Route { - return r.NewRoute().Name(name) -} - -// Handle registers a new route with a matcher for the URL path. -// See Route.Path() and Route.Handler(). -func (r *Router) Handle(path string, handler http.Handler) *Route { - return r.NewRoute().Path(path).Handler(handler) -} - -// HandleFunc registers a new route with a matcher for the URL path. -// See Route.Path() and Route.HandlerFunc(). -func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, - *http.Request)) *Route { - return r.NewRoute().Path(path).HandlerFunc(f) -} - -// Headers registers a new route with a matcher for request header values. -// See Route.Headers(). -func (r *Router) Headers(pairs ...string) *Route { - return r.NewRoute().Headers(pairs...) -} - -// Host registers a new route with a matcher for the URL host. -// See Route.Host(). -func (r *Router) Host(tpl string) *Route { - return r.NewRoute().Host(tpl) -} - -// MatcherFunc registers a new route with a custom matcher function. -// See Route.MatcherFunc(). -func (r *Router) MatcherFunc(f MatcherFunc) *Route { - return r.NewRoute().MatcherFunc(f) -} - -// Methods registers a new route with a matcher for HTTP methods. -// See Route.Methods(). -func (r *Router) Methods(methods ...string) *Route { - return r.NewRoute().Methods(methods...) -} - -// Path registers a new route with a matcher for the URL path. -// See Route.Path(). -func (r *Router) Path(tpl string) *Route { - return r.NewRoute().Path(tpl) -} - -// PathPrefix registers a new route with a matcher for the URL path prefix. -// See Route.PathPrefix(). -func (r *Router) PathPrefix(tpl string) *Route { - return r.NewRoute().PathPrefix(tpl) -} - -// Queries registers a new route with a matcher for URL query values. -// See Route.Queries(). -func (r *Router) Queries(pairs ...string) *Route { - return r.NewRoute().Queries(pairs...) -} - -// Schemes registers a new route with a matcher for URL schemes. -// See Route.Schemes(). -func (r *Router) Schemes(schemes ...string) *Route { - return r.NewRoute().Schemes(schemes...) -} - -// BuildVarsFunc registers a new route with a custom function for modifying -// route variables before building a URL. -func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { - return r.NewRoute().BuildVarsFunc(f) -} - -// Walk walks the router and all its sub-routers, calling walkFn for each route -// in the tree. The routes are walked in the order they were added. Sub-routers -// are explored depth-first. -func (r *Router) Walk(walkFn WalkFunc) error { - return r.walk(walkFn, []*Route{}) -} - -// SkipRouter is used as a return value from WalkFuncs to indicate that the -// router that walk is about to descend down to should be skipped. -var SkipRouter = errors.New("skip this router") - -// WalkFunc is the type of the function called for each route visited by Walk. -// At every invocation, it is given the current route, and the current router, -// and a list of ancestor routes that lead to the current route. 
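// Each factory above just calls NewRoute and adds one matcher, which is why
// matchers compose freely on a single route. A sketch combining several (the
// handler body and the route name "article" are illustrative):

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.Host("www.example.com").
		Path("/articles/{id:[0-9]+}").
		Methods("GET").
		Queries("lang", "{lang}").
		Name("article").
		HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			v := mux.Vars(req)
			fmt.Fprintf(w, "article %s in %s", v["id"], v["lang"])
		})
	// The route matches only when host, path, method and query all agree.
}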
-type WalkFunc func(route *Route, router *Router, ancestors []*Route) error - -func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { - for _, t := range r.routes { - err := walkFn(t, r, ancestors) - if err == SkipRouter { - continue - } - if err != nil { - return err - } - for _, sr := range t.matchers { - if h, ok := sr.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - if h, ok := t.handler.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - return nil -} - -// ---------------------------------------------------------------------------- -// Context -// ---------------------------------------------------------------------------- - -// RouteMatch stores information about a matched route. -type RouteMatch struct { - Route *Route - Handler http.Handler - Vars map[string]string - - // MatchErr is set to appropriate matching error - // It is set to ErrMethodMismatch if there is a mismatch in - // the request method and route method - MatchErr error -} - -type contextKey int - -const ( - varsKey contextKey = iota - routeKey -) - -// Vars returns the route variables for the current request, if any. -func Vars(r *http.Request) map[string]string { - if rv := contextGet(r, varsKey); rv != nil { - return rv.(map[string]string) - } - return nil -} - -// CurrentRoute returns the matched route for the current request, if any. -// This only works when called inside the handler of the matched route -// because the matched route is stored in the request context which is cleared -// after the handler returns, unless the KeepContext option is set on the -// Router. -func CurrentRoute(r *http.Request) *Route { - if rv := contextGet(r, routeKey); rv != nil { - return rv.(*Route) - } - return nil -} - -func setVars(r *http.Request, val interface{}) *http.Request { - return contextSet(r, varsKey, val) -} - -func setCurrentRoute(r *http.Request, val interface{}) *http.Request { - return contextSet(r, routeKey, val) -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -// cleanPath returns the canonical path for p, eliminating . and .. elements. -// Borrowed from the net/http package. -func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - - return np -} - -// uniqueVars returns an error if two slices contain duplicated strings. -func uniqueVars(s1, s2 []string) error { - for _, v1 := range s1 { - for _, v2 := range s2 { - if v1 == v2 { - return fmt.Errorf("mux: duplicated route variable %q", v2) - } - } - } - return nil -} - -// checkPairs returns the count of strings passed in, and an error if -// the count is not an even number. -func checkPairs(pairs ...string) (int, error) { - length := len(pairs) - if length%2 != 0 { - return length, fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - } - return length, nil -} - -// mapFromPairsToString converts variadic string parameters to a -// string to string map. 
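// Walk, implemented above, explores subrouters depth-first in registration
// order. GetPathTemplate returns an error for routes that have no path
// matcher, so a walker typically skips those. Sketch:

package main

import (
	"fmt"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.Path("/").Name("root")
	api := r.PathPrefix("/api").Subrouter()
	api.Path("/users")

	_ = r.Walk(func(route *mux.Route, _ *mux.Router, _ []*mux.Route) error {
		if tpl, err := route.GetPathTemplate(); err == nil {
			fmt.Println(tpl) // "/", then "/api", then "/api/users"
		}
		return nil
	})
}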
-func mapFromPairsToString(pairs ...string) (map[string]string, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]string, length/2) - for i := 0; i < length; i += 2 { - m[pairs[i]] = pairs[i+1] - } - return m, nil -} - -// mapFromPairsToRegex converts variadic string parameters to a -// string to regex map. -func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]*regexp.Regexp, length/2) - for i := 0; i < length; i += 2 { - regex, err := regexp.Compile(pairs[i+1]) - if err != nil { - return nil, err - } - m[pairs[i]] = regex - } - return m, nil -} - -// matchInArray returns true if the given string value is in the array. -func matchInArray(arr []string, value string) bool { - for _, v := range arr { - if v == value { - return true - } - } - return false -} - -// matchMapWithString returns true if the given key/value pairs exist in a given map. -func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != "" { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v == value { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} - -// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against -// the given regex -func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != nil { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v.MatchString(value) { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} - -// methodNotAllowed replies to the request with an HTTP status code 405. -func methodNotAllowed(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusMethodNotAllowed) -} - -// methodNotAllowedHandler returns a simple request handler -// that replies to each request with a status code 405. -func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) } diff --git a/vendor/github.com/gorilla/mux/mux_test.go b/vendor/github.com/gorilla/mux/mux_test.go deleted file mode 100644 index 34c00dd25ad4..000000000000 --- a/vendor/github.com/gorilla/mux/mux_test.go +++ /dev/null @@ -1,2846 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
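// checkPairs and the two map builders above are the reason Headers and
// Queries take a flat, even-length key/value list; an odd count does not
// panic but is recorded on the route. A sketch, assuming GetError as the way
// to surface it:

package main

import (
	"fmt"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	good := r.Headers("X-Requested-With", "XMLHttpRequest").Path("/")
	bad := r.Headers("X-Requested-With").Path("/") // odd number of strings

	fmt.Println(good.GetError()) // <nil>
	fmt.Println(bad.GetError())  // mux: number of parameters must be multiple of 2 ...
}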
- -package mux - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "net/http" - "net/url" - "reflect" - "strings" - "testing" -) - -func (r *Route) GoString() string { - matchers := make([]string, len(r.matchers)) - for i, m := range r.matchers { - matchers[i] = fmt.Sprintf("%#v", m) - } - return fmt.Sprintf("&Route{matchers:[]matcher{%s}}", strings.Join(matchers, ", ")) -} - -func (r *routeRegexp) GoString() string { - return fmt.Sprintf("&routeRegexp{template: %q, regexpType: %v, options: %v, regexp: regexp.MustCompile(%q), reverse: %q, varsN: %v, varsR: %v", r.template, r.regexpType, r.options, r.regexp.String(), r.reverse, r.varsN, r.varsR) -} - -type routeTest struct { - title string // title of the test - route *Route // the route being tested - request *http.Request // a request to test the route - vars map[string]string // the expected vars of the match - scheme string // the expected scheme of the built URL - host string // the expected host of the built URL - path string // the expected path of the built URL - query string // the expected query string of the built URL - pathTemplate string // the expected path template of the route - hostTemplate string // the expected host template of the route - queriesTemplate string // the expected query template of the route - methods []string // the expected route methods - pathRegexp string // the expected path regexp - queriesRegexp string // the expected query regexp - shouldMatch bool // whether the request is expected to match the route at all - shouldRedirect bool // whether the request should result in a redirect -} - -func TestHost(t *testing.T) { - - tests := []routeTest{ - { - title: "Host route match", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with port, match", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: true, - }, - { - title: "Host route with port, wrong port in request URL", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route, match with host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.222.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with port, match with request header", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: true, - }, - { - title: "Host route with port, wrong host in request header", - route: 
new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route with pattern, match with request header", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc:1{v2:(?:23|4)}"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:123"), - vars: map[string]string{"v1": "bbb", "v2": "23"}, - host: "aaa.bbb.ccc:123", - path: "", - hostTemplate: `aaa.{v1:[a-z]{3}}.ccc:1{v2:(?:23|4)}`, - shouldMatch: true, - }, - { - title: "Host route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - hostTemplate: `aaa.{v1:[a-z]{3}}.ccc`, - shouldMatch: true, - }, - { - title: "Host route with pattern, additional capturing group, match", - route: new(Route).Host("aaa.{v1:[a-z]{2}(?:b|c)}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - hostTemplate: `aaa.{v1:[a-z]{2}(?:b|c)}.ccc`, - shouldMatch: true, - }, - { - title: "Host route with pattern, wrong host in request URL", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - hostTemplate: `aaa.{v1:[a-z]{3}}.ccc`, - shouldMatch: false, - }, - { - title: "Host route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - hostTemplate: `{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}`, - shouldMatch: true, - }, - { - title: "Host route with multiple patterns, wrong host in request URL", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - hostTemplate: `{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}`, - shouldMatch: false, - }, - { - title: "Host route with hyphenated name and pattern, match", - route: new(Route).Host("aaa.{v-1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v-1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - hostTemplate: `aaa.{v-1:[a-z]{3}}.ccc`, - shouldMatch: true, - }, - { - title: "Host route with hyphenated name and pattern, additional capturing group, match", - route: new(Route).Host("aaa.{v-1:[a-z]{2}(?:b|c)}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v-1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - hostTemplate: `aaa.{v-1:[a-z]{2}(?:b|c)}.ccc`, - shouldMatch: true, - }, - { - title: "Host route with multiple hyphenated names and patterns, match", - route: new(Route).Host("{v-1:[a-z]{3}}.{v-2:[a-z]{3}}.{v-3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v-1": "aaa", "v-2": "bbb", "v-3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - hostTemplate: `{v-1:[a-z]{3}}.{v-2:[a-z]{3}}.{v-3:[a-z]{3}}`, - shouldMatch: true, - }, - } - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testRoute(t, test) - testTemplate(t, 
test) - }) - } -} - -func TestPath(t *testing.T) { - tests := []routeTest{ - { - title: "Path route, match", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route, match with trailing slash in request and path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - }, - { - title: "Path route, do not match with trailing slash in path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - pathTemplate: `/111/`, - pathRegexp: `^/111/$`, - shouldMatch: false, - }, - { - title: "Path route, do not match with trailing slash in request", - route: new(Route).Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - pathTemplate: `/111`, - shouldMatch: false, - }, - { - title: "Path route, match root with no host", - route: new(Route).Path("/"), - request: newRequest("GET", "/"), - vars: map[string]string{}, - host: "", - path: "/", - pathTemplate: `/`, - pathRegexp: `^/$`, - shouldMatch: true, - }, - { - title: "Path route, match root with no host, App Engine format", - route: new(Route).Path("/"), - request: func() *http.Request { - r := newRequest("GET", "http://localhost/") - r.RequestURI = "/" - return r - }(), - vars: map[string]string{}, - host: "", - path: "/", - pathTemplate: `/`, - shouldMatch: true, - }, - { - title: "Path route, wrong path in request in request URL", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with pattern, match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - pathTemplate: `/111/{v1:[0-9]{3}}/333`, - shouldMatch: true, - }, - { - title: "Path route with pattern, URL in request does not match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - pathTemplate: `/111/{v1:[0-9]{3}}/333`, - pathRegexp: `^/111/(?P[0-9]{3})/333$`, - shouldMatch: false, - }, - { - title: "Path route with multiple patterns, match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - pathTemplate: `/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}`, - pathRegexp: `^/(?P[0-9]{3})/(?P[0-9]{3})/(?P[0-9]{3})$`, - shouldMatch: true, - }, - { - title: "Path route with multiple patterns, URL in request does not match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - pathTemplate: `/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}`, - pathRegexp: `^/(?P[0-9]{3})/(?P[0-9]{3})/(?P[0-9]{3})$`, - shouldMatch: false, - }, - { - title: "Path route with multiple 
patterns with pipe, match", - route: new(Route).Path("/{category:a|(?:b/c)}/{product}/{id:[0-9]+}"), - request: newRequest("GET", "http://localhost/a/product_name/1"), - vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, - host: "", - path: "/a/product_name/1", - pathTemplate: `/{category:a|(?:b/c)}/{product}/{id:[0-9]+}`, - pathRegexp: `^/(?Pa|(?:b/c))/(?P[^/]+)/(?P[0-9]+)$`, - shouldMatch: true, - }, - { - title: "Path route with hyphenated name and pattern, match", - route: new(Route).Path("/111/{v-1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v-1": "222"}, - host: "", - path: "/111/222/333", - pathTemplate: `/111/{v-1:[0-9]{3}}/333`, - pathRegexp: `^/111/(?P[0-9]{3})/333$`, - shouldMatch: true, - }, - { - title: "Path route with multiple hyphenated names and patterns, match", - route: new(Route).Path("/{v-1:[0-9]{3}}/{v-2:[0-9]{3}}/{v-3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v-1": "111", "v-2": "222", "v-3": "333"}, - host: "", - path: "/111/222/333", - pathTemplate: `/{v-1:[0-9]{3}}/{v-2:[0-9]{3}}/{v-3:[0-9]{3}}`, - pathRegexp: `^/(?P[0-9]{3})/(?P[0-9]{3})/(?P[0-9]{3})$`, - shouldMatch: true, - }, - { - title: "Path route with multiple hyphenated names and patterns with pipe, match", - route: new(Route).Path("/{product-category:a|(?:b/c)}/{product-name}/{product-id:[0-9]+}"), - request: newRequest("GET", "http://localhost/a/product_name/1"), - vars: map[string]string{"product-category": "a", "product-name": "product_name", "product-id": "1"}, - host: "", - path: "/a/product_name/1", - pathTemplate: `/{product-category:a|(?:b/c)}/{product-name}/{product-id:[0-9]+}`, - pathRegexp: `^/(?Pa|(?:b/c))/(?P[^/]+)/(?P[0-9]+)$`, - shouldMatch: true, - }, - { - title: "Path route with multiple hyphenated names and patterns with pipe and case insensitive, match", - route: new(Route).Path("/{type:(?i:daily|mini|variety)}-{date:\\d{4,4}-\\d{2,2}-\\d{2,2}}"), - request: newRequest("GET", "http://localhost/daily-2016-01-01"), - vars: map[string]string{"type": "daily", "date": "2016-01-01"}, - host: "", - path: "/daily-2016-01-01", - pathTemplate: `/{type:(?i:daily|mini|variety)}-{date:\d{4,4}-\d{2,2}-\d{2,2}}`, - pathRegexp: `^/(?P(?i:daily|mini|variety))-(?P\d{4,4}-\d{2,2}-\d{2,2})$`, - shouldMatch: true, - }, - { - title: "Path route with empty match right after other match", - route: new(Route).Path(`/{v1:[0-9]*}{v2:[a-z]*}/{v3:[0-9]*}`), - request: newRequest("GET", "http://localhost/111/222"), - vars: map[string]string{"v1": "111", "v2": "", "v3": "222"}, - host: "", - path: "/111/222", - pathTemplate: `/{v1:[0-9]*}{v2:[a-z]*}/{v3:[0-9]*}`, - pathRegexp: `^/(?P[0-9]*)(?P[a-z]*)/(?P[0-9]*)$`, - shouldMatch: true, - }, - { - title: "Path route with single pattern with pipe, match", - route: new(Route).Path("/{category:a|b/c}"), - request: newRequest("GET", "http://localhost/a"), - vars: map[string]string{"category": "a"}, - host: "", - path: "/a", - pathTemplate: `/{category:a|b/c}`, - shouldMatch: true, - }, - { - title: "Path route with single pattern with pipe, match", - route: new(Route).Path("/{category:a|b/c}"), - request: newRequest("GET", "http://localhost/b/c"), - vars: map[string]string{"category": "b/c"}, - host: "", - path: "/b/c", - pathTemplate: `/{category:a|b/c}`, - shouldMatch: true, - }, - { - title: "Path route with multiple patterns with pipe, match", - route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), - 
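// The pipe-pattern cases above translate directly into handler code: every
// {name} or {name:regexp} segment surfaces as an entry in mux.Vars. Sketch
// (path and values are illustrative):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/{category:a|b/c}/{product}/{id:[0-9]+}",
		func(w http.ResponseWriter, req *http.Request) {
			v := mux.Vars(req)
			fmt.Fprintf(w, "%s %s %s", v["category"], v["product"], v["id"])
		})

	rw := httptest.NewRecorder()
	r.ServeHTTP(rw, httptest.NewRequest("GET", "/b/c/shoes/42", nil))
	fmt.Println(rw.Body.String()) // "b/c shoes 42"
}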
request: newRequest("GET", "http://localhost/a/product_name/1"), - vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, - host: "", - path: "/a/product_name/1", - pathTemplate: `/{category:a|b/c}/{product}/{id:[0-9]+}`, - shouldMatch: true, - }, - { - title: "Path route with multiple patterns with pipe, match", - route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), - request: newRequest("GET", "http://localhost/b/c/product_name/1"), - vars: map[string]string{"category": "b/c", "product": "product_name", "id": "1"}, - host: "", - path: "/b/c/product_name/1", - pathTemplate: `/{category:a|b/c}/{product}/{id:[0-9]+}`, - shouldMatch: true, - }, - } - - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testRoute(t, test) - testTemplate(t, test) - testUseEscapedRoute(t, test) - testRegexp(t, test) - }) - } -} - -func TestPathPrefix(t *testing.T) { - tests := []routeTest{ - { - title: "PathPrefix route, match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - }, - { - title: "PathPrefix route, match substring", - route: new(Route).PathPrefix("/1"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/1", - shouldMatch: true, - }, - { - title: "PathPrefix route, URL prefix in request does not match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: false, - }, - { - title: "PathPrefix route with pattern, match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - pathTemplate: `/111/{v1:[0-9]{3}}`, - shouldMatch: true, - }, - { - title: "PathPrefix route with pattern, URL prefix in request does not match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - pathTemplate: `/111/{v1:[0-9]{3}}`, - shouldMatch: false, - }, - { - title: "PathPrefix route with multiple patterns, match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - pathTemplate: `/{v1:[0-9]{3}}/{v2:[0-9]{3}}`, - shouldMatch: true, - }, - { - title: "PathPrefix route with multiple patterns, URL prefix in request does not match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - pathTemplate: `/{v1:[0-9]{3}}/{v2:[0-9]{3}}`, - shouldMatch: false, - }, - } - - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testRoute(t, test) - testTemplate(t, test) - testUseEscapedRoute(t, test) - }) - } -} - -func TestSchemeHostPath(t *testing.T) { - tests := []routeTest{ - { - title: "Host and Path route, match", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - scheme: "http", - host: "aaa.bbb.ccc", - path: "/111/222/333", - pathTemplate: `/111/222/333`, - 
hostTemplate: `aaa.bbb.ccc`, - shouldMatch: true, - }, - { - title: "Scheme, Host, and Path route, match", - route: new(Route).Schemes("https").Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "https://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - scheme: "https", - host: "aaa.bbb.ccc", - path: "/111/222/333", - pathTemplate: `/111/222/333`, - hostTemplate: `aaa.bbb.ccc`, - shouldMatch: true, - }, - { - title: "Host and Path route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - scheme: "http", - host: "aaa.bbb.ccc", - path: "/111/222/333", - pathTemplate: `/111/222/333`, - hostTemplate: `aaa.bbb.ccc`, - shouldMatch: false, - }, - { - title: "Host and Path route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - scheme: "http", - host: "aaa.bbb.ccc", - path: "/111/222/333", - pathTemplate: `/111/{v2:[0-9]{3}}/333`, - hostTemplate: `aaa.{v1:[a-z]{3}}.ccc`, - shouldMatch: true, - }, - { - title: "Scheme, Host, and Path route with host and path patterns, match", - route: new(Route).Schemes("ftp", "ssss").Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "ssss://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - scheme: "ftp", - host: "aaa.bbb.ccc", - path: "/111/222/333", - pathTemplate: `/111/{v2:[0-9]{3}}/333`, - hostTemplate: `aaa.{v1:[a-z]{3}}.ccc`, - shouldMatch: true, - }, - { - title: "Host and Path route with pattern, URL in request does not match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - scheme: "http", - host: "aaa.bbb.ccc", - path: "/111/222/333", - pathTemplate: `/111/{v2:[0-9]{3}}/333`, - hostTemplate: `aaa.{v1:[a-z]{3}}.ccc`, - shouldMatch: false, - }, - { - title: "Host and Path route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - scheme: "http", - host: "aaa.bbb.ccc", - path: "/111/222/333", - pathTemplate: `/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}`, - hostTemplate: `{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}`, - shouldMatch: true, - }, - { - title: "Host and Path route with multiple patterns, URL in request does not match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - scheme: "http", - host: "aaa.bbb.ccc", - path: "/111/222/333", - pathTemplate: `/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}`, - hostTemplate: `{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}`, - shouldMatch: false, - }, - } - - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testRoute(t, test) - testTemplate(t, test) - testUseEscapedRoute(t, test) - }) - } -} - -func TestHeaders(t *testing.T) { - // 
newRequestHeaders creates a new request with a method, url, and headers - newRequestHeaders := func(method, url string, headers map[string]string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - for k, v := range headers { - req.Header.Add(k, v) - } - return req - } - - tests := []routeTest{ - { - title: "Headers route, match", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Headers route, bad header values", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Headers route, regex header values to match", - route: new(Route).Headers("foo", "ba[zr]"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Headers route, regex header values to match", - route: new(Route).HeadersRegexp("foo", "ba[zr]"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "baz"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - } - - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testRoute(t, test) - testTemplate(t, test) - }) - } -} - -func TestMethods(t *testing.T) { - tests := []routeTest{ - { - title: "Methods route, match GET", - route: new(Route).Methods("GET", "POST"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - methods: []string{"GET", "POST"}, - shouldMatch: true, - }, - { - title: "Methods route, match POST", - route: new(Route).Methods("GET", "POST"), - request: newRequest("POST", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - methods: []string{"GET", "POST"}, - shouldMatch: true, - }, - { - title: "Methods route, bad method", - route: new(Route).Methods("GET", "POST"), - request: newRequest("PUT", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - methods: []string{"GET", "POST"}, - shouldMatch: false, - }, - { - title: "Route without methods", - route: new(Route), - request: newRequest("PUT", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - methods: []string{}, - shouldMatch: true, - }, - } - - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testRoute(t, test) - testTemplate(t, test) - testMethods(t, test) - }) - } -} - -func TestQueries(t *testing.T) { - tests := []routeTest{ - { - title: "Queries route, match", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - query: "foo=bar&baz=ding", - queriesTemplate: "foo=bar,baz=ding", - queriesRegexp: "^foo=bar$,^baz=ding$", - shouldMatch: true, - }, - { - title: "Queries route, match with a query string", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - query: "foo=bar&baz=ding", - pathTemplate: `/api`, - hostTemplate: 
`www.example.com`, - queriesTemplate: "foo=bar,baz=ding", - queriesRegexp: "^foo=bar$,^baz=ding$", - shouldMatch: true, - }, - { - title: "Queries route, match with a query string out of order", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), - vars: map[string]string{}, - host: "", - path: "", - query: "foo=bar&baz=ding", - pathTemplate: `/api`, - hostTemplate: `www.example.com`, - queriesTemplate: "foo=bar,baz=ding", - queriesRegexp: "^foo=bar$,^baz=ding$", - shouldMatch: true, - }, - { - title: "Queries route, bad query", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=dong"), - vars: map[string]string{}, - host: "", - path: "", - queriesTemplate: "foo=bar,baz=ding", - queriesRegexp: "^foo=bar$,^baz=ding$", - shouldMatch: false, - }, - { - title: "Queries route with pattern, match", - route: new(Route).Queries("foo", "{v1}"), - request: newRequest("GET", "http://localhost?foo=bar"), - vars: map[string]string{"v1": "bar"}, - host: "", - path: "", - query: "foo=bar", - queriesTemplate: "foo={v1}", - queriesRegexp: "^foo=(?P.*)$", - shouldMatch: true, - }, - { - title: "Queries route with multiple patterns, match", - route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "", - query: "foo=bar&baz=ding", - queriesTemplate: "foo={v1},baz={v2}", - queriesRegexp: "^foo=(?P.*)$,^baz=(?P.*)$", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern, match", - route: new(Route).Queries("foo", "{v1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=10"), - vars: map[string]string{"v1": "10"}, - host: "", - path: "", - query: "foo=10", - queriesTemplate: "foo={v1:[0-9]+}", - queriesRegexp: "^foo=(?P[0-9]+)$", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern, regexp does not match", - route: new(Route).Queries("foo", "{v1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=a"), - vars: map[string]string{}, - host: "", - path: "", - queriesTemplate: "foo={v1:[0-9]+}", - queriesRegexp: "^foo=(?P[0-9]+)$", - shouldMatch: false, - }, - { - title: "Queries route with regexp pattern with quantifier, match", - route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), - request: newRequest("GET", "http://localhost?foo=1"), - vars: map[string]string{"v1": "1"}, - host: "", - path: "", - query: "foo=1", - queriesTemplate: "foo={v1:[0-9]{1}}", - queriesRegexp: "^foo=(?P[0-9]{1})$", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern with quantifier, additional variable in query string, match", - route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), - request: newRequest("GET", "http://localhost?bar=2&foo=1"), - vars: map[string]string{"v1": "1"}, - host: "", - path: "", - query: "foo=1", - queriesTemplate: "foo={v1:[0-9]{1}}", - queriesRegexp: "^foo=(?P[0-9]{1})$", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern with quantifier, regexp does not match", - route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), - request: newRequest("GET", "http://localhost?foo=12"), - vars: map[string]string{}, - host: "", - path: "", - queriesTemplate: "foo={v1:[0-9]{1}}", - queriesRegexp: "^foo=(?P[0-9]{1})$", - shouldMatch: false, - }, - { - title: "Queries route with regexp pattern with quantifier, 
additional capturing group", - route: new(Route).Queries("foo", "{v1:[0-9]{1}(?:a|b)}"), - request: newRequest("GET", "http://localhost?foo=1a"), - vars: map[string]string{"v1": "1a"}, - host: "", - path: "", - query: "foo=1a", - queriesTemplate: "foo={v1:[0-9]{1}(?:a|b)}", - queriesRegexp: "^foo=(?P[0-9]{1}(?:a|b))$", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern with quantifier, additional variable in query string, regexp does not match", - route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), - request: newRequest("GET", "http://localhost?foo=12"), - vars: map[string]string{}, - host: "", - path: "", - queriesTemplate: "foo={v1:[0-9]{1}}", - queriesRegexp: "^foo=(?P[0-9]{1})$", - shouldMatch: false, - }, - { - title: "Queries route with hyphenated name, match", - route: new(Route).Queries("foo", "{v-1}"), - request: newRequest("GET", "http://localhost?foo=bar"), - vars: map[string]string{"v-1": "bar"}, - host: "", - path: "", - query: "foo=bar", - queriesTemplate: "foo={v-1}", - queriesRegexp: "^foo=(?P.*)$", - shouldMatch: true, - }, - { - title: "Queries route with multiple hyphenated names, match", - route: new(Route).Queries("foo", "{v-1}", "baz", "{v-2}"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{"v-1": "bar", "v-2": "ding"}, - host: "", - path: "", - query: "foo=bar&baz=ding", - queriesTemplate: "foo={v-1},baz={v-2}", - queriesRegexp: "^foo=(?P.*)$,^baz=(?P.*)$", - shouldMatch: true, - }, - { - title: "Queries route with hyphenate name and pattern, match", - route: new(Route).Queries("foo", "{v-1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=10"), - vars: map[string]string{"v-1": "10"}, - host: "", - path: "", - query: "foo=10", - queriesTemplate: "foo={v-1:[0-9]+}", - queriesRegexp: "^foo=(?P[0-9]+)$", - shouldMatch: true, - }, - { - title: "Queries route with hyphenated name and pattern with quantifier, additional capturing group", - route: new(Route).Queries("foo", "{v-1:[0-9]{1}(?:a|b)}"), - request: newRequest("GET", "http://localhost?foo=1a"), - vars: map[string]string{"v-1": "1a"}, - host: "", - path: "", - query: "foo=1a", - queriesTemplate: "foo={v-1:[0-9]{1}(?:a|b)}", - queriesRegexp: "^foo=(?P[0-9]{1}(?:a|b))$", - shouldMatch: true, - }, - { - title: "Queries route with empty value, should match", - route: new(Route).Queries("foo", ""), - request: newRequest("GET", "http://localhost?foo=bar"), - vars: map[string]string{}, - host: "", - path: "", - query: "foo=", - queriesTemplate: "foo=", - queriesRegexp: "^foo=.*$", - shouldMatch: true, - }, - { - title: "Queries route with empty value and no parameter in request, should not match", - route: new(Route).Queries("foo", ""), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - queriesTemplate: "foo=", - queriesRegexp: "^foo=.*$", - shouldMatch: false, - }, - { - title: "Queries route with empty value and empty parameter in request, should match", - route: new(Route).Queries("foo", ""), - request: newRequest("GET", "http://localhost?foo="), - vars: map[string]string{}, - host: "", - path: "", - query: "foo=", - queriesTemplate: "foo=", - queriesRegexp: "^foo=.*$", - shouldMatch: true, - }, - { - title: "Queries route with overlapping value, should not match", - route: new(Route).Queries("foo", "bar"), - request: newRequest("GET", "http://localhost?foo=barfoo"), - vars: map[string]string{}, - host: "", - path: "", - queriesTemplate: "foo=bar", - queriesRegexp: "^foo=bar$", - 
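// The query cases above only constrain the parameters listed in Queries;
// extra parameters are ignored, and a {name:regexp} value is captured into
// mux.Vars. Sketch:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/search", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintf(w, "page=%s", mux.Vars(req)["page"])
	}).Queries("page", "{page:[0-9]+}")

	rw := httptest.NewRecorder()
	r.ServeHTTP(rw, httptest.NewRequest("GET", "/search?page=3&utm=x", nil))
	fmt.Println(rw.Code, rw.Body.String()) // 200 page=3

	rw = httptest.NewRecorder()
	r.ServeHTTP(rw, httptest.NewRequest("GET", "/search?page=abc", nil))
	fmt.Println(rw.Code) // 404: the regexp does not match
}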
shouldMatch: false, - }, - { - title: "Queries route with no parameter in request, should not match", - route: new(Route).Queries("foo", "{bar}"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - queriesTemplate: "foo={bar}", - queriesRegexp: "^foo=(?P.*)$", - shouldMatch: false, - }, - { - title: "Queries route with empty parameter in request, should match", - route: new(Route).Queries("foo", "{bar}"), - request: newRequest("GET", "http://localhost?foo="), - vars: map[string]string{"foo": ""}, - host: "", - path: "", - query: "foo=", - queriesTemplate: "foo={bar}", - queriesRegexp: "^foo=(?P.*)$", - shouldMatch: true, - }, - { - title: "Queries route, bad submatch", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?fffoo=bar&baz=dingggg"), - vars: map[string]string{}, - host: "", - path: "", - queriesTemplate: "foo=bar,baz=ding", - queriesRegexp: "^foo=bar$,^baz=ding$", - shouldMatch: false, - }, - { - title: "Queries route with pattern, match, escaped value", - route: new(Route).Queries("foo", "{v1}"), - request: newRequest("GET", "http://localhost?foo=%25bar%26%20%2F%3D%3F"), - vars: map[string]string{"v1": "%bar& /=?"}, - host: "", - path: "", - query: "foo=%25bar%26+%2F%3D%3F", - queriesTemplate: "foo={v1}", - queriesRegexp: "^foo=(?P.*)$", - shouldMatch: true, - }, - } - - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testTemplate(t, test) - testQueriesTemplates(t, test) - testUseEscapedRoute(t, test) - testQueriesRegexp(t, test) - }) - } -} - -func TestSchemes(t *testing.T) { - tests := []routeTest{ - // Schemes - { - title: "Schemes route, default scheme, match http, build http", - route: new(Route).Host("localhost"), - request: newRequest("GET", "http://localhost"), - scheme: "http", - host: "localhost", - shouldMatch: true, - }, - { - title: "Schemes route, match https, build https", - route: new(Route).Schemes("https", "ftp").Host("localhost"), - request: newRequest("GET", "https://localhost"), - scheme: "https", - host: "localhost", - shouldMatch: true, - }, - { - title: "Schemes route, match ftp, build https", - route: new(Route).Schemes("https", "ftp").Host("localhost"), - request: newRequest("GET", "ftp://localhost"), - scheme: "https", - host: "localhost", - shouldMatch: true, - }, - { - title: "Schemes route, match ftp, build ftp", - route: new(Route).Schemes("ftp", "https").Host("localhost"), - request: newRequest("GET", "ftp://localhost"), - scheme: "ftp", - host: "localhost", - shouldMatch: true, - }, - { - title: "Schemes route, bad scheme", - route: new(Route).Schemes("https", "ftp").Host("localhost"), - request: newRequest("GET", "http://localhost"), - scheme: "https", - host: "localhost", - shouldMatch: false, - }, - } - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testRoute(t, test) - testTemplate(t, test) - }) - } -} - -func TestMatcherFunc(t *testing.T) { - m := func(r *http.Request, m *RouteMatch) bool { - return r.URL.Host == "aaa.bbb.ccc" - } - - tests := []routeTest{ - { - title: "MatchFunc route, match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.bbb.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "MatchFunc route, non-match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.222.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := 
range tests { - t.Run(test.title, func(t *testing.T) { - testRoute(t, test) - testTemplate(t, test) - }) - } -} - -func TestBuildVarsFunc(t *testing.T) { - tests := []routeTest{ - { - title: "BuildVarsFunc set on route", - route: new(Route).Path(`/111/{v1:\d}{v2:.*}`).BuildVarsFunc(func(vars map[string]string) map[string]string { - vars["v1"] = "3" - vars["v2"] = "a" - return vars - }), - request: newRequest("GET", "http://localhost/111/2"), - path: "/111/3a", - pathTemplate: `/111/{v1:\d}{v2:.*}`, - shouldMatch: true, - }, - { - title: "BuildVarsFunc set on route and parent route", - route: new(Route).PathPrefix(`/{v1:\d}`).BuildVarsFunc(func(vars map[string]string) map[string]string { - vars["v1"] = "2" - return vars - }).Subrouter().Path(`/{v2:\w}`).BuildVarsFunc(func(vars map[string]string) map[string]string { - vars["v2"] = "b" - return vars - }), - request: newRequest("GET", "http://localhost/1/a"), - path: "/2/b", - pathTemplate: `/{v1:\d}/{v2:\w}`, - shouldMatch: true, - }, - } - - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testRoute(t, test) - testTemplate(t, test) - }) - } -} - -func TestSubRouter(t *testing.T) { - subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter() - subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter() - subrouter3 := new(Route).PathPrefix("/foo").Subrouter() - subrouter4 := new(Route).PathPrefix("/foo/bar").Subrouter() - subrouter5 := new(Route).PathPrefix("/{category}").Subrouter() - tests := []routeTest{ - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://aaa.google.com/bbb"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - pathTemplate: `/{v2:[a-z]+}`, - hostTemplate: `{v1:[a-z]+}.google.com`, - shouldMatch: true, - }, - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://111.google.com/111"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - pathTemplate: `/{v2:[a-z]+}`, - hostTemplate: `{v1:[a-z]+}.google.com`, - shouldMatch: false, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - pathTemplate: `/foo/{v1}/baz/{v2}`, - shouldMatch: true, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - pathTemplate: `/foo/{v1}/baz/{v2}`, - shouldMatch: false, - }, - { - route: subrouter3.Path("/"), - request: newRequest("GET", "http://localhost/foo/"), - vars: map[string]string{}, - host: "", - path: "/foo/", - pathTemplate: `/foo/`, - shouldMatch: true, - }, - { - route: subrouter3.Path(""), - request: newRequest("GET", "http://localhost/foo"), - vars: map[string]string{}, - host: "", - path: "/foo", - pathTemplate: `/foo`, - shouldMatch: true, - }, - - { - route: subrouter4.Path("/"), - request: newRequest("GET", "http://localhost/foo/bar/"), - vars: map[string]string{}, - host: "", - path: "/foo/bar/", - pathTemplate: `/foo/bar/`, - shouldMatch: true, - }, - { - route: subrouter4.Path(""), - request: newRequest("GET", "http://localhost/foo/bar"), - vars: map[string]string{}, - host: "", - path: "/foo/bar", - pathTemplate: `/foo/bar`, - shouldMatch: true, - }, - { - route: subrouter5.Path("/"), - request: newRequest("GET", "http://localhost/baz/"), - vars: 
map[string]string{"category": "baz"}, - host: "", - path: "/baz/", - pathTemplate: `/{category}/`, - shouldMatch: true, - }, - { - route: subrouter5.Path(""), - request: newRequest("GET", "http://localhost/baz"), - vars: map[string]string{"category": "baz"}, - host: "", - path: "/baz", - pathTemplate: `/{category}`, - shouldMatch: true, - }, - { - title: "Mismatch method specified on parent route", - route: new(Route).Methods("POST").PathPrefix("/foo").Subrouter().Path("/"), - request: newRequest("GET", "http://localhost/foo/"), - vars: map[string]string{}, - host: "", - path: "/foo/", - pathTemplate: `/foo/`, - shouldMatch: false, - }, - { - title: "Match method specified on parent route", - route: new(Route).Methods("POST").PathPrefix("/foo").Subrouter().Path("/"), - request: newRequest("POST", "http://localhost/foo/"), - vars: map[string]string{}, - host: "", - path: "/foo/", - pathTemplate: `/foo/`, - shouldMatch: true, - }, - { - title: "Mismatch scheme specified on parent route", - route: new(Route).Schemes("https").Subrouter().PathPrefix("/"), - request: newRequest("GET", "http://localhost/"), - vars: map[string]string{}, - host: "", - path: "/", - pathTemplate: `/`, - shouldMatch: false, - }, - { - title: "Match scheme specified on parent route", - route: new(Route).Schemes("http").Subrouter().PathPrefix("/"), - request: newRequest("GET", "http://localhost/"), - vars: map[string]string{}, - host: "", - path: "/", - pathTemplate: `/`, - shouldMatch: true, - }, - { - title: "No match header specified on parent route", - route: new(Route).Headers("X-Forwarded-Proto", "https").Subrouter().PathPrefix("/"), - request: newRequest("GET", "http://localhost/"), - vars: map[string]string{}, - host: "", - path: "/", - pathTemplate: `/`, - shouldMatch: false, - }, - { - title: "Header mismatch value specified on parent route", - route: new(Route).Headers("X-Forwarded-Proto", "https").Subrouter().PathPrefix("/"), - request: newRequestWithHeaders("GET", "http://localhost/", "X-Forwarded-Proto", "http"), - vars: map[string]string{}, - host: "", - path: "/", - pathTemplate: `/`, - shouldMatch: false, - }, - { - title: "Header match value specified on parent route", - route: new(Route).Headers("X-Forwarded-Proto", "https").Subrouter().PathPrefix("/"), - request: newRequestWithHeaders("GET", "http://localhost/", "X-Forwarded-Proto", "https"), - vars: map[string]string{}, - host: "", - path: "/", - pathTemplate: `/`, - shouldMatch: true, - }, - { - title: "Query specified on parent route not present", - route: new(Route).Headers("key", "foobar").Subrouter().PathPrefix("/"), - request: newRequest("GET", "http://localhost/"), - vars: map[string]string{}, - host: "", - path: "/", - pathTemplate: `/`, - shouldMatch: false, - }, - { - title: "Query mismatch value specified on parent route", - route: new(Route).Queries("key", "foobar").Subrouter().PathPrefix("/"), - request: newRequest("GET", "http://localhost/?key=notfoobar"), - vars: map[string]string{}, - host: "", - path: "/", - pathTemplate: `/`, - shouldMatch: false, - }, - { - title: "Query match value specified on subroute", - route: new(Route).Queries("key", "foobar").Subrouter().PathPrefix("/"), - request: newRequest("GET", "http://localhost/?key=foobar"), - vars: map[string]string{}, - host: "", - path: "/", - pathTemplate: `/`, - shouldMatch: true, - }, - { - title: "Build with scheme on parent router", - route: new(Route).Schemes("ftp").Host("google.com").Subrouter().Path("/"), - request: newRequest("GET", "ftp://google.com/"), - scheme: "ftp", 
- host: "google.com", - path: "/", - pathTemplate: `/`, - hostTemplate: `google.com`, - shouldMatch: true, - }, - { - title: "Prefer scheme on child route when building URLs", - route: new(Route).Schemes("https", "ftp").Host("google.com").Subrouter().Schemes("ftp").Path("/"), - request: newRequest("GET", "ftp://google.com/"), - scheme: "ftp", - host: "google.com", - path: "/", - pathTemplate: `/`, - hostTemplate: `google.com`, - shouldMatch: true, - }, - } - - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testRoute(t, test) - testTemplate(t, test) - testUseEscapedRoute(t, test) - }) - } -} - -func TestNamedRoutes(t *testing.T) { - r1 := NewRouter() - r1.NewRoute().Name("a") - r1.NewRoute().Name("b") - r1.NewRoute().Name("c") - - r2 := r1.NewRoute().Subrouter() - r2.NewRoute().Name("d") - r2.NewRoute().Name("e") - r2.NewRoute().Name("f") - - r3 := r2.NewRoute().Subrouter() - r3.NewRoute().Name("g") - r3.NewRoute().Name("h") - r3.NewRoute().Name("i") - r3.Name("j") - - if r1.namedRoutes == nil || len(r1.namedRoutes) != 10 { - t.Errorf("Expected 10 named routes, got %v", r1.namedRoutes) - } else if r1.Get("j") == nil { - t.Errorf("Subroute name not registered") - } -} - -func TestNameMultipleCalls(t *testing.T) { - r1 := NewRouter() - rt := r1.NewRoute().Name("foo").Name("bar") - err := rt.GetError() - if err == nil { - t.Errorf("Expected an error") - } -} - -func TestStrictSlash(t *testing.T) { - r := NewRouter() - r.StrictSlash(true) - - tests := []routeTest{ - { - title: "Redirect path without slash", - route: r.NewRoute().Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Do not redirect path with slash", - route: r.NewRoute().Path("/111/"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - shouldRedirect: false, - }, - { - title: "Redirect path with slash", - route: r.NewRoute().Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Do not redirect path without slash", - route: r.NewRoute().Path("/111"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - shouldRedirect: false, - }, - { - title: "Propagate StrictSlash to subrouters", - route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), - request: newRequest("GET", "http://localhost/static/images"), - vars: map[string]string{}, - host: "", - path: "/static/images/", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Ignore StrictSlash for path prefix", - route: r.NewRoute().PathPrefix("/static/"), - request: newRequest("GET", "http://localhost/static/logo.png"), - vars: map[string]string{}, - host: "", - path: "/static/", - shouldMatch: true, - shouldRedirect: false, - }, - } - - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testRoute(t, test) - testTemplate(t, test) - testUseEscapedRoute(t, test) - }) - } -} - -func TestUseEncodedPath(t *testing.T) { - r := NewRouter() - r.UseEncodedPath() - - tests := []routeTest{ - { - title: "Router with useEncodedPath, URL with encoded slash does match", - route: r.NewRoute().Path("/v1/{v1}/v2"), - request: newRequest("GET", "http://localhost/v1/1%2F2/v2"), - vars: 
map[string]string{"v1": "1%2F2"}, - host: "", - path: "/v1/1%2F2/v2", - pathTemplate: `/v1/{v1}/v2`, - shouldMatch: true, - }, - { - title: "Router with useEncodedPath, URL with encoded slash doesn't match", - route: r.NewRoute().Path("/v1/1/2/v2"), - request: newRequest("GET", "http://localhost/v1/1%2F2/v2"), - vars: map[string]string{"v1": "1%2F2"}, - host: "", - path: "/v1/1%2F2/v2", - pathTemplate: `/v1/1/2/v2`, - shouldMatch: false, - }, - } - - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testRoute(t, test) - testTemplate(t, test) - }) - } -} - -func TestWalkSingleDepth(t *testing.T) { - r0 := NewRouter() - r1 := NewRouter() - r2 := NewRouter() - - r0.Path("/g") - r0.Path("/o") - r0.Path("/d").Handler(r1) - r0.Path("/r").Handler(r2) - r0.Path("/a") - - r1.Path("/z") - r1.Path("/i") - r1.Path("/l") - r1.Path("/l") - - r2.Path("/i") - r2.Path("/l") - r2.Path("/l") - - paths := []string{"g", "o", "r", "i", "l", "l", "a"} - depths := []int{0, 0, 0, 1, 1, 1, 0} - i := 0 - err := r0.Walk(func(route *Route, router *Router, ancestors []*Route) error { - matcher := route.matchers[0].(*routeRegexp) - if matcher.template == "/d" { - return SkipRouter - } - if len(ancestors) != depths[i] { - t.Errorf(`Expected depth of %d at i = %d; got "%d"`, depths[i], i, len(ancestors)) - } - if matcher.template != "/"+paths[i] { - t.Errorf(`Expected "/%s" at i = %d; got "%s"`, paths[i], i, matcher.template) - } - i++ - return nil - }) - if err != nil { - panic(err) - } - if i != len(paths) { - t.Errorf("Expected %d routes, found %d", len(paths), i) - } -} - -func TestWalkNested(t *testing.T) { - router := NewRouter() - - routeSubrouter := func(r *Route) (*Route, *Router) { - return r, r.Subrouter() - } - - gRoute, g := routeSubrouter(router.Path("/g")) - oRoute, o := routeSubrouter(g.PathPrefix("/o")) - rRoute, r := routeSubrouter(o.PathPrefix("/r")) - iRoute, i := routeSubrouter(r.PathPrefix("/i")) - l1Route, l1 := routeSubrouter(i.PathPrefix("/l")) - l2Route, l2 := routeSubrouter(l1.PathPrefix("/l")) - l2.Path("/a") - - testCases := []struct { - path string - ancestors []*Route - }{ - {"/g", []*Route{}}, - {"/g/o", []*Route{gRoute}}, - {"/g/o/r", []*Route{gRoute, oRoute}}, - {"/g/o/r/i", []*Route{gRoute, oRoute, rRoute}}, - {"/g/o/r/i/l", []*Route{gRoute, oRoute, rRoute, iRoute}}, - {"/g/o/r/i/l/l", []*Route{gRoute, oRoute, rRoute, iRoute, l1Route}}, - {"/g/o/r/i/l/l/a", []*Route{gRoute, oRoute, rRoute, iRoute, l1Route, l2Route}}, - } - - idx := 0 - err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { - path := testCases[idx].path - tpl := route.regexp.path.template - if tpl != path { - t.Errorf(`Expected %s got %s`, path, tpl) - } - currWantAncestors := testCases[idx].ancestors - if !reflect.DeepEqual(currWantAncestors, ancestors) { - t.Errorf(`Expected %+v got %+v`, currWantAncestors, ancestors) - } - idx++ - return nil - }) - if err != nil { - panic(err) - } - if idx != len(testCases) { - t.Errorf("Expected %d routes, found %d", len(testCases), idx) - } -} - -func TestWalkSubrouters(t *testing.T) { - router := NewRouter() - - g := router.Path("/g").Subrouter() - o := g.PathPrefix("/o").Subrouter() - o.Methods("GET") - o.Methods("PUT") - - // all 4 routes should be matched - paths := []string{"/g", "/g/o", "/g/o", "/g/o"} - idx := 0 - err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { - path := paths[idx] - tpl, _ := route.GetPathTemplate() - if tpl != path { - t.Errorf(`Expected %s got %s`, path, tpl) - } - idx++ - 
return nil - }) - if err != nil { - panic(err) - } - if idx != len(paths) { - t.Errorf("Expected %d routes, found %d", len(paths), idx) - } -} - -func TestWalkErrorRoute(t *testing.T) { - router := NewRouter() - router.Path("/g") - expectedError := errors.New("error") - err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { - return expectedError - }) - if err != expectedError { - t.Errorf("Expected error %v, got %v", expectedError, err) - } -} - -func TestWalkErrorMatcher(t *testing.T) { - router := NewRouter() - expectedError := router.Path("/g").Subrouter().Path("").GetError() - err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { - return route.GetError() - }) - if err != expectedError { - t.Errorf("Expected error %v, got %v", expectedError, err) - } -} - -func TestWalkErrorHandler(t *testing.T) { - handler := NewRouter() - expectedError := handler.Path("/path").Subrouter().Path("").GetError() - router := NewRouter() - router.Path("/g").Handler(handler) - err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { - return route.GetError() - }) - if err != expectedError { - t.Errorf("Expected error %v, got %v", expectedError, err) - } -} - -func TestSubrouterErrorHandling(t *testing.T) { - superRouterCalled := false - subRouterCalled := false - - router := NewRouter() - router.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - superRouterCalled = true - }) - subRouter := router.PathPrefix("/bign8").Subrouter() - subRouter.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - subRouterCalled = true - }) - - req, _ := http.NewRequest("GET", "http://localhost/bign8/was/here", nil) - router.ServeHTTP(NewRecorder(), req) - - if superRouterCalled { - t.Error("Super router 404 handler called when sub-router 404 handler is available.") - } - if !subRouterCalled { - t.Error("Sub-router 404 handler was not called.") - } -} - -// See: https://github.com/gorilla/mux/issues/200 -func TestPanicOnCapturingGroups(t *testing.T) { - defer func() { - if recover() == nil { - t.Errorf("Expected a panic because capturing groups now fail fast, but the test completed successfully.\n") - } - }() - NewRouter().NewRoute().Path("/{type:(promo|special)}/{promoId}.json") -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -func getRouteTemplate(route *Route) string { - host, err := route.GetHostTemplate() - if err != nil { - host = "none" - } - path, err := route.GetPathTemplate() - if err != nil { - path = "none" - } - return fmt.Sprintf("Host: %v, Path: %v", host, path) -} - -func testRoute(t *testing.T, test routeTest) { - request := test.request - route := test.route - vars := test.vars - shouldMatch := test.shouldMatch - query := test.query - shouldRedirect := test.shouldRedirect - uri := url.URL{ - Scheme: test.scheme, - Host: test.host, - Path: test.path, - } - if uri.Scheme == "" { - uri.Scheme = "http" - } - - var match RouteMatch - ok := route.Match(request, &match) - if ok != shouldMatch { - msg := "Should match" - if !shouldMatch { - msg = "Should not match" - } - t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars) - return - } - if shouldMatch { - if vars != nil && !stringMapEqual(vars, match.Vars) { - t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, 
match.Vars) - return - } - if test.scheme != "" { - u, err := route.URL(mapToPairs(match.Vars)...) - if err != nil { - t.Fatalf("(%v) URL error: %v -- %v", test.title, err, getRouteTemplate(route)) - } - if uri.Scheme != u.Scheme { - t.Errorf("(%v) URLScheme not equal: expected %v, got %v", test.title, uri.Scheme, u.Scheme) - return - } - } - if test.host != "" { - u, err := test.route.URLHost(mapToPairs(match.Vars)...) - if err != nil { - t.Fatalf("(%v) URLHost error: %v -- %v", test.title, err, getRouteTemplate(route)) - } - if uri.Scheme != u.Scheme { - t.Errorf("(%v) URLHost scheme not equal: expected %v, got %v -- %v", test.title, uri.Scheme, u.Scheme, getRouteTemplate(route)) - return - } - if uri.Host != u.Host { - t.Errorf("(%v) URLHost host not equal: expected %v, got %v -- %v", test.title, uri.Host, u.Host, getRouteTemplate(route)) - return - } - } - if test.path != "" { - u, err := route.URLPath(mapToPairs(match.Vars)...) - if err != nil { - t.Fatalf("(%v) URLPath error: %v -- %v", test.title, err, getRouteTemplate(route)) - } - if uri.Path != u.Path { - t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, uri.Path, u.Path, getRouteTemplate(route)) - return - } - } - if test.host != "" && test.path != "" { - u, err := route.URL(mapToPairs(match.Vars)...) - if err != nil { - t.Fatalf("(%v) URL error: %v -- %v", test.title, err, getRouteTemplate(route)) - } - if expected, got := uri.String(), u.String(); expected != got { - t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, expected, got, getRouteTemplate(route)) - return - } - } - if query != "" { - u, err := route.URL(mapToPairs(match.Vars)...) - if err != nil { - t.Errorf("(%v) erred while creating url: %v", test.title, err) - return - } - if query != u.RawQuery { - t.Errorf("(%v) URL query not equal: expected %v, got %v", test.title, query, u.RawQuery) - return - } - } - if shouldRedirect && match.Handler == nil { - t.Errorf("(%v) Did not redirect", test.title) - return - } - if !shouldRedirect && match.Handler != nil { - t.Errorf("(%v) Unexpected redirect", test.title) - return - } - } -} - -func testUseEscapedRoute(t *testing.T, test routeTest) { - test.route.useEncodedPath = true - testRoute(t, test) -} - -func testTemplate(t *testing.T, test routeTest) { - route := test.route - pathTemplate := test.pathTemplate - if len(pathTemplate) == 0 { - pathTemplate = test.path - } - hostTemplate := test.hostTemplate - if len(hostTemplate) == 0 { - hostTemplate = test.host - } - - routePathTemplate, pathErr := route.GetPathTemplate() - if pathErr == nil && routePathTemplate != pathTemplate { - t.Errorf("(%v) GetPathTemplate not equal: expected %v, got %v", test.title, pathTemplate, routePathTemplate) - } - - routeHostTemplate, hostErr := route.GetHostTemplate() - if hostErr == nil && routeHostTemplate != hostTemplate { - t.Errorf("(%v) GetHostTemplate not equal: expected %v, got %v", test.title, hostTemplate, routeHostTemplate) - } -} - -func testMethods(t *testing.T, test routeTest) { - route := test.route - methods, _ := route.GetMethods() - if strings.Join(methods, ",") != strings.Join(test.methods, ",") { - t.Errorf("(%v) GetMethods not equal: expected %v, got %v", test.title, test.methods, methods) - } -} - -func testRegexp(t *testing.T, test routeTest) { - route := test.route - routePathRegexp, regexpErr := route.GetPathRegexp() - if test.pathRegexp != "" && regexpErr == nil && routePathRegexp != test.pathRegexp { - t.Errorf("(%v) GetPathRegexp not equal: expected %v, got %v", test.title, 
test.pathRegexp, routePathRegexp) - } -} - -func testQueriesRegexp(t *testing.T, test routeTest) { - route := test.route - queries, queriesErr := route.GetQueriesRegexp() - gotQueries := strings.Join(queries, ",") - if test.queriesRegexp != "" && queriesErr == nil && gotQueries != test.queriesRegexp { - t.Errorf("(%v) GetQueriesRegexp not equal: expected %v, got %v", test.title, test.queriesRegexp, gotQueries) - } -} - -func testQueriesTemplates(t *testing.T, test routeTest) { - route := test.route - queries, queriesErr := route.GetQueriesTemplates() - gotQueries := strings.Join(queries, ",") - if test.queriesTemplate != "" && queriesErr == nil && gotQueries != test.queriesTemplate { - t.Errorf("(%v) GetQueriesTemplates not equal: expected %v, got %v", test.title, test.queriesTemplate, gotQueries) - } -} - -type TestA301ResponseWriter struct { - hh http.Header - status int -} - -func (ho *TestA301ResponseWriter) Header() http.Header { - return ho.hh -} - -func (ho *TestA301ResponseWriter) Write(b []byte) (int, error) { - return 0, nil -} - -func (ho *TestA301ResponseWriter) WriteHeader(code int) { - ho.status = code -} - -func Test301Redirect(t *testing.T) { - m := make(http.Header) - - func1 := func(w http.ResponseWriter, r *http.Request) {} - func2 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/api/", func2).Name("func2") - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) - - res := TestA301ResponseWriter{ - hh: m, - status: 0, - } - r.ServeHTTP(&res, req) - - if "http://localhost/api/?abc=def" != res.hh["Location"][0] { - t.Errorf("Should have complete URL with query string") - } -} - -func TestSkipClean(t *testing.T) { - func1 := func(w http.ResponseWriter, r *http.Request) {} - func2 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.SkipClean(true) - r.HandleFunc("/api/", func2).Name("func2") - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) - res := NewRecorder() - r.ServeHTTP(res, req) - - if len(res.HeaderMap["Location"]) != 0 { - t.Errorf("Shouldn't redirect since skip clean is enabled") - } -} - -// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW -func TestSubrouterHeader(t *testing.T) { - expected := "func1 response" - func1 := func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, expected) - } - func2 := func(http.ResponseWriter, *http.Request) {} - - r := NewRouter() - s := r.Headers("SomeSpecialHeader", "").Subrouter() - s.HandleFunc("/", func1).Name("func1") - r.HandleFunc("/", func2).Name("func2") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - req.Header.Add("SomeSpecialHeader", "foo") - match := new(RouteMatch) - matched := r.Match(req, match) - if !matched { - t.Errorf("Should match request") - } - if match.Route.GetName() != "func1" { - t.Errorf("Expecting func1 handler, got %s", match.Route.GetName()) - } - resp := NewRecorder() - match.Handler.ServeHTTP(resp, req) - if resp.Body.String() != expected { - t.Errorf("Expecting %q", expected) - } -} - -func TestNoMatchMethodErrorHandler(t *testing.T) { - func1 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/", func1).Methods("GET", "POST") - - req, _ := http.NewRequest("PUT", "http://localhost/", nil) - match := new(RouteMatch) - matched := r.Match(req, match) - - if matched { - t.Error("Should not have matched route for methods") - } - - if 
match.MatchErr != ErrMethodMismatch { - t.Error("Should get ErrMethodMismatch error") - } - - resp := NewRecorder() - r.ServeHTTP(resp, req) - if resp.Code != 405 { - t.Errorf("Expecting code %v", 405) - } - - // Add matching route - r.HandleFunc("/", func1).Methods("PUT") - - match = new(RouteMatch) - matched = r.Match(req, match) - - if !matched { - t.Error("Should have matched route for methods") - } - - if match.MatchErr != nil { - t.Error("Should not have any matching error. Found:", match.MatchErr) - } -} - -func TestErrMatchNotFound(t *testing.T) { - emptyHandler := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/", emptyHandler) - s := r.PathPrefix("/sub/").Subrouter() - s.HandleFunc("/", emptyHandler) - - // Regular 404 not found - req, _ := http.NewRequest("GET", "/sub/whatever", nil) - match := new(RouteMatch) - matched := r.Match(req, match) - - if matched { - t.Errorf("Subrouter should not have matched that, got %v", match.Route) - } - // Even without a custom handler, MatchErr is set to ErrNotFound - if match.MatchErr != ErrNotFound { - t.Errorf("Expected ErrNotFound MatchErr, but was %v", match.MatchErr) - } - - // Now lets add a 404 handler to subrouter - s.NotFoundHandler = http.NotFoundHandler() - req, _ = http.NewRequest("GET", "/sub/whatever", nil) - - // Test the subrouter first - match = new(RouteMatch) - matched = s.Match(req, match) - // Now we should get a match - if !matched { - t.Errorf("Subrouter should have matched %s", req.RequestURI) - } - // But MatchErr should be set to ErrNotFound anyway - if match.MatchErr != ErrNotFound { - t.Errorf("Expected ErrNotFound MatchErr, but was %v", match.MatchErr) - } - - // Now test the parent (MatchErr should propagate) - match = new(RouteMatch) - matched = r.Match(req, match) - - // Now we should get a match - if !matched { - t.Errorf("Router should have matched %s via subrouter", req.RequestURI) - } - // But MatchErr should be set to ErrNotFound anyway - if match.MatchErr != ErrNotFound { - t.Errorf("Expected ErrNotFound MatchErr, but was %v", match.MatchErr) - } -} - -// methodsSubrouterTest models the data necessary for testing handler -// matching for subrouters created after HTTP methods matcher registration. -type methodsSubrouterTest struct { - title string - wantCode int - router *Router - // method is the input into the request and expected response - method string - // input request path - path string - // redirectTo is the expected location path for strict-slash matches - redirectTo string -} - -// methodHandler writes the method string in response. -func methodHandler(method string) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(method)) - } -} - -// TestMethodsSubrouterCatchall matches handlers for subrouters where a -// catchall handler is set for a mis-matching method. 
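A minimal, self-contained sketch of the behavior the following method-matcher tests pin down, assuming only the public gorilla/mux API (github.com/gorilla/mux) and the standard net/http/httptest recorder; the path /foo and the host localhost are illustrative:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// One subrouter per verb, each registering the same path.
	r.Methods("GET").Subrouter().HandleFunc("/foo", func(w http.ResponseWriter, _ *http.Request) { w.Write([]byte("GET")) })
	r.Methods("POST").Subrouter().HandleFunc("/foo", func(w http.ResponseWriter, _ *http.Request) { w.Write([]byte("POST")) })

	for _, method := range []string{"GET", "POST", "PUT"} {
		rec := httptest.NewRecorder()
		r.ServeHTTP(rec, httptest.NewRequest(method, "http://localhost/foo", nil))
		// GET and POST hit their handlers (200); the unregistered PUT gets 405.
		fmt.Println(method, rec.Code)
	}
}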
-func TestMethodsSubrouterCatchall(t *testing.T) { - t.Parallel() - - router := NewRouter() - router.Methods("PATCH").Subrouter().PathPrefix("/").HandlerFunc(methodHandler("PUT")) - router.Methods("GET").Subrouter().HandleFunc("/foo", methodHandler("GET")) - router.Methods("POST").Subrouter().HandleFunc("/foo", methodHandler("POST")) - router.Methods("DELETE").Subrouter().HandleFunc("/foo", methodHandler("DELETE")) - - tests := []methodsSubrouterTest{ - { - title: "match GET handler", - router: router, - path: "http://localhost/foo", - method: "GET", - wantCode: http.StatusOK, - }, - { - title: "match POST handler", - router: router, - method: "POST", - path: "http://localhost/foo", - wantCode: http.StatusOK, - }, - { - title: "match DELETE handler", - router: router, - method: "DELETE", - path: "http://localhost/foo", - wantCode: http.StatusOK, - }, - { - title: "disallow PUT method", - router: router, - method: "PUT", - path: "http://localhost/foo", - wantCode: http.StatusMethodNotAllowed, - }, - } - - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testMethodsSubrouter(t, test) - }) - } -} - -// TestMethodsSubrouterStrictSlash matches handlers on subrouters with -// strict-slash matchers. -func TestMethodsSubrouterStrictSlash(t *testing.T) { - t.Parallel() - - router := NewRouter() - sub := router.PathPrefix("/").Subrouter() - sub.StrictSlash(true).Path("/foo").Methods("GET").Subrouter().HandleFunc("", methodHandler("GET")) - sub.StrictSlash(true).Path("/foo/").Methods("PUT").Subrouter().HandleFunc("/", methodHandler("PUT")) - sub.StrictSlash(true).Path("/foo/").Methods("POST").Subrouter().HandleFunc("/", methodHandler("POST")) - - tests := []methodsSubrouterTest{ - { - title: "match POST handler", - router: router, - method: "POST", - path: "http://localhost/foo/", - wantCode: http.StatusOK, - }, - { - title: "match GET handler", - router: router, - method: "GET", - path: "http://localhost/foo", - wantCode: http.StatusOK, - }, - { - title: "match POST handler, redirect strict-slash", - router: router, - method: "POST", - path: "http://localhost/foo", - redirectTo: "http://localhost/foo/", - wantCode: http.StatusMovedPermanently, - }, - { - title: "match GET handler, redirect strict-slash", - router: router, - method: "GET", - path: "http://localhost/foo/", - redirectTo: "http://localhost/foo", - wantCode: http.StatusMovedPermanently, - }, - { - title: "disallow DELETE method", - router: router, - method: "DELETE", - path: "http://localhost/foo", - wantCode: http.StatusMethodNotAllowed, - }, - } - - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testMethodsSubrouter(t, test) - }) - } -} - -// TestMethodsSubrouterPathPrefix matches handlers on subrouters created -// on a router with a path prefix matcher and method matcher. 
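The strict-slash cases above hinge on mux answering the non-canonical form of a path with a redirect rather than dispatching it. A short sketch of that redirect behavior, under the same assumptions (public gorilla/mux API plus httptest; the route /foo/ is illustrative):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter().StrictSlash(true)
	r.HandleFunc("/foo/", func(w http.ResponseWriter, _ *http.Request) { w.Write([]byte("ok")) })

	// The slash-less request is answered with a 301 whose Location points
	// at the canonical slashed path, instead of reaching the handler.
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, httptest.NewRequest("GET", "http://localhost/foo", nil))
	fmt.Println(rec.Code, rec.Header().Get("Location"))
}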
-func TestMethodsSubrouterPathPrefix(t *testing.T) { - t.Parallel() - - router := NewRouter() - router.PathPrefix("/1").Methods("POST").Subrouter().HandleFunc("/2", methodHandler("POST")) - router.PathPrefix("/1").Methods("DELETE").Subrouter().HandleFunc("/2", methodHandler("DELETE")) - router.PathPrefix("/1").Methods("PUT").Subrouter().HandleFunc("/2", methodHandler("PUT")) - router.PathPrefix("/1").Methods("POST").Subrouter().HandleFunc("/2", methodHandler("POST2")) - - tests := []methodsSubrouterTest{ - { - title: "match first POST handler", - router: router, - method: "POST", - path: "http://localhost/1/2", - wantCode: http.StatusOK, - }, - { - title: "match DELETE handler", - router: router, - method: "DELETE", - path: "http://localhost/1/2", - wantCode: http.StatusOK, - }, - { - title: "match PUT handler", - router: router, - method: "PUT", - path: "http://localhost/1/2", - wantCode: http.StatusOK, - }, - { - title: "disallow PATCH method", - router: router, - method: "PATCH", - path: "http://localhost/1/2", - wantCode: http.StatusMethodNotAllowed, - }, - } - - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testMethodsSubrouter(t, test) - }) - } -} - -// TestMethodsSubrouterSubrouter matches handlers on subrouters produced -// from method matchers registered on a root subrouter. -func TestMethodsSubrouterSubrouter(t *testing.T) { - t.Parallel() - - router := NewRouter() - sub := router.PathPrefix("/1").Subrouter() - sub.Methods("POST").Subrouter().HandleFunc("/2", methodHandler("POST")) - sub.Methods("GET").Subrouter().HandleFunc("/2", methodHandler("GET")) - sub.Methods("PATCH").Subrouter().HandleFunc("/2", methodHandler("PATCH")) - sub.HandleFunc("/2", methodHandler("PUT")).Subrouter().Methods("PUT") - sub.HandleFunc("/2", methodHandler("POST2")).Subrouter().Methods("POST") - - tests := []methodsSubrouterTest{ - { - title: "match first POST handler", - router: router, - method: "POST", - path: "http://localhost/1/2", - wantCode: http.StatusOK, - }, - { - title: "match GET handler", - router: router, - method: "GET", - path: "http://localhost/1/2", - wantCode: http.StatusOK, - }, - { - title: "match PATCH handler", - router: router, - method: "PATCH", - path: "http://localhost/1/2", - wantCode: http.StatusOK, - }, - { - title: "match PUT handler", - router: router, - method: "PUT", - path: "http://localhost/1/2", - wantCode: http.StatusOK, - }, - { - title: "disallow DELETE method", - router: router, - method: "DELETE", - path: "http://localhost/1/2", - wantCode: http.StatusMethodNotAllowed, - }, - } - - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testMethodsSubrouter(t, test) - }) - } -} - -// TestMethodsSubrouterPathVariable matches handlers on matching paths -// with path variables in them. 
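The path-variable test below relies on {name} placeholders being decoded into the map returned by mux.Vars. A compact sketch of that round trip, same assumptions as above ({any} and the DELETE verb mirror the test data):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// {any} matches a single path segment; a form like {id:[0-9]+} would
	// additionally constrain it with a regexp.
	r.Methods("DELETE").Subrouter().HandleFunc("/1/{any}", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprint(w, mux.Vars(req)["any"])
	})

	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, httptest.NewRequest("DELETE", "http://localhost/1/foo", nil))
	fmt.Println(rec.Code, rec.Body.String()) // 200 foo
}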
-func TestMethodsSubrouterPathVariable(t *testing.T) { - t.Parallel() - - router := NewRouter() - router.Methods("GET").Subrouter().HandleFunc("/foo", methodHandler("GET")) - router.Methods("POST").Subrouter().HandleFunc("/{any}", methodHandler("POST")) - router.Methods("DELETE").Subrouter().HandleFunc("/1/{any}", methodHandler("DELETE")) - router.Methods("PUT").Subrouter().HandleFunc("/1/{any}", methodHandler("PUT")) - - tests := []methodsSubrouterTest{ - { - title: "match GET handler", - router: router, - method: "GET", - path: "http://localhost/foo", - wantCode: http.StatusOK, - }, - { - title: "match POST handler", - router: router, - method: "POST", - path: "http://localhost/foo", - wantCode: http.StatusOK, - }, - { - title: "match DELETE handler", - router: router, - method: "DELETE", - path: "http://localhost/1/foo", - wantCode: http.StatusOK, - }, - { - title: "match PUT handler", - router: router, - method: "PUT", - path: "http://localhost/1/foo", - wantCode: http.StatusOK, - }, - { - title: "disallow PATCH method", - router: router, - method: "PATCH", - path: "http://localhost/1/foo", - wantCode: http.StatusMethodNotAllowed, - }, - } - - for _, test := range tests { - t.Run(test.title, func(t *testing.T) { - testMethodsSubrouter(t, test) - }) - } -} - -func ExampleSetURLVars() { - req, _ := http.NewRequest("GET", "/foo", nil) - req = SetURLVars(req, map[string]string{"foo": "bar"}) - - fmt.Println(Vars(req)["foo"]) - - // Output: bar -} - -// testMethodsSubrouter runs an individual methodsSubrouterTest. -func testMethodsSubrouter(t *testing.T, test methodsSubrouterTest) { - // Execute request - req, _ := http.NewRequest(test.method, test.path, nil) - resp := NewRecorder() - test.router.ServeHTTP(resp, req) - - switch test.wantCode { - case http.StatusMethodNotAllowed: - if resp.Code != http.StatusMethodNotAllowed { - t.Errorf(`(%s) Expected "405 Method Not Allowed", but got %d code`, test.title, resp.Code) - } else if matchedMethod := resp.Body.String(); matchedMethod != "" { - t.Errorf(`(%s) Expected "405 Method Not Allowed", but %q handler was called`, test.title, matchedMethod) - } - - case http.StatusMovedPermanently: - if gotLocation := resp.HeaderMap.Get("Location"); gotLocation != test.redirectTo { - t.Errorf("(%s) Expected %q route-match to redirect to %q, but got %q", test.title, test.method, test.redirectTo, gotLocation) - } - - case http.StatusOK: - if matchedMethod := resp.Body.String(); matchedMethod != test.method { - t.Errorf("(%s) Expected %q handler to be called, but %q handler was called", test.title, test.method, matchedMethod) - } - - default: - expectedCodes := []int{http.StatusMethodNotAllowed, http.StatusMovedPermanently, http.StatusOK} - t.Errorf("(%s) Expected wantCode to be one of: %v, but got %d", test.title, expectedCodes, test.wantCode) - } -} - -func TestSubrouterMatching(t *testing.T) { - const ( - none, stdOnly, subOnly uint8 = 0, 1 << 0, 1 << 1 - both = subOnly | stdOnly - ) - - type request struct { - Name string - Request *http.Request - Flags uint8 - } - - cases := []struct { - Name string - Standard, Subrouter func(*Router) - Requests []request - }{ - { - "pathPrefix", - func(r *Router) { - r.PathPrefix("/before").PathPrefix("/after") - }, - func(r *Router) { - r.PathPrefix("/before").Subrouter().PathPrefix("/after") - }, - []request{ - {"no match final path prefix", newRequest("GET", "/after"), none}, - {"no match parent path prefix", newRequest("GET", "/before"), none}, - {"matches append", newRequest("GET", "/before/after"), both}, - 
{"matches as prefix", newRequest("GET", "/before/after/1234"), both}, - }, - }, - { - "path", - func(r *Router) { - r.Path("/before").Path("/after") - }, - func(r *Router) { - r.Path("/before").Subrouter().Path("/after") - }, - []request{ - {"no match subroute path", newRequest("GET", "/after"), none}, - {"no match parent path", newRequest("GET", "/before"), none}, - {"no match as prefix", newRequest("GET", "/before/after/1234"), none}, - {"no match append", newRequest("GET", "/before/after"), none}, - }, - }, - { - "host", - func(r *Router) { - r.Host("before.com").Host("after.com") - }, - func(r *Router) { - r.Host("before.com").Subrouter().Host("after.com") - }, - []request{ - {"no match before", newRequestHost("GET", "/", "before.com"), none}, - {"no match other", newRequestHost("GET", "/", "other.com"), none}, - {"matches after", newRequestHost("GET", "/", "after.com"), none}, - }, - }, - { - "queries variant keys", - func(r *Router) { - r.Queries("foo", "bar").Queries("cricket", "baseball") - }, - func(r *Router) { - r.Queries("foo", "bar").Subrouter().Queries("cricket", "baseball") - }, - []request{ - {"matches with all", newRequest("GET", "/?foo=bar&cricket=baseball"), both}, - {"matches with more", newRequest("GET", "/?foo=bar&cricket=baseball&something=else"), both}, - {"no match with none", newRequest("GET", "/"), none}, - {"no match with some", newRequest("GET", "/?cricket=baseball"), none}, - }, - }, - { - "queries overlapping keys", - func(r *Router) { - r.Queries("foo", "bar").Queries("foo", "baz") - }, - func(r *Router) { - r.Queries("foo", "bar").Subrouter().Queries("foo", "baz") - }, - []request{ - {"no match old value", newRequest("GET", "/?foo=bar"), none}, - {"no match diff value", newRequest("GET", "/?foo=bak"), none}, - {"no match with none", newRequest("GET", "/"), none}, - {"matches override", newRequest("GET", "/?foo=baz"), none}, - }, - }, - { - "header variant keys", - func(r *Router) { - r.Headers("foo", "bar").Headers("cricket", "baseball") - }, - func(r *Router) { - r.Headers("foo", "bar").Subrouter().Headers("cricket", "baseball") - }, - []request{ - { - "matches with all", - newRequestWithHeaders("GET", "/", "foo", "bar", "cricket", "baseball"), - both, - }, - { - "matches with more", - newRequestWithHeaders("GET", "/", "foo", "bar", "cricket", "baseball", "something", "else"), - both, - }, - {"no match with none", newRequest("GET", "/"), none}, - {"no match with some", newRequestWithHeaders("GET", "/", "cricket", "baseball"), none}, - }, - }, - { - "header overlapping keys", - func(r *Router) { - r.Headers("foo", "bar").Headers("foo", "baz") - }, - func(r *Router) { - r.Headers("foo", "bar").Subrouter().Headers("foo", "baz") - }, - []request{ - {"no match old value", newRequestWithHeaders("GET", "/", "foo", "bar"), none}, - {"no match diff value", newRequestWithHeaders("GET", "/", "foo", "bak"), none}, - {"no match with none", newRequest("GET", "/"), none}, - {"matches override", newRequestWithHeaders("GET", "/", "foo", "baz"), none}, - }, - }, - { - "method", - func(r *Router) { - r.Methods("POST").Methods("GET") - }, - func(r *Router) { - r.Methods("POST").Subrouter().Methods("GET") - }, - []request{ - {"matches before", newRequest("POST", "/"), none}, - {"no match other", newRequest("HEAD", "/"), none}, - {"matches override", newRequest("GET", "/"), none}, - }, - }, - { - "schemes", - func(r *Router) { - r.Schemes("http").Schemes("https") - }, - func(r *Router) { - r.Schemes("http").Subrouter().Schemes("https") - }, - []request{ - {"matches overrides", 
newRequest("GET", "https://www.example.com/"), none}, - {"matches original", newRequest("GET", "http://www.example.com/"), none}, - {"no match other", newRequest("GET", "ftp://www.example.com/"), none}, - }, - }, - } - - // case -> request -> router - for _, c := range cases { - t.Run(c.Name, func(t *testing.T) { - for _, req := range c.Requests { - t.Run(req.Name, func(t *testing.T) { - for _, v := range []struct { - Name string - Config func(*Router) - Expected bool - }{ - {"subrouter", c.Subrouter, (req.Flags & subOnly) != 0}, - {"standard", c.Standard, (req.Flags & stdOnly) != 0}, - } { - r := NewRouter() - v.Config(r) - if r.Match(req.Request, &RouteMatch{}) != v.Expected { - if v.Expected { - t.Errorf("expected %v match", v.Name) - } else { - t.Errorf("expected %v no match", v.Name) - } - } - } - }) - } - }) - } -} - -// verify that copyRouteConf copies fields as expected. -func Test_copyRouteConf(t *testing.T) { - var ( - m MatcherFunc = func(*http.Request, *RouteMatch) bool { - return true - } - b BuildVarsFunc = func(i map[string]string) map[string]string { - return i - } - r, _ = newRouteRegexp("hi", regexpTypeHost, routeRegexpOptions{}) - ) - - tests := []struct { - name string - args routeConf - want routeConf - }{ - { - "empty", - routeConf{}, - routeConf{}, - }, - { - "full", - routeConf{ - useEncodedPath: true, - strictSlash: true, - skipClean: true, - regexp: routeRegexpGroup{host: r, path: r, queries: []*routeRegexp{r}}, - matchers: []matcher{m}, - buildScheme: "https", - buildVarsFunc: b, - }, - routeConf{ - useEncodedPath: true, - strictSlash: true, - skipClean: true, - regexp: routeRegexpGroup{host: r, path: r, queries: []*routeRegexp{r}}, - matchers: []matcher{m}, - buildScheme: "https", - buildVarsFunc: b, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // special case some incomparable fields of routeConf before delegating to reflect.DeepEqual - got := copyRouteConf(tt.args) - - // funcs not comparable, just compare length of slices - if len(got.matchers) != len(tt.want.matchers) { - t.Errorf("matchers different lengths: %v %v", len(got.matchers), len(tt.want.matchers)) - } - got.matchers, tt.want.matchers = nil, nil - - // deep equal treats nil slice differently to empty slice so check for zero len first - { - bothZero := len(got.regexp.queries) == 0 && len(tt.want.regexp.queries) == 0 - if !bothZero && !reflect.DeepEqual(got.regexp.queries, tt.want.regexp.queries) { - t.Errorf("queries unequal: %v %v", got.regexp.queries, tt.want.regexp.queries) - } - got.regexp.queries, tt.want.regexp.queries = nil, nil - } - - // funcs not comparable, just compare nullity - if (got.buildVarsFunc == nil) != (tt.want.buildVarsFunc == nil) { - t.Errorf("build vars funcs unequal: %v %v", got.buildVarsFunc == nil, tt.want.buildVarsFunc == nil) - } - got.buildVarsFunc, tt.want.buildVarsFunc = nil, nil - - // finish the deal - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("route confs unequal: %v %v", got, tt.want) - } - }) - } -} - -func TestMethodNotAllowed(t *testing.T) { - handler := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) } - router := NewRouter() - router.HandleFunc("/thing", handler).Methods(http.MethodGet) - router.HandleFunc("/something", handler).Methods(http.MethodGet) - - w := NewRecorder() - req := newRequest(http.MethodPut, "/thing") - - router.ServeHTTP(w, req) - - if w.Code != 405 { - t.Fatalf("Expected status code 405 (got %d)", w.Code) - } -} - -func TestSubrouterNotFound(t *testing.T) { - 
handler := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) } - router := NewRouter() - router.Path("/a").Subrouter().HandleFunc("/thing", handler).Methods(http.MethodGet) - router.Path("/b").Subrouter().HandleFunc("/something", handler).Methods(http.MethodGet) - - w := NewRecorder() - req := newRequest(http.MethodPut, "/not-present") - - router.ServeHTTP(w, req) - - if w.Code != 404 { - t.Fatalf("Expected status code 404 (got %d)", w.Code) - } -} - -// mapToPairs converts a string map to a slice of string pairs -func mapToPairs(m map[string]string) []string { - var i int - p := make([]string, len(m)*2) - for k, v := range m { - p[i] = k - p[i+1] = v - i += 2 - } - return p -} - -// stringMapEqual checks the equality of two string maps -func stringMapEqual(m1, m2 map[string]string) bool { - nil1 := m1 == nil - nil2 := m2 == nil - if nil1 != nil2 || len(m1) != len(m2) { - return false - } - for k, v := range m1 { - if v != m2[k] { - return false - } - } - return true -} - -// stringHandler returns a handler func that writes a message 's' to the -// http.ResponseWriter. -func stringHandler(s string) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(s)) - } -} - -// newRequest is a helper function to create a new request with a method and url. -// The request returned is a 'server' request as opposed to a 'client' one, produced -// by simulating a write onto the wire followed by a read back off of it. -// The differences between the two kinds are detailed in the net/http package. -func newRequest(method, url string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - // extract the escaped original host+path from url - // http://localhost/path/here?v=1#frag -> //localhost/path/here - opaque := "" - if i := len(req.URL.Scheme); i > 0 { - opaque = url[i+1:] - } - - if i := strings.LastIndex(opaque, "?"); i > -1 { - opaque = opaque[:i] - } - if i := strings.LastIndex(opaque, "#"); i > -1 { - opaque = opaque[:i] - } - - // Escaped host+path workaround as detailed in https://golang.org/pkg/net/url/#URL - // for < 1.5 client side workaround - req.URL.Opaque = opaque - - // Simulate writing to wire - var buff bytes.Buffer - req.Write(&buff) - ioreader := bufio.NewReader(&buff) - - // Parse request off of 'wire' - req, err = http.ReadRequest(ioreader) - if err != nil { - panic(err) - } - return req -} - -// newRequestWithHeaders creates a new request with the provided headers -func newRequestWithHeaders(method, url string, headers ...string) *http.Request { - req := newRequest(method, url) - - if len(headers)%2 != 0 { - panic(fmt.Sprintf("Expected headers length divisible by 2 but got %v", len(headers))) - } - - for i := 0; i < len(headers); i += 2 { - req.Header.Set(headers[i], headers[i+1]) - } - - return req -} - -// newRequestHost creates a new request with a method, url, and host header -func newRequestHost(method, url, host string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - req.Host = host - return req -} diff --git a/vendor/github.com/gorilla/mux/old_test.go b/vendor/github.com/gorilla/mux/old_test.go deleted file mode 100644 index b228983c4834..000000000000 --- a/vendor/github.com/gorilla/mux/old_test.go +++ /dev/null @@ -1,704 +0,0 @@ -// Old tests ported to Go1. This is a mess. Want to drop it one day. - -// Copyright 2011 Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
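The old_test.go file being deleted here carries a private copy of ResponseRecorder (defined just below); in current Go the equivalent ships in net/http/httptest. A sketch of the same assertion style using the standard recorder, with the /products/ route and example.com host as illustrative values:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/products/", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusTeapot)
	})

	// httptest.ResponseRecorder plays the role of the hand-rolled recorder.
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, httptest.NewRequest("GET", "http://www.example.com/products/", nil))
	fmt.Println(rec.Code) // 418
}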
- -package mux - -import ( - "bytes" - "net/http" - "testing" -) - -// ---------------------------------------------------------------------------- -// ResponseRecorder -// ---------------------------------------------------------------------------- -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// ResponseRecorder is an implementation of http.ResponseWriter that -// records its mutations for later inspection in tests. -type ResponseRecorder struct { - Code int // the HTTP response code from WriteHeader - HeaderMap http.Header // the HTTP response headers - Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to - Flushed bool -} - -// NewRecorder returns an initialized ResponseRecorder. -func NewRecorder() *ResponseRecorder { - return &ResponseRecorder{ - HeaderMap: make(http.Header), - Body: new(bytes.Buffer), - } -} - -// Header returns the response headers. -func (rw *ResponseRecorder) Header() http.Header { - return rw.HeaderMap -} - -// Write always succeeds and writes to rw.Body, if not nil. -func (rw *ResponseRecorder) Write(buf []byte) (int, error) { - if rw.Body != nil { - rw.Body.Write(buf) - } - if rw.Code == 0 { - rw.Code = http.StatusOK - } - return len(buf), nil -} - -// WriteHeader sets rw.Code. -func (rw *ResponseRecorder) WriteHeader(code int) { - rw.Code = code -} - -// Flush sets rw.Flushed to true. -func (rw *ResponseRecorder) Flush() { - rw.Flushed = true -} - -// ---------------------------------------------------------------------------- - -func TestRouteMatchers(t *testing.T) { - var scheme, host, path, query, method string - var headers map[string]string - var resultVars map[bool]map[string]string - - router := NewRouter() - router.NewRoute().Host("{var1}.google.com"). - Path("/{var2:[a-z]+}/{var3:[0-9]+}"). - Queries("foo", "bar"). - Methods("GET"). - Schemes("https"). - Headers("x-requested-with", "XMLHttpRequest") - router.NewRoute().Host("www.{var4}.com"). - PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}"). - Queries("baz", "ding"). - Methods("POST"). - Schemes("http"). - Headers("Content-Type", "application/json") - - reset := func() { - // Everything match. - scheme = "https" - host = "www.google.com" - path = "/product/42" - query = "?foo=bar" - method = "GET" - headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} - resultVars = map[bool]map[string]string{ - true: {"var1": "www", "var2": "product", "var3": "42"}, - false: {}, - } - } - - reset2 := func() { - // Everything match. 
- scheme = "http" - host = "www.google.com" - path = "/foo/product/42/path/that/is/ignored" - query = "?baz=ding" - method = "POST" - headers = map[string]string{"Content-Type": "application/json"} - resultVars = map[bool]map[string]string{ - true: {"var4": "google", "var5": "product", "var6": "42"}, - false: {}, - } - } - - match := func(shouldMatch bool) { - url := scheme + "://" + host + path + query - request, _ := http.NewRequest(method, url, nil) - for key, value := range headers { - request.Header.Add(key, value) - } - - var routeMatch RouteMatch - matched := router.Match(request, &routeMatch) - if matched != shouldMatch { - t.Errorf("Expected: %v\nGot: %v\nRequest: %v %v", shouldMatch, matched, request.Method, url) - } - - if matched { - currentRoute := routeMatch.Route - if currentRoute == nil { - t.Errorf("Expected a current route.") - } - vars := routeMatch.Vars - expectedVars := resultVars[shouldMatch] - if len(vars) != len(expectedVars) { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - for name, value := range vars { - if expectedVars[name] != value { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - } - } - } - - // 1st route -------------------------------------------------------------- - - // Everything match. - reset() - match(true) - - // Scheme doesn't match. - reset() - scheme = "http" - match(false) - - // Host doesn't match. - reset() - host = "www.mygoogle.com" - match(false) - - // Path doesn't match. - reset() - path = "/product/notdigits" - match(false) - - // Query doesn't match. - reset() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset() - method = "POST" - match(false) - - // Header doesn't match. - reset() - headers = map[string]string{} - match(false) - - // Everything match, again. - reset() - match(true) - - // 2nd route -------------------------------------------------------------- - // Everything match. - reset2() - match(true) - - // Scheme doesn't match. - reset2() - scheme = "https" - match(false) - - // Host doesn't match. - reset2() - host = "sub.google.com" - match(false) - - // Path doesn't match. - reset2() - path = "/bar/product/42" - match(false) - - // Query doesn't match. - reset2() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset2() - method = "GET" - match(false) - - // Header doesn't match. - reset2() - headers = map[string]string{} - match(false) - - // Everything match, again. 
- reset2() - match(true) -} - -type headerMatcherTest struct { - matcher headerMatcher - headers map[string]string - result bool -} - -var headerMatcherTests = []headerMatcherTest{ - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{"X-Requested-With": "XMLHttpRequest"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": ""}), - headers: map[string]string{"X-Requested-With": "anything"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{}, - result: false, - }, -} - -type hostMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var hostMatcherTests = []hostMatcherTest{ - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://abc.def.ghi/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://a.b.c/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: false, - }, -} - -type methodMatcherTest struct { - matcher methodMatcher - method string - result bool -} - -var methodMatcherTests = []methodMatcherTest{ - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "GET", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "POST", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "PUT", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "DELETE", - result: false, - }, -} - -type pathMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var pathMatcherTests = []pathMatcherTest{ - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/123/456/789", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/1/2/3", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: false, - }, -} - -type schemeMatcherTest struct { - matcher schemeMatcher - url string - result bool -} - -var schemeMatcherTests = []schemeMatcherTest{ - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "http://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "https://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"https"}), - url: "http://localhost:8080/", - result: false, - }, - { - matcher: schemeMatcher([]string{"http"}), - url: "https://localhost:8080/", - result: false, - }, -} - -type urlBuildingTest struct { - route *Route - vars []string - url string -} - -var urlBuildingTests = []urlBuildingTest{ - { - route: new(Route).Host("foo.domain.com"), - vars: []string{}, - url: "http://foo.domain.com", - }, - { - route: new(Route).Host("{subdomain}.domain.com"), - vars: []string{"subdomain", "bar"}, - url: "http://bar.domain.com", - }, - { - route: new(Route).Host("foo.domain.com").Path("/articles"), - vars: []string{}, - url: "http://foo.domain.com/articles", - }, - { - route: 
new(Route).Path("/articles"), - vars: []string{}, - url: "/articles", - }, - { - route: new(Route).Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"category", "technology", "id", "42"}, - url: "/articles/technology/42", - }, - { - route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"subdomain", "foo", "category", "technology", "id", "42"}, - url: "http://foo.domain.com/articles/technology/42", - }, -} - -func TestHeaderMatcher(t *testing.T) { - for _, v := range headerMatcherTests { - request, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - for key, value := range v.headers { - request.Header.Add(key, value) - } - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, request.Header) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, request.Header) - } - } - } -} - -func TestHostMatcher(t *testing.T) { - for _, v := range hostMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestMethodMatcher(t *testing.T) { - for _, v := range methodMatcherTests { - request, _ := http.NewRequest(v.method, "http://localhost:8080/", nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.method) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.method) - } - } - } -} - -func TestPathMatcher(t *testing.T) { - for _, v := range pathMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestSchemeMatcher(t *testing.T) { - for _, v := range schemeMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - } -} - -func TestUrlBuilding(t *testing.T) { - - for _, v := range urlBuildingTests { - u, _ := 
v.route.URL(v.vars...) - url := u.String() - if url != v.url { - t.Errorf("expected %v, got %v", v.url, url) - /* - reversePath := "" - reverseHost := "" - if v.route.pathTemplate != nil { - reversePath = v.route.pathTemplate.Reverse - } - if v.route.hostTemplate != nil { - reverseHost = v.route.hostTemplate.Reverse - } - - t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost) - */ - } - } - - ArticleHandler := func(w http.ResponseWriter, r *http.Request) { - } - - router := NewRouter() - router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article") - - url, _ := router.Get("article").URL("category", "technology", "id", "42") - expected := "/articles/technology/42" - if url.String() != expected { - t.Errorf("Expected %v, got %v", expected, url.String()) - } -} - -func TestMatchedRouteName(t *testing.T) { - routeName := "stock" - router := NewRouter() - route := router.NewRoute().Path("/products/").Name(routeName) - - url := "http://www.example.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - retName := rv.Route.GetName() - if retName != routeName { - t.Errorf("Expected %q, got %q.", routeName, retName) - } -} - -func TestSubRouting(t *testing.T) { - // Example from docs. - router := NewRouter() - subrouter := router.NewRoute().Host("www.example.com").Subrouter() - route := subrouter.NewRoute().Path("/products/").Name("products") - - url := "http://www.example.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - u, _ := router.Get("products").URL() - builtURL := u.String() - // Yay, subroute aware of the domain when building! 
- if builtURL != url { - t.Errorf("Expected %q, got %q.", url, builtURL) - } -} - -func TestVariableNames(t *testing.T) { - route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}") - if route.err == nil { - t.Errorf("Expected error for duplicated variable names") - } -} - -func TestRedirectSlash(t *testing.T) { - var route *Route - var routeMatch RouteMatch - r := NewRouter() - - r.StrictSlash(false) - route = r.NewRoute() - if route.strictSlash != false { - t.Errorf("Expected false redirectSlash.") - } - - r.StrictSlash(true) - route = r.NewRoute() - if route.strictSlash != true { - t.Errorf("Expected true redirectSlash.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}/") - request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars := routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp := NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" { - t.Errorf("Expected redirect header.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}") - request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars = routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp = NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" { - t.Errorf("Expected redirect header.") - } -} - -// Test for the new regexp library, still not available in stable Go. -func TestNewRegexp(t *testing.T) { - var p *routeRegexp - var matches []string - - tests := map[string]map[string][]string{ - "/{foo:a{2}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": nil, - "/aaaa": nil, - }, - "/{foo:a{2,}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": {"aaa"}, - "/aaaa": {"aaaa"}, - }, - "/{foo:a{2,3}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": {"aaa"}, - "/aaaa": nil, - }, - "/{foo:[a-z]{3}}/{bar:[a-z]{2}}": { - "/a": nil, - "/ab": nil, - "/abc": nil, - "/abcd": nil, - "/abc/ab": {"abc", "ab"}, - "/abc/abc": nil, - "/abcd/ab": nil, - }, - `/{foo:\w{3,}}/{bar:\d{2,}}`: { - "/a": nil, - "/ab": nil, - "/abc": nil, - "/abc/1": nil, - "/abc/12": {"abc", "12"}, - "/abcd/12": {"abcd", "12"}, - "/abcd/123": {"abcd", "123"}, - }, - } - - for pattern, paths := range tests { - p, _ = newRouteRegexp(pattern, regexpTypePath, routeRegexpOptions{}) - for path, result := range paths { - matches = p.regexp.FindStringSubmatch(path) - if result == nil { - if matches != nil { - t.Errorf("%v should not match %v.", pattern, path) - } - } else { - if len(matches) != len(result)+1 { - t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches)) - } else { - for k, v := range result { - if matches[k+1] != v { - t.Errorf("Expected %v, got %v.", v, matches[k+1]) - } - } - } - } - } - } -} diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go deleted file mode 100644 index ac1abcd473e3..000000000000 --- a/vendor/github.com/gorilla/mux/regexp.go +++ /dev/null @@ -1,345 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package mux - -import ( - "bytes" - "fmt" - "net/http" - "net/url" - "regexp" - "strconv" - "strings" -) - -type routeRegexpOptions struct { - strictSlash bool - useEncodedPath bool -} - -type regexpType int - -const ( - regexpTypePath regexpType = 0 - regexpTypeHost regexpType = 1 - regexpTypePrefix regexpType = 2 - regexpTypeQuery regexpType = 3 -) - -// newRouteRegexp parses a route template and returns a routeRegexp, -// used to match a host, a path or a query string. -// -// It will extract named variables, assemble a regexp to be matched, create -// a "reverse" template to build URLs and compile regexps to validate variable -// values used in URL building. -// -// Previously we accepted only Python-like identifiers for variable -// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that -// name and pattern can't be empty, and names can't contain a colon. -func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) { - // Check if it is well-formed. - idxs, errBraces := braceIndices(tpl) - if errBraces != nil { - return nil, errBraces - } - // Backup the original. - template := tpl - // Now let's parse it. - defaultPattern := "[^/]+" - if typ == regexpTypeQuery { - defaultPattern = ".*" - } else if typ == regexpTypeHost { - defaultPattern = "[^.]+" - } - // Only match strict slash if not matching - if typ != regexpTypePath { - options.strictSlash = false - } - // Set a flag for strictSlash. - endSlash := false - if options.strictSlash && strings.HasSuffix(tpl, "/") { - tpl = tpl[:len(tpl)-1] - endSlash = true - } - varsN := make([]string, len(idxs)/2) - varsR := make([]*regexp.Regexp, len(idxs)/2) - pattern := bytes.NewBufferString("") - pattern.WriteByte('^') - reverse := bytes.NewBufferString("") - var end int - var err error - for i := 0; i < len(idxs); i += 2 { - // Set all values we are interested in. - raw := tpl[end:idxs[i]] - end = idxs[i+1] - parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) - name := parts[0] - patt := defaultPattern - if len(parts) == 2 { - patt = parts[1] - } - // Name or pattern can't be empty. - if name == "" || patt == "" { - return nil, fmt.Errorf("mux: missing name or pattern in %q", - tpl[idxs[i]:end]) - } - // Build the regexp pattern. - fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) - - // Build the reverse template. - fmt.Fprintf(reverse, "%s%%s", raw) - - // Append variable name and compiled pattern. - varsN[i/2] = name - varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) - if err != nil { - return nil, err - } - } - // Add the remaining. - raw := tpl[end:] - pattern.WriteString(regexp.QuoteMeta(raw)) - if options.strictSlash { - pattern.WriteString("[/]?") - } - if typ == regexpTypeQuery { - // Add the default pattern if the query value is empty - if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { - pattern.WriteString(defaultPattern) - } - } - if typ != regexpTypePrefix { - pattern.WriteByte('$') - } - - var wildcardHostPort bool - if typ == regexpTypeHost { - if !strings.Contains(pattern.String(), ":") { - wildcardHostPort = true - } - } - reverse.WriteString(raw) - if endSlash { - reverse.WriteByte('/') - } - // Compile full regexp. - reg, errCompile := regexp.Compile(pattern.String()) - if errCompile != nil { - return nil, errCompile - } - - // Check for capturing groups which used to work in older versions - if reg.NumSubexp() != len(idxs)/2 { - panic(fmt.Sprintf("route %s contains capture groups in its regexp. 
", template) + - "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)") - } - - // Done! - return &routeRegexp{ - template: template, - regexpType: typ, - options: options, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, - wildcardHostPort: wildcardHostPort, - }, nil -} - -// routeRegexp stores a regexp to match a host or path and information to -// collect and validate route variables. -type routeRegexp struct { - // The unmodified template. - template string - // The type of match - regexpType regexpType - // Options for matching - options routeRegexpOptions - // Expanded regexp. - regexp *regexp.Regexp - // Reverse template. - reverse string - // Variable names. - varsN []string - // Variable regexps (validators). - varsR []*regexp.Regexp - // Wildcard host-port (no strict port match in hostname) - wildcardHostPort bool -} - -// Match matches the regexp against the URL host or path. -func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if r.regexpType == regexpTypeHost { - host := getHost(req) - if r.wildcardHostPort { - // Don't be strict on the port match - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - } - return r.regexp.MatchString(host) - } else { - if r.regexpType == regexpTypeQuery { - return r.matchQueryString(req) - } - path := req.URL.Path - if r.options.useEncodedPath { - path = req.URL.EscapedPath() - } - return r.regexp.MatchString(path) - } -} - -// url builds a URL part using the given values. -func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN)) - for k, v := range r.varsN { - value, ok := values[v] - if !ok { - return "", fmt.Errorf("mux: missing route variable %q", v) - } - if r.regexpType == regexpTypeQuery { - value = url.QueryEscape(value) - } - urlValues[k] = value - } - rv := fmt.Sprintf(r.reverse, urlValues...) - if !r.regexp.MatchString(rv) { - // The URL is checked against the full regexp, instead of checking - // individual variables. This is faster but to provide a good error - // message, we check individual regexps if the URL doesn't match. - for k, v := range r.varsN { - if !r.varsR[k].MatchString(values[v]) { - return "", fmt.Errorf( - "mux: variable %q doesn't match, expected %q", values[v], - r.varsR[k].String()) - } - } - } - return rv, nil -} - -// getURLQuery returns a single query parameter from a request URL. -// For a URL with foo=bar&baz=ding, we return only the relevant key -// value pair for the routeRegexp. -func (r *routeRegexp) getURLQuery(req *http.Request) string { - if r.regexpType != regexpTypeQuery { - return "" - } - templateKey := strings.SplitN(r.template, "=", 2)[0] - for key, vals := range req.URL.Query() { - if key == templateKey && len(vals) > 0 { - return key + "=" + vals[0] - } - } - return "" -} - -func (r *routeRegexp) matchQueryString(req *http.Request) bool { - return r.regexp.MatchString(r.getURLQuery(req)) -} - -// braceIndices returns the first level curly brace indices from a string. -// It returns an error in case of unbalanced braces. 
-func braceIndices(s string) ([]int, error) { - var level, idx int - var idxs []int - for i := 0; i < len(s); i++ { - switch s[i] { - case '{': - if level++; level == 1 { - idx = i - } - case '}': - if level--; level == 0 { - idxs = append(idxs, idx, i+1) - } else if level < 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - } - } - if level != 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - return idxs, nil -} - -// varGroupName builds a capturing group name for the indexed variable. -func varGroupName(idx int) string { - return "v" + strconv.Itoa(idx) -} - -// ---------------------------------------------------------------------------- -// routeRegexpGroup -// ---------------------------------------------------------------------------- - -// routeRegexpGroup groups the route matchers that carry variables. -type routeRegexpGroup struct { - host *routeRegexp - path *routeRegexp - queries []*routeRegexp -} - -// setMatch extracts the variables from the URL once a route matches. -func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { - // Store host variables. - if v.host != nil { - host := getHost(req) - matches := v.host.regexp.FindStringSubmatchIndex(host) - if len(matches) > 0 { - extractVars(host, matches, v.host.varsN, m.Vars) - } - } - path := req.URL.Path - if r.useEncodedPath { - path = req.URL.EscapedPath() - } - // Store path variables. - if v.path != nil { - matches := v.path.regexp.FindStringSubmatchIndex(path) - if len(matches) > 0 { - extractVars(path, matches, v.path.varsN, m.Vars) - // Check if we should redirect. - if v.path.options.strictSlash { - p1 := strings.HasSuffix(path, "/") - p2 := strings.HasSuffix(v.path.template, "/") - if p1 != p2 { - u, _ := url.Parse(req.URL.String()) - if p1 { - u.Path = u.Path[:len(u.Path)-1] - } else { - u.Path += "/" - } - m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently) - } - } - } - } - // Store query string variables. - for _, q := range v.queries { - queryURL := q.getURLQuery(req) - matches := q.regexp.FindStringSubmatchIndex(queryURL) - if len(matches) > 0 { - extractVars(queryURL, matches, q.varsN, m.Vars) - } - } -} - -// getHost tries its best to return the request host. -// According to section 14.23 of RFC 2616 the Host header -// can include the port number if the default value of 80 is not used. -func getHost(r *http.Request) string { - if r.URL.IsAbs() { - return r.URL.Host - } - return r.Host -} - -func extractVars(input string, matches []int, names []string, output map[string]string) { - for i, name := range names { - output[name] = input[matches[2*i+2]:matches[2*i+3]] - } -} diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go deleted file mode 100644 index 8479c68c1df7..000000000000 --- a/vendor/github.com/gorilla/mux/route.go +++ /dev/null @@ -1,710 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "regexp" - "strings" -) - -// Route stores information to match a request and build URLs. -type Route struct { - // Request handler for the route. - handler http.Handler - // If true, this route never matches: it is only used to build URLs. - buildOnly bool - // The name used to build URLs. - name string - // Error resulted from building a route. 
- err error - - // "global" reference to all named routes - namedRoutes map[string]*Route - - // config possibly passed in from `Router` - routeConf -} - -// SkipClean reports whether path cleaning is enabled for this route via -// Router.SkipClean. -func (r *Route) SkipClean() bool { - return r.skipClean -} - -// Match matches the route against the request. -func (r *Route) Match(req *http.Request, match *RouteMatch) bool { - if r.buildOnly || r.err != nil { - return false - } - - var matchErr error - - // Match everything. - for _, m := range r.matchers { - if matched := m.Match(req, match); !matched { - if _, ok := m.(methodMatcher); ok { - matchErr = ErrMethodMismatch - continue - } - - // Ignore ErrNotFound errors. These errors arise from match call - // to Subrouters. - // - // This prevents subsequent matching subrouters from failing to - // run middleware. If not ignored, the middleware would see a - // non-nil MatchErr and be skipped, even when there was a - // matching route. - if match.MatchErr == ErrNotFound { - match.MatchErr = nil - } - - matchErr = nil - return false - } - } - - if matchErr != nil { - match.MatchErr = matchErr - return false - } - - if match.MatchErr == ErrMethodMismatch { - // We found a route which matches request method, clear MatchErr - match.MatchErr = nil - // Then override the mis-matched handler - match.Handler = r.handler - } - - // Yay, we have a match. Let's collect some info about it. - if match.Route == nil { - match.Route = r - } - if match.Handler == nil { - match.Handler = r.handler - } - if match.Vars == nil { - match.Vars = make(map[string]string) - } - - // Set variables. - r.regexp.setMatch(req, match, r) - return true -} - -// ---------------------------------------------------------------------------- -// Route attributes -// ---------------------------------------------------------------------------- - -// GetError returns an error resulted from building the route, if any. -func (r *Route) GetError() error { - return r.err -} - -// BuildOnly sets the route to never match: it is only used to build URLs. -func (r *Route) BuildOnly() *Route { - r.buildOnly = true - return r -} - -// Handler -------------------------------------------------------------------- - -// Handler sets a handler for the route. -func (r *Route) Handler(handler http.Handler) *Route { - if r.err == nil { - r.handler = handler - } - return r -} - -// HandlerFunc sets a handler function for the route. -func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { - return r.Handler(http.HandlerFunc(f)) -} - -// GetHandler returns the handler for the route, if any. -func (r *Route) GetHandler() http.Handler { - return r.handler -} - -// Name ----------------------------------------------------------------------- - -// Name sets the name for the route, used to build URLs. -// It is an error to call Name more than once on a route. -func (r *Route) Name(name string) *Route { - if r.name != "" { - r.err = fmt.Errorf("mux: route already has name %q, can't set %q", - r.name, name) - } - if r.err == nil { - r.name = name - r.namedRoutes[name] = r - } - return r -} - -// GetName returns the name for the route, if any. -func (r *Route) GetName() string { - return r.name -} - -// ---------------------------------------------------------------------------- -// Matchers -// ---------------------------------------------------------------------------- - -// matcher types try to match a request. 
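-//
-// Any type implementing this interface can be attached to a route. As an
-// illustrative sketch, a route that only matches requests carrying a
-// hypothetical X-Debug header could be declared with MatcherFunc (defined
-// below):
-//
-//     r.MatcherFunc(func(req *http.Request, m *RouteMatch) bool {
-//         return req.Header.Get("X-Debug") != ""
-//     })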
-type matcher interface { - Match(*http.Request, *RouteMatch) bool -} - -// addMatcher adds a matcher to the route. -func (r *Route) addMatcher(m matcher) *Route { - if r.err == nil { - r.matchers = append(r.matchers, m) - } - return r -} - -// addRegexpMatcher adds a host or path matcher and builder to a route. -func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error { - if r.err != nil { - return r.err - } - if typ == regexpTypePath || typ == regexpTypePrefix { - if len(tpl) > 0 && tpl[0] != '/' { - return fmt.Errorf("mux: path must start with a slash, got %q", tpl) - } - if r.regexp.path != nil { - tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl - } - } - rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{ - strictSlash: r.strictSlash, - useEncodedPath: r.useEncodedPath, - }) - if err != nil { - return err - } - for _, q := range r.regexp.queries { - if err = uniqueVars(rr.varsN, q.varsN); err != nil { - return err - } - } - if typ == regexpTypeHost { - if r.regexp.path != nil { - if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { - return err - } - } - r.regexp.host = rr - } else { - if r.regexp.host != nil { - if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { - return err - } - } - if typ == regexpTypeQuery { - r.regexp.queries = append(r.regexp.queries, rr) - } else { - r.regexp.path = rr - } - } - r.addMatcher(rr) - return nil -} - -// Headers -------------------------------------------------------------------- - -// headerMatcher matches the request against header values. -type headerMatcher map[string]string - -func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMapWithString(m, r.Header, true) -} - -// Headers adds a matcher for request header values. -// It accepts a sequence of key/value pairs to be matched. For example: -// -// r := mux.NewRouter() -// r.Headers("Content-Type", "application/json", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both request header values match. -// If the value is an empty string, it will match any value if the key is set. -func (r *Route) Headers(pairs ...string) *Route { - if r.err == nil { - var headers map[string]string - headers, r.err = mapFromPairsToString(pairs...) - return r.addMatcher(headerMatcher(headers)) - } - return r -} - -// headerRegexMatcher matches request header values against regular expressions. -type headerRegexMatcher map[string]*regexp.Regexp - -func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMapWithRegex(m, r.Header, true) -} - -// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex -// support. For example: -// -// r := mux.NewRouter() -// r.HeadersRegexp("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both request header values match their regular expressions. -// If the value is an empty string, it will match any value if the key is set. -// Use the start and end of string anchors (^ and $) to match an exact value. -func (r *Route) HeadersRegexp(pairs ...string) *Route { - if r.err == nil { - var headers map[string]*regexp.Regexp - headers, r.err = mapFromPairsToRegex(pairs...) - return r.addMatcher(headerRegexMatcher(headers)) - } - return r -} - -// Host ----------------------------------------------------------------------- - -// Host adds a matcher for the URL host.
-// It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next dot. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Host("www.example.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Host(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypeHost) - return r -} - -// MatcherFunc ---------------------------------------------------------------- - -// MatcherFunc is the function signature used by custom matchers. -type MatcherFunc func(*http.Request, *RouteMatch) bool - -// Match returns the match for a given request. -func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { - return m(r, match) -} - -// MatcherFunc adds a custom function to be used as request matcher. -func (r *Route) MatcherFunc(f MatcherFunc) *Route { - return r.addMatcher(f) -} - -// Methods -------------------------------------------------------------------- - -// methodMatcher matches the request against HTTP methods. -type methodMatcher []string - -func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.Method) -} - -// Methods adds a matcher for HTTP methods. -// It accepts a sequence of one or more methods to be matched, e.g.: -// "GET", "POST", "PUT". -func (r *Route) Methods(methods ...string) *Route { - for k, v := range methods { - methods[k] = strings.ToUpper(v) - } - return r.addMatcher(methodMatcher(methods)) -} - -// Path ----------------------------------------------------------------------- - -// Path adds a matcher for the URL path. -// It accepts a template with zero or more URL variables enclosed by {}. The -// template must start with a "/". -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Path(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypePath) - return r -} - -// PathPrefix ----------------------------------------------------------------- - -// PathPrefix adds a matcher for the URL path prefix. This matches if the given -// template is a prefix of the full URL path. See Route.Path() for details on -// the tpl argument. -// -// Note that it does not treat slashes specially ("/foobar/" will be matched by -// the prefix "/foo") so you may want to use a trailing slash here. -// -// Also note that the setting of Router.StrictSlash() has no effect on routes -// with a PathPrefix matcher. -func (r *Route) PathPrefix(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypePrefix) - return r -} - -// Query ---------------------------------------------------------------------- - -// Queries adds a matcher for URL query values. -// It accepts a sequence of key/value pairs. Values may define variables. 
-// For example: -// -// r := mux.NewRouter() -// r.Queries("foo", "bar", "id", "{id:[0-9]+}") -// -// The above route will only match if the URL contains the defined query -// values, e.g.: ?foo=bar&id=42. -// -// If the value is an empty string, it will match any value if the key is set. -// -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything. -// -// - {name:pattern} matches the given regexp pattern. -func (r *Route) Queries(pairs ...string) *Route { - length := len(pairs) - if length%2 != 0 { - r.err = fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - return nil - } - for i := 0; i < length; i += 2 { - if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil { - return r - } - } - - return r -} - -// Schemes -------------------------------------------------------------------- - -// schemeMatcher matches the request against URL schemes. -type schemeMatcher []string - -func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.URL.Scheme) -} - -// Schemes adds a matcher for URL schemes. -// It accepts a sequence of schemes to be matched, e.g.: "http", "https". -func (r *Route) Schemes(schemes ...string) *Route { - for k, v := range schemes { - schemes[k] = strings.ToLower(v) - } - if len(schemes) > 0 { - r.buildScheme = schemes[0] - } - return r.addMatcher(schemeMatcher(schemes)) -} - -// BuildVarsFunc -------------------------------------------------------------- - -// BuildVarsFunc is the function signature used by custom build variable -// functions (which can modify route variables before a route's URL is built). -type BuildVarsFunc func(map[string]string) map[string]string - -// BuildVarsFunc adds a custom function to be used to modify build variables -// before a route's URL is built. -func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { - if r.buildVarsFunc != nil { - // compose the old and new functions - old := r.buildVarsFunc - r.buildVarsFunc = func(m map[string]string) map[string]string { - return f(old(m)) - } - } else { - r.buildVarsFunc = f - } - return r -} - -// Subrouter ------------------------------------------------------------------ - -// Subrouter creates a subrouter for the route. -// -// It will test the inner routes only if the parent route matched. For example: -// -// r := mux.NewRouter() -// s := r.Host("www.example.com").Subrouter() -// s.HandleFunc("/products/", ProductsHandler) -// s.HandleFunc("/products/{key}", ProductHandler) -// s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -// -// Here, the routes registered in the subrouter won't be tested if the host -// doesn't match. -func (r *Route) Subrouter() *Router { - // initialize a subrouter with a copy of the parent route's configuration - router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} - r.addMatcher(router) - return router -} - -// ---------------------------------------------------------------------------- -// URL building -// ---------------------------------------------------------------------------- - -// URL builds a URL for the route. -// -// It accepts a sequence of key/value pairs for the route variables. For -// example, given this route: -// -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
-// Name("article") -// -// ...a URL for it can be built using: -// -// url, err := r.Get("article").URL("category", "technology", "id", "42") -// -// ...which will return an url.URL with the following path: -// -// "/articles/technology/42" -// -// This also works for host variables: -// -// r := mux.NewRouter() -// r.Host("{subdomain}.domain.com"). -// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") -// -// All variables defined in the route are required, and their values must -// conform to the corresponding patterns. -func (r *Route) URL(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - var scheme, host, path string - queries := make([]string, 0, len(r.regexp.queries)) - if r.regexp.host != nil { - if host, err = r.regexp.host.url(values); err != nil { - return nil, err - } - scheme = "http" - if r.buildScheme != "" { - scheme = r.buildScheme - } - } - if r.regexp.path != nil { - if path, err = r.regexp.path.url(values); err != nil { - return nil, err - } - } - for _, q := range r.regexp.queries { - var query string - if query, err = q.url(values); err != nil { - return nil, err - } - queries = append(queries, query) - } - return &url.URL{ - Scheme: scheme, - Host: host, - Path: path, - RawQuery: strings.Join(queries, "&"), - }, nil -} - -// URLHost builds the host part of the URL for a route. See Route.URL(). -// -// The route must have a host defined. -func (r *Route) URLHost(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.host == nil { - return nil, errors.New("mux: route doesn't have a host") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - host, err := r.regexp.host.url(values) - if err != nil { - return nil, err - } - u := &url.URL{ - Scheme: "http", - Host: host, - } - if r.buildScheme != "" { - u.Scheme = r.buildScheme - } - return u, nil -} - -// URLPath builds the path part of the URL for a route. See Route.URL(). -// -// The route must have a path defined. -func (r *Route) URLPath(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.path == nil { - return nil, errors.New("mux: route doesn't have a path") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - path, err := r.regexp.path.url(values) - if err != nil { - return nil, err - } - return &url.URL{ - Path: path, - }, nil -} - -// GetPathTemplate returns the template used to build the -// route match. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a path. -func (r *Route) GetPathTemplate() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp.path == nil { - return "", errors.New("mux: route doesn't have a path") - } - return r.regexp.path.template, nil -} - -// GetPathRegexp returns the expanded regular expression used to match route path. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a path. 
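-//
-// For example (illustrative), a route registered with
-// Path("/articles/{category}/{id:[0-9]+}") reports the expanded regexp
-// "^/articles/(?P<v0>[^/]+)/(?P<v1>[0-9]+)$".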
-func (r *Route) GetPathRegexp() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp.path == nil { - return "", errors.New("mux: route does not have a path") - } - return r.regexp.path.regexp.String(), nil -} - -// GetQueriesRegexp returns the expanded regular expressions used to match the -// route queries. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not have queries. -func (r *Route) GetQueriesRegexp() ([]string, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.queries == nil { - return nil, errors.New("mux: route doesn't have queries") - } - var queries []string - for _, query := range r.regexp.queries { - queries = append(queries, query.regexp.String()) - } - return queries, nil -} - -// GetQueriesTemplates returns the templates used to build the -// query matching. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define queries. -func (r *Route) GetQueriesTemplates() ([]string, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.queries == nil { - return nil, errors.New("mux: route doesn't have queries") - } - var queries []string - for _, query := range r.regexp.queries { - queries = append(queries, query.template) - } - return queries, nil -} - -// GetMethods returns the methods the route matches against -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if route does not have methods. -func (r *Route) GetMethods() ([]string, error) { - if r.err != nil { - return nil, r.err - } - for _, m := range r.matchers { - if methods, ok := m.(methodMatcher); ok { - return []string(methods), nil - } - } - return nil, errors.New("mux: route doesn't have methods") -} - -// GetHostTemplate returns the template used to build the -// route match. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a host. -func (r *Route) GetHostTemplate() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp.host == nil { - return "", errors.New("mux: route doesn't have a host") - } - return r.regexp.host.template, nil -} - -// prepareVars converts the route variable pairs into a map. If the route has a -// BuildVarsFunc, it is invoked. -func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { - m, err := mapFromPairsToString(pairs...) - if err != nil { - return nil, err - } - return r.buildVars(m), nil -} - -func (r *Route) buildVars(m map[string]string) map[string]string { - if r.buildVarsFunc != nil { - m = r.buildVarsFunc(m) - } - return m -} diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go deleted file mode 100644 index 32ecffde489f..000000000000 --- a/vendor/github.com/gorilla/mux/test_helpers.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import "net/http" - -// SetURLVars sets the URL variables for the given request, to be accessed via -// mux.Vars for testing route behaviour. 
Arguments are not modified, a shallow -// copy is returned. -// -// This API should only be used for testing purposes; it provides a way to -// inject variables into the request context. Alternatively, URL variables -// can be set by making a route that captures the required variables, -// starting a server and sending the request to that server. -func SetURLVars(r *http.Request, val map[string]string) *http.Request { - return setVars(r, val) -} diff --git a/vendor/github.com/joho/godotenv/.gitignore b/vendor/github.com/joho/godotenv/.gitignore deleted file mode 100644 index e43b0f988953..000000000000 --- a/vendor/github.com/joho/godotenv/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.DS_Store diff --git a/vendor/github.com/joho/godotenv/.travis.yml b/vendor/github.com/joho/godotenv/.travis.yml deleted file mode 100644 index f0db1adcdb15..000000000000 --- a/vendor/github.com/joho/godotenv/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - 1.x - -os: - - linux - - osx diff --git a/vendor/github.com/joho/godotenv/LICENCE b/vendor/github.com/joho/godotenv/LICENCE deleted file mode 100644 index e7ddd51be903..000000000000 --- a/vendor/github.com/joho/godotenv/LICENCE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2013 John Barton - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/joho/godotenv/README.md b/vendor/github.com/joho/godotenv/README.md deleted file mode 100644 index 1a20c78dcd37..000000000000 --- a/vendor/github.com/joho/godotenv/README.md +++ /dev/null @@ -1,163 +0,0 @@ -# GoDotEnv [![Build Status](https://travis-ci.org/joho/godotenv.svg?branch=master)](https://travis-ci.org/joho/godotenv) [![Build status](https://ci.appveyor.com/api/projects/status/9v40vnfvvgde64u4?svg=true)](https://ci.appveyor.com/project/joho/godotenv) [![Go Report Card](https://goreportcard.com/badge/github.com/joho/godotenv)](https://goreportcard.com/report/github.com/joho/godotenv) - -A Go (golang) port of the Ruby dotenv project (which loads env vars from a .env file) - -From the original Library: - -> Storing configuration in the environment is one of the tenets of a twelve-factor app. Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables. -> -> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. 
Dotenv loads variables from a .env file into ENV when the environment is bootstrapped. - -It can be used as a library (for loading in env for your own daemons etc) or as a bin command. - -There is test coverage and CI for both linuxish and windows environments, but I make no guarantees about the bin version working on windows. - -## Installation - -As a library - -```shell -go get github.com/joho/godotenv -``` - -or if you want to use it as a bin command -```shell -go get github.com/joho/godotenv/cmd/godotenv -``` - -## Usage - -Add your application configuration to your `.env` file in the root of your project: - -```shell -S3_BUCKET=YOURS3BUCKET -SECRET_KEY=YOURSECRETKEYGOESHERE -``` - -Then in your Go app you can do something like - -```go -package main - -import ( - "github.com/joho/godotenv" - "log" - "os" -) - -func main() { - err := godotenv.Load() - if err != nil { - log.Fatal("Error loading .env file") - } - - s3Bucket := os.Getenv("S3_BUCKET") - secretKey := os.Getenv("SECRET_KEY") - - // now do something with s3 or whatever -} -``` - -If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import - -```go -import _ "github.com/joho/godotenv/autoload" -``` - -While `.env` in the project root is the default, you don't have to be constrained; both examples below are 100% legit - -```go -_ = godotenv.Load("somerandomfile") -_ = godotenv.Load("filenumberone.env", "filenumbertwo.env") -``` - -If you want to be really fancy with your env file you can do comments and exports (below is a valid env file) - -```shell -# I am a comment and that is OK -SOME_VAR=someval -FOO=BAR # comments at line end are OK too -export BAR=BAZ -``` - -Or finally you can do YAML(ish) style - -```yaml -FOO: bar -BAR: baz -``` - -as a final aside, if you don't want godotenv munging your env you can just get a map back instead - -```go -var myEnv map[string]string -myEnv, err := godotenv.Read() - -s3Bucket := myEnv["S3_BUCKET"] -``` - -... or from an `io.Reader` instead of a local file - -```go -reader := getRemoteFile() -myEnv, err := godotenv.Parse(reader) -``` - -... or from a `string` if you so desire - -```go -content := getRemoteFileContent() -myEnv, err := godotenv.Unmarshal(content) -``` - -### Command Mode - -Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH` - -``` -godotenv -f /some/path/to/.env some_command with some args -``` - -If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD` - -### Writing Env Files - -Godotenv can also write a map representing the environment to a correctly-formatted and escaped file - -```go -env, err := godotenv.Unmarshal("KEY=value") -err = godotenv.Write(env, "./.env") -``` - -... or to a string - -```go -env, err := godotenv.Unmarshal("KEY=value") -content, err := godotenv.Marshal(env) -``` - -## Contributing - -Contributions are most welcome! The parser itself is pretty stupidly naive and I wouldn't be surprised if it breaks with edge cases. - -*code changes without tests will not be accepted* - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Added some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Releases - -Releases should follow [Semver](http://semver.org/) though the first couple of releases are `v1` and `v1.1`. - -Use [annotated tags for all releases](https://github.com/joho/godotenv/issues/30).
Example `git tag -a v1.2.1` - -## CI - -Linux: [![Build Status](https://travis-ci.org/joho/godotenv.svg?branch=master)](https://travis-ci.org/joho/godotenv) Windows: [![Build status](https://ci.appveyor.com/api/projects/status/9v40vnfvvgde64u4)](https://ci.appveyor.com/project/joho/godotenv) - -## Who? - -The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](http://whoisjohnbarton.com) based off the tests/fixtures in the original library. diff --git a/vendor/github.com/joho/godotenv/autoload/autoload.go b/vendor/github.com/joho/godotenv/autoload/autoload.go deleted file mode 100644 index fbcd2bdf8e94..000000000000 --- a/vendor/github.com/joho/godotenv/autoload/autoload.go +++ /dev/null @@ -1,15 +0,0 @@ -package autoload - -/* - You can just read the .env file on import just by doing - - import _ "github.com/joho/godotenv/autoload" - - And bob's your mother's brother -*/ - -import "github.com/joho/godotenv" - -func init() { - godotenv.Load() -} diff --git a/vendor/github.com/joho/godotenv/cmd/godotenv/cmd.go b/vendor/github.com/joho/godotenv/cmd/godotenv/cmd.go deleted file mode 100644 index 04a9f6497fde..000000000000 --- a/vendor/github.com/joho/godotenv/cmd/godotenv/cmd.go +++ /dev/null @@ -1,54 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - - "strings" - - "github.com/joho/godotenv" -) - -func main() { - var showHelp bool - flag.BoolVar(&showHelp, "h", false, "show help") - var rawEnvFilenames string - flag.StringVar(&rawEnvFilenames, "f", "", "comma separated paths to .env files") - - flag.Parse() - - usage := ` -Run a process with a env setup from a .env file - -godotenv [-f ENV_FILE_PATHS] COMMAND_ARGS - -ENV_FILE_PATHS: comma separated paths to .env files -COMMAND_ARGS: command and args you want to run - -example - godotenv -f /path/to/something/.env,/another/path/.env fortune -` - // if no args or -h flag - // print usage and return - args := flag.Args() - if showHelp || len(args) == 0 { - fmt.Println(usage) - return - } - - // load env - var envFilenames []string - if rawEnvFilenames != "" { - envFilenames = strings.Split(rawEnvFilenames, ",") - } - - // take rest of args and "exec" them - cmd := args[0] - cmdArgs := args[1:] - - err := godotenv.Exec(envFilenames, cmd, cmdArgs) - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/joho/godotenv/fixtures/equals.env b/vendor/github.com/joho/godotenv/fixtures/equals.env deleted file mode 100644 index 594c5328ae9e..000000000000 --- a/vendor/github.com/joho/godotenv/fixtures/equals.env +++ /dev/null @@ -1,2 +0,0 @@ -export OPTION_A='postgres://localhost:5432/database?sslmode=disable' - diff --git a/vendor/github.com/joho/godotenv/fixtures/exported.env b/vendor/github.com/joho/godotenv/fixtures/exported.env deleted file mode 100644 index 5821377c76ba..000000000000 --- a/vendor/github.com/joho/godotenv/fixtures/exported.env +++ /dev/null @@ -1,2 +0,0 @@ -export OPTION_A=2 -export OPTION_B='\n' diff --git a/vendor/github.com/joho/godotenv/fixtures/invalid1.env b/vendor/github.com/joho/godotenv/fixtures/invalid1.env deleted file mode 100644 index 38f7e0e8bf76..000000000000 --- a/vendor/github.com/joho/godotenv/fixtures/invalid1.env +++ /dev/null @@ -1,2 +0,0 @@ -INVALID LINE -foo=bar diff --git a/vendor/github.com/joho/godotenv/fixtures/plain.env b/vendor/github.com/joho/godotenv/fixtures/plain.env deleted file mode 100644 index 43f7e44cc4cc..000000000000 --- 
a/vendor/github.com/joho/godotenv/fixtures/plain.env +++ /dev/null @@ -1,7 +0,0 @@ -OPTION_A=1 -OPTION_B=2 -OPTION_C= 3 -OPTION_D =4 -OPTION_E = 5 -OPTION_F = -OPTION_G= \ No newline at end of file diff --git a/vendor/github.com/joho/godotenv/fixtures/quoted.env b/vendor/github.com/joho/godotenv/fixtures/quoted.env deleted file mode 100644 index cc6376a3f288..000000000000 --- a/vendor/github.com/joho/godotenv/fixtures/quoted.env +++ /dev/null @@ -1,9 +0,0 @@ -OPTION_A='1' -OPTION_B='2' -OPTION_C='' -OPTION_D='\n' -OPTION_E="1" -OPTION_F="2" -OPTION_G="" -OPTION_H="\n" -OPTION_I = "echo 'asd'" diff --git a/vendor/github.com/joho/godotenv/godotenv.go b/vendor/github.com/joho/godotenv/godotenv.go deleted file mode 100644 index 48ae78c5d708..000000000000 --- a/vendor/github.com/joho/godotenv/godotenv.go +++ /dev/null @@ -1,314 +0,0 @@ -// Package godotenv is a Go port of the Ruby dotenv library (https://github.com/bkeepers/dotenv) -// -// Examples/readme can be found on the github page at https://github.com/joho/godotenv -// -// The TL;DR is that you make a .env file that looks something like -// -// SOME_ENV_VAR=somevalue -// -// and then in your go code you can call -// -// godotenv.Load() -// -// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR") -package godotenv - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - "os/exec" - "regexp" - "sort" - "strings" -) - -const doubleQuoteSpecialChars = "\\\n\r\"!$`" - -// Load will read your env file(s) and load them into ENV for this process. -// -// Call this function as close as possible to the start of your program (ideally in main) -// -// If you call Load without any args it will default to loading .env in the current path -// -// You can otherwise tell it which files to load (there can be more than one) like -// -// godotenv.Load("fileone", "filetwo") -// -// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults -func Load(filenames ...string) (err error) { - filenames = filenamesOrDefault(filenames) - - for _, filename := range filenames { - err = loadFile(filename, false) - if err != nil { - return // return early on a spazout - } - } - return -} - -// Overload will read your env file(s) and load them into ENV for this process. -// -// Call this function as close as possible to the start of your program (ideally in main) -// -// If you call Overload without any args it will default to loading .env in the current path -// -// You can otherwise tell it which files to load (there can be more than one) like -// -// godotenv.Overload("fileone", "filetwo") -// -// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefully set all vars.
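-//
-// For example (illustrative): with FOO=live already set in the process
-// environment and FOO=dev in the .env file, Load keeps FOO=live while
-// Overload replaces it with FOO=dev.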
-func Overload(filenames ...string) (err error) { - filenames = filenamesOrDefault(filenames) - - for _, filename := range filenames { - err = loadFile(filename, true) - if err != nil { - return // return early on a spazout - } - } - return -} - -// Read all env (with same file loading semantics as Load) but return values as -// a map rather than automatically writing values into env -func Read(filenames ...string) (envMap map[string]string, err error) { - filenames = filenamesOrDefault(filenames) - envMap = make(map[string]string) - - for _, filename := range filenames { - individualEnvMap, individualErr := readFile(filename) - - if individualErr != nil { - err = individualErr - return // return early on a spazout - } - - for key, value := range individualEnvMap { - envMap[key] = value - } - } - - return -} - -// Parse reads an env file from io.Reader, returning a map of keys and values. -func Parse(r io.Reader) (envMap map[string]string, err error) { - envMap = make(map[string]string) - - var lines []string - scanner := bufio.NewScanner(r) - for scanner.Scan() { - lines = append(lines, scanner.Text()) - } - - if err = scanner.Err(); err != nil { - return - } - - for _, fullLine := range lines { - if !isIgnoredLine(fullLine) { - var key, value string - key, value, err = parseLine(fullLine) - - if err != nil { - return - } - envMap[key] = value - } - } - return -} - -//Unmarshal reads an env file from a string, returning a map of keys and values. -func Unmarshal(str string) (envMap map[string]string, err error) { - return Parse(strings.NewReader(str)) -} - -// Exec loads env vars from the specified filenames (empty map falls back to default) -// then executes the cmd specified. -// -// Simply hooks up os.Stdin/err/out to the command and calls Run() -// -// If you want more fine grained control over your command it's recommended -// that you use `Load()` or `Read()` and the `os/exec` package yourself. -func Exec(filenames []string, cmd string, cmdArgs []string) error { - Load(filenames...) - - command := exec.Command(cmd, cmdArgs...) - command.Stdin = os.Stdin - command.Stdout = os.Stdout - command.Stderr = os.Stderr - return command.Run() -} - -// Write serializes the given environment and writes it to a file -func Write(envMap map[string]string, filename string) error { - content, error := Marshal(envMap) - if error != nil { - return error - } - file, error := os.Create(filename) - if error != nil { - return error - } - _, err := file.WriteString(content) - return err -} - -// Marshal outputs the given environment as a dotenv-formatted environment file. -// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped. 
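-//
-// For example (illustrative), Marshal(map[string]string{"B": "two", "A": "1"})
-// returns the sorted content A="1" and B="two", joined by a newline.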
-func Marshal(envMap map[string]string) (string, error) { - lines := make([]string, 0, len(envMap)) - for k, v := range envMap { - lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v))) - } - sort.Strings(lines) - return strings.Join(lines, "\n"), nil -} - -func filenamesOrDefault(filenames []string) []string { - if len(filenames) == 0 { - return []string{".env"} - } - return filenames -} - -func loadFile(filename string, overload bool) error { - envMap, err := readFile(filename) - if err != nil { - return err - } - - currentEnv := map[string]bool{} - rawEnv := os.Environ() - for _, rawEnvLine := range rawEnv { - key := strings.Split(rawEnvLine, "=")[0] - currentEnv[key] = true - } - - for key, value := range envMap { - if !currentEnv[key] || overload { - os.Setenv(key, value) - } - } - - return nil -} - -func readFile(filename string) (envMap map[string]string, err error) { - file, err := os.Open(filename) - if err != nil { - return - } - defer file.Close() - - return Parse(file) -} - -func parseLine(line string) (key string, value string, err error) { - if len(line) == 0 { - err = errors.New("zero length string") - return - } - - // ditch the comments (but keep quoted hashes) - if strings.Contains(line, "#") { - segmentsBetweenHashes := strings.Split(line, "#") - quotesAreOpen := false - var segmentsToKeep []string - for _, segment := range segmentsBetweenHashes { - if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 { - if quotesAreOpen { - quotesAreOpen = false - segmentsToKeep = append(segmentsToKeep, segment) - } else { - quotesAreOpen = true - } - } - - if len(segmentsToKeep) == 0 || quotesAreOpen { - segmentsToKeep = append(segmentsToKeep, segment) - } - } - - line = strings.Join(segmentsToKeep, "#") - } - - firstEquals := strings.Index(line, "=") - firstColon := strings.Index(line, ":") - splitString := strings.SplitN(line, "=", 2) - if firstColon != -1 && (firstColon < firstEquals || firstEquals == -1) { - //this is a yaml-style line - splitString = strings.SplitN(line, ":", 2) - } - - if len(splitString) != 2 { - err = errors.New("Can't separate key from value") - return - } - - // Parse the key - key = splitString[0] - if strings.HasPrefix(key, "export") { - key = strings.TrimPrefix(key, "export") - } - key = strings.Trim(key, " ") - - // Parse the value - value = parseValue(splitString[1]) - return -} - -func parseValue(value string) string { - - // trim - value = strings.Trim(value, " ") - - // check if we've got quoted values or possible escapes - if len(value) > 1 { - first := string(value[0:1]) - last := string(value[len(value)-1:]) - if first == last && strings.ContainsAny(first, `"'`) { - // pull the quotes off the edges - value = value[1 : len(value)-1] - // handle escapes - escapeRegex := regexp.MustCompile(`\\.`) - value = escapeRegex.ReplaceAllStringFunc(value, func(match string) string { - c := strings.TrimPrefix(match, `\`) - switch c { - case "n": - return "\n" - case "r": - return "\r" - default: - return c - } - }) - } - } - - return value -} - -func isIgnoredLine(line string) bool { - trimmedLine := strings.Trim(line, " \n\t") - return len(trimmedLine) == 0 || strings.HasPrefix(trimmedLine, "#") -} - -func doubleQuoteEscape(line string) string { - for _, c := range doubleQuoteSpecialChars { - toReplace := "\\" + string(c) - if c == '\n' { - toReplace = `\n` - } - if c == '\r' { - toReplace = `\r` - } - line = strings.Replace(line, string(c), toReplace, -1) - } - return line -} diff --git 
a/vendor/github.com/joho/godotenv/godotenv_test.go b/vendor/github.com/joho/godotenv/godotenv_test.go deleted file mode 100644 index fc4f7f0fdd91..000000000000 --- a/vendor/github.com/joho/godotenv/godotenv_test.go +++ /dev/null @@ -1,378 +0,0 @@ -package godotenv - -import ( - "bytes" - "fmt" - "os" - "reflect" - "testing" -) - -var noopPresets = make(map[string]string) - -func parseAndCompare(t *testing.T, rawEnvLine string, expectedKey string, expectedValue string) { - key, value, _ := parseLine(rawEnvLine) - if key != expectedKey || value != expectedValue { - t.Errorf("Expected '%v' to parse as '%v' => '%v', got '%v' => '%v' instead", rawEnvLine, expectedKey, expectedValue, key, value) - } -} - -func loadEnvAndCompareValues(t *testing.T, loader func(files ...string) error, envFileName string, expectedValues map[string]string, presets map[string]string) { - // first up, clear the env - os.Clearenv() - - for k, v := range presets { - os.Setenv(k, v) - } - - err := loader(envFileName) - if err != nil { - t.Fatalf("Error loading %v", envFileName) - } - - for k := range expectedValues { - envValue := os.Getenv(k) - v := expectedValues[k] - if envValue != v { - t.Errorf("Mismatch for key '%v': expected '%v' got '%v'", k, v, envValue) - } - } -} - -func TestLoadWithNoArgsLoadsDotEnv(t *testing.T) { - err := Load() - pathError := err.(*os.PathError) - if pathError == nil || pathError.Op != "open" || pathError.Path != ".env" { - t.Errorf("Didn't try and open .env by default") - } -} - -func TestOverloadWithNoArgsOverloadsDotEnv(t *testing.T) { - err := Overload() - pathError := err.(*os.PathError) - if pathError == nil || pathError.Op != "open" || pathError.Path != ".env" { - t.Errorf("Didn't try and open .env by default") - } -} - -func TestLoadFileNotFound(t *testing.T) { - err := Load("somefilethatwillneverexistever.env") - if err == nil { - t.Error("File wasn't found but Load didn't return an error") - } -} - -func TestOverloadFileNotFound(t *testing.T) { - err := Overload("somefilethatwillneverexistever.env") - if err == nil { - t.Error("File wasn't found but Overload didn't return an error") - } -} - -func TestReadPlainEnv(t *testing.T) { - envFileName := "fixtures/plain.env" - expectedValues := map[string]string{ - "OPTION_A": "1", - "OPTION_B": "2", - "OPTION_C": "3", - "OPTION_D": "4", - "OPTION_E": "5", - "OPTION_F": "", - "OPTION_G": "", - } - - envMap, err := Read(envFileName) - if err != nil { - t.Error("Error reading file") - } - - if len(envMap) != len(expectedValues) { - t.Error("Didn't get the right size map back") - } - - for key, value := range expectedValues { - if envMap[key] != value { - t.Error("Read got one of the keys wrong") - } - } -} - -func TestParse(t *testing.T) { - envMap, err := Parse(bytes.NewReader([]byte("ONE=1\nTWO='2'\nTHREE = \"3\""))) - expectedValues := map[string]string{ - "ONE": "1", - "TWO": "2", - "THREE": "3", - } - if err != nil { - t.Fatalf("error parsing env: %v", err) - } - for key, value := range expectedValues { - if envMap[key] != value { - t.Errorf("expected %s to be %s, got %s", key, value, envMap[key]) - } - } -} - -func TestLoadDoesNotOverride(t *testing.T) { - envFileName := "fixtures/plain.env" - - // ensure NO overload - presets := map[string]string{ - "OPTION_A": "do_not_override", - "OPTION_B": "", - } - - expectedValues := map[string]string{ - "OPTION_A": "do_not_override", - "OPTION_B": "", - } - loadEnvAndCompareValues(t, Load, envFileName, expectedValues, presets) -} - -func TestOveroadDoesOverride(t *testing.T) { - envFileName 
:= "fixtures/plain.env" - - // ensure NO overload - presets := map[string]string{ - "OPTION_A": "do_not_override", - } - - expectedValues := map[string]string{ - "OPTION_A": "1", - } - loadEnvAndCompareValues(t, Overload, envFileName, expectedValues, presets) -} - -func TestLoadPlainEnv(t *testing.T) { - envFileName := "fixtures/plain.env" - expectedValues := map[string]string{ - "OPTION_A": "1", - "OPTION_B": "2", - "OPTION_C": "3", - "OPTION_D": "4", - "OPTION_E": "5", - } - - loadEnvAndCompareValues(t, Load, envFileName, expectedValues, noopPresets) -} - -func TestLoadExportedEnv(t *testing.T) { - envFileName := "fixtures/exported.env" - expectedValues := map[string]string{ - "OPTION_A": "2", - "OPTION_B": "\n", - } - - loadEnvAndCompareValues(t, Load, envFileName, expectedValues, noopPresets) -} - -func TestLoadEqualsEnv(t *testing.T) { - envFileName := "fixtures/equals.env" - expectedValues := map[string]string{ - "OPTION_A": "postgres://localhost:5432/database?sslmode=disable", - } - - loadEnvAndCompareValues(t, Load, envFileName, expectedValues, noopPresets) -} - -func TestLoadQuotedEnv(t *testing.T) { - envFileName := "fixtures/quoted.env" - expectedValues := map[string]string{ - "OPTION_A": "1", - "OPTION_B": "2", - "OPTION_C": "", - "OPTION_D": "\n", - "OPTION_E": "1", - "OPTION_F": "2", - "OPTION_G": "", - "OPTION_H": "\n", - "OPTION_I": "echo 'asd'", - } - - loadEnvAndCompareValues(t, Load, envFileName, expectedValues, noopPresets) -} - -func TestActualEnvVarsAreLeftAlone(t *testing.T) { - os.Clearenv() - os.Setenv("OPTION_A", "actualenv") - _ = Load("fixtures/plain.env") - - if os.Getenv("OPTION_A") != "actualenv" { - t.Error("An ENV var set earlier was overwritten") - } -} - -func TestParsing(t *testing.T) { - // unquoted values - parseAndCompare(t, "FOO=bar", "FOO", "bar") - - // parses values with spaces around equal sign - parseAndCompare(t, "FOO =bar", "FOO", "bar") - parseAndCompare(t, "FOO= bar", "FOO", "bar") - - // parses double quoted values - parseAndCompare(t, `FOO="bar"`, "FOO", "bar") - - // parses single quoted values - parseAndCompare(t, "FOO='bar'", "FOO", "bar") - - // parses escaped double quotes - parseAndCompare(t, `FOO="escaped\"bar"`, "FOO", `escaped"bar`) - - // parses single quotes inside double quotes - parseAndCompare(t, `FOO="'d'"`, "FOO", `'d'`) - - // parses yaml style options - parseAndCompare(t, "OPTION_A: 1", "OPTION_A", "1") - - //parses yaml values with equal signs - parseAndCompare(t, "OPTION_A: Foo=bar", "OPTION_A", "Foo=bar") - - // parses non-yaml options with colons - parseAndCompare(t, "OPTION_A=1:B", "OPTION_A", "1:B") - - // parses export keyword - parseAndCompare(t, "export OPTION_A=2", "OPTION_A", "2") - parseAndCompare(t, `export OPTION_B='\n'`, "OPTION_B", "\n") - - // it 'expands newlines in quoted strings' do - // expect(env('FOO="bar\nbaz"')).to eql('FOO' => "bar\nbaz") - parseAndCompare(t, `FOO="bar\nbaz"`, "FOO", "bar\nbaz") - - // it 'parses varibales with "." 
in the name' do - // expect(env('FOO.BAR=foobar')).to eql('FOO.BAR' => 'foobar') - parseAndCompare(t, "FOO.BAR=foobar", "FOO.BAR", "foobar") - - // it 'parses varibales with several "=" in the value' do - // expect(env('FOO=foobar=')).to eql('FOO' => 'foobar=') - parseAndCompare(t, "FOO=foobar=", "FOO", "foobar=") - - // it 'strips unquoted values' do - // expect(env('foo=bar ')).to eql('foo' => 'bar') # not 'bar ' - parseAndCompare(t, "FOO=bar ", "FOO", "bar") - - // it 'ignores inline comments' do - // expect(env("foo=bar # this is foo")).to eql('foo' => 'bar') - parseAndCompare(t, "FOO=bar # this is foo", "FOO", "bar") - - // it 'allows # in quoted value' do - // expect(env('foo="bar#baz" # comment')).to eql('foo' => 'bar#baz') - parseAndCompare(t, `FOO="bar#baz" # comment`, "FOO", "bar#baz") - parseAndCompare(t, "FOO='bar#baz' # comment", "FOO", "bar#baz") - parseAndCompare(t, `FOO="bar#baz#bang" # comment`, "FOO", "bar#baz#bang") - - // it 'parses # in quoted values' do - // expect(env('foo="ba#r"')).to eql('foo' => 'ba#r') - // expect(env("foo='ba#r'")).to eql('foo' => 'ba#r') - parseAndCompare(t, `FOO="ba#r"`, "FOO", "ba#r") - parseAndCompare(t, "FOO='ba#r'", "FOO", "ba#r") - - //newlines and backslashes should be escaped - parseAndCompare(t, `FOO="bar\n\ b\az"`, "FOO", "bar\n baz") - parseAndCompare(t, `FOO="bar\\\n\ b\az"`, "FOO", "bar\\\n baz") - parseAndCompare(t, `FOO="bar\\r\ b\az"`, "FOO", "bar\\r baz") - - parseAndCompare(t, `="value"`, "", "value") - parseAndCompare(t, `KEY="`, "KEY", "\"") - parseAndCompare(t, `KEY="value`, "KEY", "\"value") - - // it 'throws an error if line format is incorrect' do - // expect{env('lol$wut')}.to raise_error(Dotenv::FormatError) - badlyFormattedLine := "lol$wut" - _, _, err := parseLine(badlyFormattedLine) - if err == nil { - t.Errorf("Expected \"%v\" to return error, but it didn't", badlyFormattedLine) - } -} - -func TestLinesToIgnore(t *testing.T) { - // it 'ignores empty lines' do - // expect(env("\n \t \nfoo=bar\n \nfizz=buzz")).to eql('foo' => 'bar', 'fizz' => 'buzz') - if !isIgnoredLine("\n") { - t.Error("Line with nothing but line break wasn't ignored") - } - - if !isIgnoredLine("\t\t ") { - t.Error("Line full of whitespace wasn't ignored") - } - - // it 'ignores comment lines' do - // expect(env("\n\n\n # HERE GOES FOO \nfoo=bar")).to eql('foo' => 'bar') - if !isIgnoredLine("# comment") { - t.Error("Comment wasn't ignored") - } - - if !isIgnoredLine("\t#comment") { - t.Error("Indented comment wasn't ignored") - } - - // make sure we're not getting false positives - if isIgnoredLine(`export OPTION_B='\n'`) { - t.Error("ignoring a perfectly valid line to parse") - } -} - -func TestErrorReadDirectory(t *testing.T) { - envFileName := "fixtures/" - envMap, err := Read(envFileName) - - if err == nil { - t.Errorf("Expected error, got %v", envMap) - } -} - -func TestErrorParsing(t *testing.T) { - envFileName := "fixtures/invalid1.env" - envMap, err := Read(envFileName) - if err == nil { - t.Errorf("Expected error, got %v", envMap) - } -} - -func TestWrite(t *testing.T) { - writeAndCompare := func(env string, expected string) { - envMap, _ := Unmarshal(env) - actual, _ := Marshal(envMap) - if expected != actual { - t.Errorf("Expected '%v' (%v) to write as '%v', got '%v' instead.", env, envMap, expected, actual) - } - } - //just test some single lines to show the general idea - //TestRoundtrip makes most of the good assertions - - //values are always double-quoted - writeAndCompare(`key=value`, `key="value"`) - //double-quotes are escaped - 
writeAndCompare(`key=va"lu"e`, `key="va\"lu\"e"`) - //but single quotes are left alone - writeAndCompare(`key=va'lu'e`, `key="va'lu'e"`) - // newlines, backslashes, and some other special chars are escaped - writeAndCompare(`foo="$ba\n\r\\r!"`, `foo="\$ba\n\r\\r\!"`) - // lines should be sorted - writeAndCompare("foo=bar\nbaz=buzz", "baz=\"buzz\"\nfoo=\"bar\"") - -} - -func TestRoundtrip(t *testing.T) { - fixtures := []string{"equals.env", "exported.env", "plain.env", "quoted.env"} - for _, fixture := range fixtures { - fixtureFilename := fmt.Sprintf("fixtures/%s", fixture) - env, err := readFile(fixtureFilename) - if err != nil { - t.Errorf("Expected '%s' to read without error (%v)", fixtureFilename, err) - } - rep, err := Marshal(env) - if err != nil { - t.Errorf("Expected '%s' to Marshal (%v)", fixtureFilename, err) - } - roundtripped, err := Unmarshal(rep) - if err != nil { - t.Errorf("Expected '%s' to Mashal and Unmarshal (%v)", fixtureFilename, err) - } - if !reflect.DeepEqual(env, roundtripped) { - t.Errorf("Expected '%s' to roundtrip as '%v', got '%v' instead", fixtureFilename, env, roundtripped) - } - - } -} diff --git a/vendor/github.com/lestrrat-go/jspointer/.gitignore b/vendor/github.com/lestrrat-go/jspointer/.gitignore deleted file mode 100644 index daf913b1b347..000000000000 --- a/vendor/github.com/lestrrat-go/jspointer/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/lestrrat-go/jspointer/.travis.yml b/vendor/github.com/lestrrat-go/jspointer/.travis.yml deleted file mode 100644 index 21e0a8e8d7c0..000000000000 --- a/vendor/github.com/lestrrat-go/jspointer/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -sudo: false -go: - - 1.11 - - tip diff --git a/vendor/github.com/lestrrat-go/jspointer/LICENSE b/vendor/github.com/lestrrat-go/jspointer/LICENSE deleted file mode 100644 index 20054b15434d..000000000000 --- a/vendor/github.com/lestrrat-go/jspointer/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 lestrrat - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
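For reference, the godotenv source and tests removed above implement the usual dotenv conventions: `#` comments (unless the hash sits inside quotes), single- and double-quoted values with `\n`/`\r` escapes, yaml-style `KEY: value` lines, and an optional `export` prefix. A minimal sketch of the parser API, using only functions present in the deleted files (the input literal is invented for illustration):

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/joho/godotenv"
)

func main() {
	// Parse accepts any io.Reader. The deleted Load/Overload helpers
	// wrap the same parser with file handling plus os.Setenv: Load
	// never clobbers variables already set in the environment,
	// Overload replaces them.
	env, err := godotenv.Parse(strings.NewReader(
		"export PORT=8080\nNAME=\"app # one\" # inline comment\nMODE: dev",
	))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(env["PORT"]) // 8080
	fmt.Println(env["NAME"]) // app # one  (quoted hash is kept)
	fmt.Println(env["MODE"]) // dev        (yaml-style line)
}
```

The deleted `Marshal`/`Unmarshal` pair (exercised by `TestWrite` and `TestRoundtrip` above) round-trips the same map to sorted `KEY="value"` lines with double quotes escaped.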
diff --git a/vendor/github.com/lestrrat-go/jspointer/README.md b/vendor/github.com/lestrrat-go/jspointer/README.md deleted file mode 100644 index e1a4fbcd01b0..000000000000 --- a/vendor/github.com/lestrrat-go/jspointer/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# go-jspointer - -[![Build Status](https://travis-ci.org/lestrrat-go/jspointer.svg?branch=master)](https://travis-ci.org/lestrrat-go/jspointer) - -[![GoDoc](https://godoc.org/github.com/lestrrat-go/jspointer?status.svg)](https://godoc.org/github.com/lestrrat-go/jspointer) - -JSON pointer for Go - -# Features - -* Compile and match against Maps, Slices, Structs (or pointers to those) -* Set values in each of those - -# Usage - -```go -p, _ := jspointer.New(`/foo/bar/baz`) -result, _ := p.Get(someStruct) -``` - -# Credits - -This is almost a fork of https://github.com/xeipuuv/gojsonpointer. - -# References - -| Name | Notes | -|:--------------------------------------------------------:|:---------------------------------| -| [go-jsval](https://github.com/lestrrat-go/jsval) | Validator generator | -| [go-jsschema](https://github.com/lestrrat-go/jsschema) | JSON Schema implementation | -| [go-jshschema](https://github.com/lestrrat-go/jshschema) | JSON Hyper Schema implementation | -| [go-jsref](https://github.com/lestrrat-go/jsref) | JSON Reference implementation | - - diff --git a/vendor/github.com/lestrrat-go/jspointer/bench/bench_test.go b/vendor/github.com/lestrrat-go/jspointer/bench/bench_test.go deleted file mode 100644 index 3ffe29fb4ded..000000000000 --- a/vendor/github.com/lestrrat-go/jspointer/bench/bench_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build bench - -package bench_test - -import ( - "encoding/json" - "testing" - - "github.com/lestrrat-go/jspointer" - "github.com/xeipuuv/gojsonpointer" -) - -const jsontxt = `{"a":[{"b": 1, "c": 2}], "d": 3}` - -var m map[string]interface{} - -func init() { - if err := json.Unmarshal([]byte(jsontxt), &m); err != nil { - panic(err) - } -} - -func BenchmarkGojsonpointer(b *testing.B) { - p, _ := gojsonpointer.NewJsonPointer(`/a/0/c`) - for i := 0; i < b.N; i++ { - res, kind, err := p.Get(m) - _ = res - _ = kind - _ = err - } -} - -func BenchmarkJspointer(b *testing.B) { - p, _ := jspointer.New(`/a/0/c`) - for i := 0; i < b.N; i++ { - res, err := p.Get(m) - _ = res - _ = err - } -} \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/jspointer/interface.go b/vendor/github.com/lestrrat-go/jspointer/interface.go deleted file mode 100644 index 7fe800233bb2..000000000000 --- a/vendor/github.com/lestrrat-go/jspointer/interface.go +++ /dev/null @@ -1,27 +0,0 @@ -package jspointer - -import "errors" - -// Errors used in jspointer package -var ( - ErrInvalidPointer = errors.New("invalid pointer") - ErrCanNotSet = errors.New("field cannot be set to") - ErrSliceIndexOutOfBounds = errors.New("slice index out of bounds") -) - -// Consntants used in jspointer package. 
Mostly for internal usage only -const ( - EncodedTilde = "~0" - EncodedSlash = "~1" - Separator = '/' -) - -type ErrNotFound struct { - Ptr string -} - -// JSPointer represents a JSON pointer -type JSPointer struct { - raw string - tokens tokens -} diff --git a/vendor/github.com/lestrrat-go/jspointer/jspointer.go b/vendor/github.com/lestrrat-go/jspointer/jspointer.go deleted file mode 100644 index c42b9613f9a3..000000000000 --- a/vendor/github.com/lestrrat-go/jspointer/jspointer.go +++ /dev/null @@ -1,262 +0,0 @@ -package jspointer - -import ( - "bytes" - "encoding/json" - "errors" - "reflect" - "strconv" - - "github.com/lestrrat-go/structinfo" -) - -type tokens struct { - s string - positions [][2]int -} - -func (t *tokens) size() int { - return len(t.positions) -} - -func (t *tokens) get(i int) string { - p := t.positions[i] - return t.s[p[0]:p[1]] -} - -// New creates a new JSON pointer for given path spec. If the path fails -// to be parsed, an error is returned -func New(path string) (*JSPointer, error) { - var p JSPointer - - if err := p.parse(path); err != nil { - return nil, err - } - p.raw = path - return &p, nil -} - -func (p *JSPointer) parse(s string) error { - if s == "" { - return nil - } - - if s[0] != Separator { - return ErrInvalidPointer - } - - if len(s) < 2 { - return ErrInvalidPointer - } - - ntokens := 0 - for i := 0; i < len(s); i++ { - if s[i] == '/' { - ntokens++ - } - } - - positions := make([][2]int, 0, ntokens) - start := 1 - var buf bytes.Buffer - buf.WriteByte(s[0]) - for i := 1; i < len(s); i++ { - switch s[i] { - case Separator: - buf.WriteByte(s[i]) - positions = append(positions, [2]int{start, buf.Len() - 1}) - start = i + 1 - case '~': - if len(s) == 1 { - buf.WriteByte(s[i]) - } else { - switch s[1] { - case '0': - buf.WriteByte('~') - case '1': - buf.WriteByte('/') - default: - buf.WriteByte(s[i]) - } - } - default: - buf.WriteByte(s[i]) - } - } - - if start < buf.Len() { - positions = append(positions, [2]int{start, buf.Len()}) - } - - p.tokens.s = buf.String() - p.tokens.positions = positions - return nil -} - -// String returns the stringified version of this JSON pointer -func (p JSPointer) String() string { - return p.raw -} - -// Get applies the JSON pointer to the given item, and returns -// the result. -func (p JSPointer) Get(item interface{}) (interface{}, error) { - var ctx matchCtx - - ctx.raw = p.raw - ctx.tokens = &p.tokens - ctx.apply(item) - return ctx.result, ctx.err -} - -// Set applies the JSON pointer to the given item, and sets the -// value accordingly. 
-func (p JSPointer) Set(item interface{}, value interface{}) error { - var ctx matchCtx - - ctx.set = true - ctx.raw = p.raw - ctx.tokens = &p.tokens - ctx.setvalue = value - ctx.apply(item) - return ctx.err -} - -type matchCtx struct { - err error - raw string - result interface{} - set bool - setvalue interface{} - tokens *tokens -} - -func (e ErrNotFound) Error() string { - return "match to JSON pointer not found: " + e.Ptr -} - -type JSONGetter interface { - JSONGet(tok string) (interface{}, error) -} - -var strType = reflect.TypeOf("") -var zeroval reflect.Value - -func (c *matchCtx) apply(item interface{}) { - if c.tokens.size() == 0 { - c.result = item - return - } - - node := item - lastidx := c.tokens.size() - 1 - for i := 0; i < c.tokens.size(); i++ { - token := c.tokens.get(i) - - if getter, ok := node.(JSONGetter); ok { - x, err := getter.JSONGet(token) - if err != nil { - c.err = ErrNotFound{Ptr: c.raw} - return - } - if i == lastidx { - c.result = x - return - } - node = x - continue - } - v := reflect.ValueOf(node) - - // Does this thing implement a JSONGet? - - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - fn := structinfo.StructFieldFromJSONName(v, token) - if fn == "" { - c.err = ErrNotFound{Ptr: c.raw} - return - } - f := v.FieldByName(fn) - if i == lastidx { - if c.set { - if !f.CanSet() { - c.err = ErrCanNotSet - return - } - f.Set(reflect.ValueOf(c.setvalue)) - return - } - c.result = f.Interface() - return - } - node = f.Interface() - case reflect.Map: - var vt reflect.Value - // We shall try to inflate the token to its Go native - // type if it's not a string. In other words, try not to - // outdo yourselves. - if t := v.Type().Key(); t != strType { - vt = reflect.New(t).Elem() - if err := json.Unmarshal([]byte(token), vt.Addr().Interface()); err != nil { - name := t.PkgPath() + "." 
+ t.Name() - if name == "" { - name = "(anonymous type)" - } - c.err = errors.New("unsupported conversion of string to " + name) - return - } - } else { - vt = reflect.ValueOf(token) - } - n := v.MapIndex(vt) - if zeroval == n { - c.err = ErrNotFound{Ptr: c.raw} - return - } - - if i == lastidx { - if c.set { - v.SetMapIndex(vt, reflect.ValueOf(c.setvalue)) - } else { - c.result = n.Interface() - } - return - } - - node = n.Interface() - case reflect.Slice: - m := node.([]interface{}) - wantidx, err := strconv.Atoi(token) - if err != nil { - c.err = err - return - } - - if wantidx < 0 || len(m) <= wantidx { - c.err = ErrSliceIndexOutOfBounds - return - } - - if i == lastidx { - if c.set { - m[wantidx] = c.setvalue - } else { - c.result = m[wantidx] - } - return - } - node = m[wantidx] - default: - c.err = ErrNotFound{Ptr: c.raw} - return - } - } - - // If you fell through here, there was a big problem - c.err = ErrNotFound{Ptr: c.raw} -} diff --git a/vendor/github.com/lestrrat-go/jspointer/jspointer_test.go b/vendor/github.com/lestrrat-go/jspointer/jspointer_test.go deleted file mode 100644 index 50ba8e610d29..000000000000 --- a/vendor/github.com/lestrrat-go/jspointer/jspointer_test.go +++ /dev/null @@ -1,168 +0,0 @@ -package jspointer_test - -import ( - "encoding/json" - "testing" - - "github.com/lestrrat-go/jspointer" - "github.com/stretchr/testify/assert" -) - -var src = `{ -"foo": ["bar", "baz"], -"obj": { "a":1, "b":2, "c":[3,4], "d":[ {"e":9}, {"f":[50,51]} ] }, -"": 0, -"a/b": 1, -"c%d": 2, -"e^f": 3, -"g|h": 4, -"i\\j": 5, -"k\"l": 6, -" ": 7, -"m~n": 8 -}` -var target map[string]interface{} - -func init() { - if err := json.Unmarshal([]byte(src), &target); err != nil { - panic(err) - } -} - -func TestEscaping(t *testing.T) { - data := []string{ - `/a~1b`, - `/m~0n`, - `/a~1b/m~0n`, - } - for _, pat := range data { - p, err := jspointer.New(pat) - if !assert.NoError(t, err, "jspointer.New should succeed for '%s'", pat) { - return - } - - if !assert.Equal(t, pat, p.String(), "input pattern and generated expression should match") { - return - } - } -} - -func runmatch(t *testing.T, pat string, m interface{}) (interface{}, error) { - p, err := jspointer.New(pat) - if !assert.NoError(t, err, "jspointer.New should succeed for '%s'", pat) { - return nil, err - } - - return p.Get(m) -} - -func TestFullDocument(t *testing.T) { - res, err := runmatch(t, ``, target) - if !assert.NoError(t, err, "jsonpointer.Get should succeed") { - return - } - if !assert.Equal(t, res, target, "res should be equal to target") { - return - } -} - -func TestGetObject(t *testing.T) { - pats := map[string]interface{}{ - `/obj/a`: float64(1), - `/obj/b`: float64(2), - `/obj/c/0`: float64(3), - `/obj/c/1`: float64(4), - `/obj/d/1/f/0`: float64(50), - } - for pat, expected := range pats { - res, err := runmatch(t, pat, target) - if !assert.NoError(t, err, "jsonpointer.Get should succeed") { - return - } - - if !assert.Equal(t, res, expected, "res should be equal to expected") { - return - } - } -} - -func TestGetArray(t *testing.T) { - foo := target["foo"].([]interface{}) - pats := map[string]interface{}{ - `/foo/0`: foo[0], - `/foo/1`: foo[1], - } - for pat, expected := range pats { - res, err := runmatch(t, pat, target) - if !assert.NoError(t, err, "jsonpointer.Get should succeed") { - return - } - - if !assert.Equal(t, res, expected, "res should be equal to expected") { - return - } - } -} - -func TestSet(t *testing.T) { - var m interface{} - json.Unmarshal([]byte(`{ -"a": [{"b": 1, "c": 2}], "d": 3 -}`), &m) - 
- p, err := jspointer.New(`/a/0/c`) - if !assert.NoError(t, err, "jspointer.New should succeed") { - return - } - - if !assert.NoError(t, p.Set(m, 999), "jspointer.Set should succeed") { - return - } - - res, err := runmatch(t, `/a/0/c`, m) - if !assert.NoError(t, err, "jsonpointer.Get should succeed") { - return - } - - if !assert.Equal(t, res, 999, "res should be equal to expected") { - return - } -} - -func TestStruct(t *testing.T) { - var s struct { - Foo string `json:"foo"` - Bar map[string]interface{} `json:"bar"` - Baz map[int]int `json:"baz"` - quux int - } - - s.Foo = "foooooo" - s.Bar = map[string]interface{}{ - "a": 0, - "b": 1, - } - s.Baz = map[int]int{ - 2: 3, - } - - res, err := runmatch(t, `/bar/b`, s) - if !assert.NoError(t, err, "jsonpointer.Get should succeed") { - return - } - - if !assert.Equal(t, res, 1, "res should be equal to expected value") { - return - } - - res, err = runmatch(t, `/baz/2`, s) - if !assert.NoError(t, err, "jsonpointer.Get should succeed") { - return - } - - if !assert.Equal(t, res, 3, "res should be equal to expected value") { - return - } -} - - diff --git a/vendor/github.com/lestrrat-go/jsref/.gitignore b/vendor/github.com/lestrrat-go/jsref/.gitignore deleted file mode 100644 index daf913b1b347..000000000000 --- a/vendor/github.com/lestrrat-go/jsref/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/lestrrat-go/jsref/.travis.yml b/vendor/github.com/lestrrat-go/jsref/.travis.yml deleted file mode 100644 index 2fbcb829a8e6..000000000000 --- a/vendor/github.com/lestrrat-go/jsref/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -sudo: false -go: - - 1.11.x - - tip diff --git a/vendor/github.com/lestrrat-go/jsref/LICENSE b/vendor/github.com/lestrrat-go/jsref/LICENSE deleted file mode 100644 index 20054b15434d..000000000000 --- a/vendor/github.com/lestrrat-go/jsref/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 lestrrat - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
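The jspointer files removed above implement RFC 6901 JSON pointers over maps, slices, and structs. A small usage sketch mirroring the deleted tests (the document literal is invented; `New`, `Get`, and `Set` are the API defined in the deleted `jspointer.go`):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/lestrrat-go/jspointer"
)

func main() {
	var doc interface{}
	if err := json.Unmarshal([]byte(`{"a":[{"b":1,"c":2}],"d":3}`), &doc); err != nil {
		log.Fatal(err)
	}

	// Compile once, then evaluate against any document. RFC 6901
	// escaping applies: "~0" is a literal '~', "~1" a literal '/'.
	p, err := jspointer.New(`/a/0/c`)
	if err != nil {
		log.Fatal(err)
	}

	got, _ := p.Get(doc)
	fmt.Println(got) // 2

	// Set writes through maps and slices in place.
	if err := p.Set(doc, 999); err != nil {
		log.Fatal(err)
	}
	got, _ = p.Get(doc)
	fmt.Println(got) // 999
}
```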
diff --git a/vendor/github.com/lestrrat-go/jsref/README.md b/vendor/github.com/lestrrat-go/jsref/README.md deleted file mode 100644 index 16a88c1aafa8..000000000000 --- a/vendor/github.com/lestrrat-go/jsref/README.md +++ /dev/null @@ -1,107 +0,0 @@ -# go-jsref - -[![Build Status](https://travis-ci.org/lestrrat-go/jsref.svg?branch=master)](https://travis-ci.org/lestrrat-go/jsref) - -[![GoDoc](https://godoc.org/github.com/lestrrat-go/jsref?status.svg)](https://godoc.org/github.com/lestrrat-go/jsref) - -JSON Reference Implementation for Go - -# SYNOPSIS - -```go -package jsref_test - -import ( - "encoding/json" - "fmt" - "log" - - jsref "github.com/lestrrat-go/jsref" - "github.com/lestrrat-go/jsref/provider" -) - -func Example() { - var v interface{} - src := []byte(` -{ - "foo": ["bar", {"$ref": "#/sub"}, {"$ref": "obj2#/sub"}], - "sub": "baz" -}`) - if err := json.Unmarshal(src, &v); err != nil { - log.Printf("%s", err) - return - } - - // External reference - mp := provider.NewMap() - mp.Set("obj2", map[string]string{"sub": "quux"}) - - res := jsref.New() - res.AddProvider(mp) // Register the provider - - data := []struct { - Ptr string - Options []jsref.Option - }{ - { - Ptr: "#/foo/0", // "bar" - }, - { - Ptr: "#/foo/1", // "baz" - }, - { - Ptr: "#/foo/2", // "quux" (resolves via `mp`) - }, - { - Ptr: "#/foo", // ["bar",{"$ref":"#/sub"},{"$ref":"obj2#/sub"}] - }, - { - Ptr: "#/foo", // ["bar","baz","quux"] - // experimental option to resolve all resulting values - Options: []jsref.Option{ jsref.WithRecursiveResolution(true) }, - }, - } - for _, set := range data { - result, err := res.Resolve(v, set.Ptr, set.Options...) - if err != nil { // failed to resolve - fmt.Printf("err: %s\n", err) - continue - } - b, _ := json.Marshal(result) - fmt.Printf("%s -> %s\n", set.Ptr, string(b)) - } - - // OUTPUT: - // #/foo/0 -> "bar" - // #/foo/1 -> "baz" - // #/foo/2 -> "quux" - // #/foo -> ["bar",{"$ref":"#/sub"},{"$ref":"obj2#/sub"}] - // #/foo -> ["bar","baz","quux"] -} -``` - -# Providers - -The Resolver object by default does not know how to resolve *any* reference: -You must provide it one or more `Provider`s to look for and resolve external references. - -Currently available `Provider`s are: - -| Name | Description | -|:--------------|:------------| -| provider.FS | Resolve from local file system. References must start with a `file:///` prefix | -| provider.Map | Resolve from in memory map. | -| provider.HTTP | Resolve by making HTTP requests. 
References must start with a `http(s?)://` prefix | - -# References - -| Name | Notes | -|:--------------------------------------------------------:|:---------------------------------| -| [go-jsval](https://github.com/lestrrat-go/jsval) | Validator generator | -| [go-jshschema](https://github.com/lestrrat-go/jshschema) | JSON Hyper Schema implementation | -| [go-jsschema](https://github.com/lestrrat-go/jsschema) | JSON Schema implementation | -| [go-jspointer](https://github.com/lestrrat-go/jspointer) | JSON Pointer implementations | - -# Acknowledgements - -* Boris Burtin diff --git a/vendor/github.com/lestrrat-go/jsref/interface.go b/vendor/github.com/lestrrat-go/jsref/interface.go deleted file mode 100644 index 905a8c313e45..000000000000 --- a/vendor/github.com/lestrrat-go/jsref/interface.go +++ /dev/null @@ -1,23 +0,0 @@ -package jsref - -import ( - "errors" - "net/url" - "reflect" -) - -var zeroval = reflect.Value{} - -var ErrMaxRecursion = errors.New("reached max number of recursions") - -// Resolver is responsible for interpreting the provided JSON -// reference. -type Resolver struct { - providers []Provider - MaxRecursions int -} - -// Provider resolves a URL into a ... thing. -type Provider interface { - Get(*url.URL) (interface{}, error) -} diff --git a/vendor/github.com/lestrrat-go/jsref/jsref.go b/vendor/github.com/lestrrat-go/jsref/jsref.go deleted file mode 100644 index 107473062bf2..000000000000 --- a/vendor/github.com/lestrrat-go/jsref/jsref.go +++ /dev/null @@ -1,396 +0,0 @@ -package jsref - -import ( - "net/url" - "reflect" - - "github.com/lestrrat-go/jspointer" - "github.com/lestrrat-go/pdebug" - "github.com/lestrrat-go/structinfo" - "github.com/pkg/errors" -) - -const ref = "$ref" -var refrv = reflect.ValueOf(ref) - -type Option interface { - Name() string - Value() interface{} -} - -type option struct { - name string - value interface{} -} - -func (o option) Name() string { return o.name } -func (o option) Value() interface{} { return o.value } - -// WithRecursiveResolution allows ou to enable recursive resolution -// on the *result* data structure. This means that after resolving -// the JSON reference in the structure at hand, it does another -// pass at resolving the entire data structure. Depending on your -// structure and size, this may incur significant cost. -// -// Please note that recursive resolution of the result is still -// experimental. If you find problems, please submit a pull request -// with a failing test case. -func WithRecursiveResolution(b bool) Option { - return &option{ - name: "recursiveResolution", - value: b, - } -} - -var DefaultMaxRecursions = 10 - -// New creates a new Resolver -func New() *Resolver { - return &Resolver{MaxRecursions: DefaultMaxRecursions} -} - -// AddProvider adds a new Provider to be searched for in case -// a JSON pointer with more than just the URI fragment is given. -func (r *Resolver) AddProvider(p Provider) error { - r.providers = append(r.providers, p) - return nil -} - -type resolveCtx struct { - rlevel int // recurse level - maxrlevel int // max recurse level - object interface{} // the main object that was passed to `Resolve()` -} - -// Resolve takes a target `v`, and a JSON pointer `spec`. -// spec is expected to be in the form of -// -// [scheme://[userinfo@]host/path[?query]]#fragment -// [scheme:opaque[?query]]#fragment -// -// where everything except for `#fragment` is optional. -// If the fragment is empty, an error is returned. 
-// -// If `spec` is the empty string, `v` is returned -// This method handles recursive JSON references. -// -// If `WithRecursiveResolution` option is given and its value is true, -// an attempt to resolve all references within the resulting object -// is made by traversing the structure recursively. Default is false -func (r *Resolver) Resolve(v interface{}, ptr string, options ...Option) (ret interface{}, err error) { - if pdebug.Enabled { - g := pdebug.Marker("Resolver.Resolve(%s)", ptr).BindError(&err) - defer g.End() - } - var recursiveResolution bool - for _, opt := range options { - switch opt.Name() { - case "recursiveResolution": - recursiveResolution = opt.Value().(bool) - } - } - - ctx := resolveCtx{ - rlevel: 0, - maxrlevel: r.MaxRecursions, - object: v, - } - - // First, expand the target as much as we can - v, err = expandRefRecursive(&ctx, r, v) - if err != nil { - return nil, errors.Wrap(err, "recursive search failed") - } - - result, err := evalptr(&ctx, r, v, ptr) - if err != nil { - return nil, err - } - - if recursiveResolution { - rv, err := traverseExpandRefRecursive(&ctx, r, reflect.ValueOf(result)) - if err != nil { - return nil, errors.Wrap(err, `failed to resolve result`) - } - result = rv.Interface() - } - - return result, nil -} - -func setPtrOrInterface(container, value reflect.Value) bool { - switch container.Kind() { - case reflect.Ptr: - if !value.CanAddr() { - return false - } - container.Set(value.Addr()) - case reflect.Interface: - container.Set(value) - default: - return false - } - return true -} - -func traverseExpandRefRecursive(ctx *resolveCtx, r *Resolver, rv reflect.Value) (reflect.Value, error) { - if pdebug.Enabled { - g := pdebug.Marker("traverseExpandRefRecursive") - defer g.End() - } - - switch rv.Kind() { - case reflect.Ptr, reflect.Interface: - rv = rv.Elem() - } - - switch rv.Kind() { - case reflect.Array, reflect.Slice: - for i := 0; i < rv.Len(); i++ { - elem := rv.Index(i) - var elemcontainer reflect.Value - switch elem.Kind() { - case reflect.Ptr, reflect.Interface: - elemcontainer = elem - elem = elem.Elem() - } - - // Need to check for elem being Valid, otherwise the - // subsequent call to Interface() will fail - if !elem.IsValid() { - continue - } - - if elemcontainer.IsValid() { - if !elemcontainer.CanSet() { - continue - } - } - newv, err := expandRefRecursive(ctx, r, elem.Interface()) - if err != nil { - return zeroval, errors.Wrap(err, `failed to expand array/slice element`) - } - newrv, err := traverseExpandRefRecursive(ctx, r, reflect.ValueOf(newv)) - if err != nil { - return zeroval, errors.Wrap(err, `failed to recurse into array/slice element`) - } - - if elemcontainer.IsValid() { - setPtrOrInterface(elemcontainer, newrv) - } else { - elem.Set(newrv) - } - } - case reflect.Map: - // No refs found in the map keys, but there could be more - // in the values - if _, err := findRef(rv.Interface()); err != nil { - for _, key := range rv.MapKeys() { - value, err := traverseExpandRefRecursive(ctx, r, rv.MapIndex(key)) - if err != nil { - return zeroval, errors.Wrap(err, `failed to traverse map value`) - } - rv.SetMapIndex(key, value) - } - return rv, nil - } - newv, err := expandRefRecursive(ctx, r, rv.Interface()) - if err != nil { - return zeroval, errors.Wrap(err, `failed to expand map element`) - } - return traverseExpandRefRecursive(ctx, r, reflect.ValueOf(newv)) - case reflect.Struct: - // No refs found in the map keys, but there could be more - // in the values - if _, err := findRef(rv.Interface()); err != nil { - for i 
:= 0; i < rv.NumField(); i++ { - field := rv.Field(i) - value, err := traverseExpandRefRecursive(ctx, r, field) - if err != nil { - return zeroval, errors.Wrap(err, `failed to traverse struct field value`) - } - field.Set(value) - } - return rv, nil - } - newv, err := expandRefRecursive(ctx, r, rv.Interface()) - if err != nil { - return zeroval, errors.Wrap(err, `failed to expand struct element`) - } - return traverseExpandRefRecursive(ctx, r, reflect.ValueOf(newv)) - } - return rv, nil -} - -// expands $ref with in v, until all $refs are expanded. -// note: DOES NOT recurse down into structures -func expandRefRecursive(ctx *resolveCtx, r *Resolver, v interface{}) (ret interface{}, err error) { - if pdebug.Enabled { - g := pdebug.Marker("expandRefRecursive") - defer g.End() - } - for { - ref, err := findRef(v) - if err != nil { - if pdebug.Enabled { - pdebug.Printf("No refs found. bailing out of loop") - } - break - } - - if pdebug.Enabled { - pdebug.Printf("Found ref '%s'", ref) - } - - newv, err := expandRef(ctx, r, v, ref) - if err != nil { - if pdebug.Enabled { - pdebug.Printf("Failed to expand ref '%s': %s", ref, err) - } - return nil, errors.Wrap(err, "failed to expand ref") - } - - v = newv - } - - return v, nil -} - -func expandRef(ctx *resolveCtx, r *Resolver, v interface{}, ref string) (ret interface{}, err error) { - ctx.rlevel++ - if ctx.rlevel > ctx.maxrlevel { - return nil, ErrMaxRecursion - } - - defer func() { ctx.rlevel-- }() - - u, err := url.Parse(ref) - if err != nil { - return nil, errors.Wrap(err, "failed to parse ref as URL") - } - - ptr := "#" + u.Fragment - if u.Host == "" && u.Path == "" { - if pdebug.Enabled { - pdebug.Printf("ptr doesn't contain any host/path part, apply json pointer directly to object") - } - return evalptr(ctx, r, ctx.object, ptr) - } - - u.Fragment = "" - for _, p := range r.providers { - pv, err := p.Get(u) - if err == nil { - if pdebug.Enabled { - pdebug.Printf("Found object matching %s", u) - } - - return evalptr(ctx, r, pv, ptr) - } - } - - return nil, errors.New("element pointed by $ref '" + ref + "' not found") -} - -func findRef(v interface{}) (ref string, err error) { - if pdebug.Enabled { - g := pdebug.Marker("findRef").BindError(&err) - defer g.End() - } - - rv := reflect.ValueOf(v) - switch rv.Kind() { - case reflect.Interface, reflect.Ptr: - rv = rv.Elem() - } - - if pdebug.Enabled { - pdebug.Printf("object is a '%s'", rv.Kind()) - } - - // Find if we have a "$ref" element - var refv reflect.Value - switch rv.Kind() { - case reflect.Map: - refv = rv.MapIndex(refrv) - case reflect.Struct: - if fn := structinfo.StructFieldFromJSONName(rv, ref); fn != "" { - refv = rv.FieldByName(fn) - } - default: - return "", errors.New("element is not a map-like container") - } - - if !refv.IsValid() { - return "", errors.New("$ref element not found") - } - - switch refv.Kind() { - case reflect.Interface, reflect.Ptr: - refv = refv.Elem() - } - - switch refv.Kind() { - case reflect.String: - // Empty string isn't a valid pointer - if refv.Len() <= 0 { - return "", errors.New("$ref element not found (empty)") - } - if pdebug.Enabled { - pdebug.Printf("Found ref '%s'", refv) - } - return refv.String(), nil - case reflect.Invalid: - return "", errors.New("$ref element not found") - default: - if pdebug.Enabled { - pdebug.Printf("'$ref' was found, but its kind is %s", refv.Kind()) - } - } - - return "", errors.New("$ref element must be a string") -} - -func evalptr(ctx *resolveCtx, r *Resolver, v interface{}, ptrspec string) (ret interface{}, err 
error) { - if pdebug.Enabled { - g := pdebug.Marker("evalptr(%s)", ptrspec).BindError(&err) - defer g.End() - } - - // If the reference is empty, return v - if ptrspec == "" || ptrspec == "#" { - if pdebug.Enabled { - pdebug.Printf("Empty pointer, return v itself") - } - return v, nil - } - - // Parse the spec. - u, err := url.Parse(ptrspec) - if err != nil { - return nil, errors.Wrap(err, "failed to parse reference spec") - } - - ptr := u.Fragment - - // We are evaluating the pointer part. That means if the - // Fragment portion is not set, there's no point in evaluating - if ptr == "" { - return nil, errors.Wrap(err, "empty json pointer") - } - - p, err := jspointer.New(ptr) - if err != nil { - return nil, errors.Wrap(err, "failed create a new JSON pointer") - } - x, err := p.Get(v) - if err != nil { - return nil, errors.Wrap(err, "failed to fetch value") - } - - if pdebug.Enabled { - pdebug.Printf("Evaulated JSON pointer, now checking if we can expand further") - } - // If this result contains more refs, expand that - return expandRefRecursive(ctx, r, x) -} diff --git a/vendor/github.com/lestrrat-go/jsref/jsref_example_test.go b/vendor/github.com/lestrrat-go/jsref/jsref_example_test.go deleted file mode 100644 index eeacd46da33d..000000000000 --- a/vendor/github.com/lestrrat-go/jsref/jsref_example_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package jsref_test - -import ( - "encoding/json" - "fmt" - "log" - - jsref "github.com/lestrrat-go/jsref" - "github.com/lestrrat-go/jsref/provider" -) - -func Example() { - var v interface{} - src := []byte(` -{ - "foo": ["bar", {"$ref": "#/sub"}, {"$ref": "obj2#/sub"}], - "sub": "baz" -}`) - if err := json.Unmarshal(src, &v); err != nil { - log.Printf("%s", err) - return - } - - // External reference - mp := provider.NewMap() - mp.Set("obj2", map[string]string{"sub": "quux"}) - - res := jsref.New() - res.AddProvider(mp) // Register the provider - - data := []struct { - Ptr string - Options []jsref.Option - }{ - { - Ptr: "#/foo/0", // "bar" - }, - { - Ptr: "#/foo/1", // "baz" - }, - { - Ptr: "#/foo/2", // "quux" (resolves via `mp`) - }, - { - Ptr: "#/foo", // ["bar",{"$ref":"#/sub"},{"$ref":"obj2#/sub"}] - }, - { - Ptr: "#/foo", // ["bar","baz","quux"] - // experimental option to resolve all resulting values - Options: []jsref.Option{ jsref.WithRecursiveResolution(true) }, - }, - } - for _, set := range data { - result, err := res.Resolve(v, set.Ptr, set.Options...) 
- if err != nil { // failed to resolve - fmt.Printf("err: %s\n", err) - continue - } - b, _ := json.Marshal(result) - fmt.Printf("%s -> %s\n", set.Ptr, string(b)) - } - - // OUTPUT: - // #/foo/0 -> "bar" - // #/foo/1 -> "baz" - // #/foo/2 -> "quux" - // #/foo -> ["bar",{"$ref":"#/sub"},{"$ref":"obj2#/sub"}] - // #/foo -> ["bar","baz","quux"] -} diff --git a/vendor/github.com/lestrrat-go/jsref/jsref_test.go b/vendor/github.com/lestrrat-go/jsref/jsref_test.go deleted file mode 100644 index 7c47688baff9..000000000000 --- a/vendor/github.com/lestrrat-go/jsref/jsref_test.go +++ /dev/null @@ -1,295 +0,0 @@ -package jsref_test - -import ( - "encoding/json" - "io/ioutil" - "log" - "net" - "net/http" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "testing" - "time" - - "github.com/lestrrat-go/jsref" - "github.com/lestrrat-go/jsref/provider" - "github.com/stretchr/testify/assert" -) - -func TestResolveMemory(t *testing.T) { - m := map[string]interface{}{ - "foo": []interface{}{ - "bar", - map[string]interface{}{ - "$ref": "#/sub", - }, - map[string]interface{}{ - "$ref": "obj2#/sub", - }, - }, - "sub": "baz", - } - - data := map[string]string{ - "#/foo/0": "bar", - "#/foo/1": "baz", - "#/foo/2": "quux", - } - - res := jsref.New() - mp := provider.NewMap() - mp.Set("obj2", map[string]string{"sub": "quux"}) - res.AddProvider(mp) - - ptrlist := make([]string, 0, len(data)) - for ptr := range data { - ptrlist = append(ptrlist, ptr) - } - sort.Strings(ptrlist) - - for _, ptr := range ptrlist { - expected := data[ptr] - v, err := res.Resolve(m, ptr) - if !assert.NoError(t, err, "Resolve(%s) should succeed", ptr) { - return - } - if !assert.Equal(t, v, expected, "Resolve(%s) resolves to '%s'", ptr, expected) { - return - } - } - - // In this test we test if we can optionally recursively - // resolve references - v, err := res.Resolve(m, "#/foo", jsref.WithRecursiveResolution(true)) - if !assert.NoError(t, err, "Resolve(%s) should succeed", "#/foo") { - return - } - - if !assert.Equal(t, []interface{}{"bar", "baz", "quux"}, v) { - return - } -} - -func TestResolveFS(t *testing.T) { - dir, err := ioutil.TempDir("", "jsref-test-") - if !assert.NoError(t, err, "creating temporary directory should succeed") { - return - } - defer os.RemoveAll(dir) - - path := filepath.Join(dir, "obj2") - f, err := os.Create(path) - if !assert.NoError(t, err, "creating %s file should succeed", path) { - return - } - f.Write([]byte(`{"sub":"quux"}`)) - f.Close() - - m := map[string]interface{}{ - "foo": []interface{}{ - "bar", - map[string]interface{}{ - "$ref": "#/sub", - }, - map[string]interface{}{ - "$ref": "file:///obj2#/sub", - }, - }, - "sub": "baz", - } - - data := map[string]string{ - "#/foo/0": "bar", - "#/foo/1": "baz", - "#/foo/2": "quux", - } - - res := jsref.New() - res.AddProvider(provider.NewFS(dir)) - - ptrlist := make([]string, 0, len(data)) - for ptr := range data { - ptrlist = append(ptrlist, ptr) - } - sort.Strings(ptrlist) - - for _, ptr := range ptrlist { - expected := data[ptr] - v, err := res.Resolve(m, ptr) - if !assert.NoError(t, err, "Resolve(%s) should succeed", ptr) { - return - } - if !assert.Equal(t, v, expected, "Resolve(%s) resolves to '%s'", ptr, expected) { - return - } - } -} - -func TestResolveHTTP(t *testing.T) { - if b, _ := strconv.ParseBool(os.Getenv("JSREF_LIVE_TESTS")); !b { - t.Skip("JSREF_LIVE_TESTS is not available, skipping test") - } - - cl := http.Client{ - Transport: &http.Transport{ - Dial: func(n, a string) (net.Conn, error) { - return net.DialTimeout(n, a, 
2*time.Second) - }, - }, - } - - const schemaURL = `http://json-schema.org/draft-04/schema#` - if _, err := cl.Get(schemaURL); err != nil { - t.Skip("JSON schema '" + schemaURL + "' unavailable, skipping test") - } - - res := jsref.New() - hp := provider.NewHTTP() - res.AddProvider(hp) - - m := map[string]interface{}{ - "fetch": map[string]string{ - "$ref": schemaURL, - }, - } - - ptr := "#/fetch" - v, err := res.Resolve(m, ptr) - if !assert.NoError(t, err, "Resolve(%s) should succeed", ptr) { - return - } - - switch v.(type) { - case map[string]interface{}: - mv := v.(map[string]interface{}) - if !assert.Equal(t, mv["id"], schemaURL, "Resolve("+schemaURL+") resolved to JSON schema") { - return - } - default: - t.Errorf("Expected map[string]interface{}") - } -} - -func TestResolveRecursive(t *testing.T) { - var v interface{} - src := []byte(` -{ - "foo": { - "type": "array", - "items": [{ "$ref": "#" }] - } -}`) - if err := json.Unmarshal(src, &v); err != nil { - log.Printf("%s", err) - return - } - - res := jsref.New() - _, err := res.Resolve(v, "#/foo") // "bar" - if !assert.NoError(t, err, "res.Resolve should succeed") { - return - } -} - -func TestGHPR12(t *testing.T) { - // https://github.com/lestrrat-go/jsref/pull/2 gave me an example - // using "foo" as the JS pointer (could've been a typo) - // but it gave me weird results, so this is where I'm testing it - var v interface{} - src := []byte(` -{ - "foo": "bar" -}`) - if err := json.Unmarshal(src, &v); err != nil { - log.Printf("%s", err) - return - } - - res := jsref.New() - _, err := res.Resolve(v, "foo") - if !assert.NoError(t, err, "res.Resolve should fail") { - return - } -} - -func TestHyperSchemaRecursive(t *testing.T) { - src := []byte(` -{ - "definitions": { - "virtual_machine": { - "type": "object" - } - }, - "links": [ - { - "schema": { - "type": "object" - }, - "targetSchema": { - "$ref": "#/definitions/virtual_machine" - } - }, - { - "targetSchema": { - "type": "array", - "items": { - "$ref": "#/definitions/virtual_machine" - } - } - } - ] -}`) - var v interface{} - err := json.Unmarshal(src, &v) - assert.Nil(t, err) - res := jsref.New() - - ptrs := []string{ - "#/links/0/schema", - "#/links/0/targetSchema", - "#/links/1/targetSchema", - } - for _, ptr := range ptrs { - result, err := res.Resolve(v, ptr, jsref.WithRecursiveResolution(true)) - assert.Nil(t, err) - b, err := json.Marshal(result) - if !assert.NoError(t, err, "json.Marshal should succeed") { - return - } - if !assert.False(t, strings.Contains(string(b), "$ref"), "%s did not recursively resolve", ptr) { - t.Logf("resolved to '%s'", b) - return - } - } -} - -func TestGHIssue7(t *testing.T) { - src := []byte(`{ - "status": { - "type": ["string", "null"], - "enum": [ - "sent", - "duplicate", - "error", - "invalid", - "rejected", - "unqueued", - "unsubscribed", - null - ] - } -}`) - - var v interface{} - if !assert.NoError(t, json.Unmarshal(src, &v), `Unmarshal should succeed`) { - return - } - - res := jsref.New() - result, err := res.Resolve(v, "", jsref.WithRecursiveResolution(true)) - t.Logf("%s", result) - t.Logf("%s", err) -} diff --git a/vendor/github.com/lestrrat-go/jsref/provider/fs.go b/vendor/github.com/lestrrat-go/jsref/provider/fs.go deleted file mode 100644 index 21eab966d109..000000000000 --- a/vendor/github.com/lestrrat-go/jsref/provider/fs.go +++ /dev/null @@ -1,75 +0,0 @@ -package provider - -import ( - "encoding/json" - "net/url" - "os" - "path/filepath" - "strings" - - "github.com/lestrrat-go/pdebug" - "github.com/pkg/errors" -) - -// NewFS 
creates a new Provider that looks for JSON documents -// from the local file system. Documents are only searched -// within `root` -func NewFS(root string) *FS { - return &FS{ - mp: NewMap(), - Root: root, - } -} - -// Get fetches the document specified by the `key` argument. -// Everything other than `.Path` is ignored. -// Note that once a document is read, it WILL be cached for the -// duration of this object, unless you call `Reset` -func (fp *FS) Get(key *url.URL) (out interface{}, err error) { - if pdebug.Enabled { - g := pdebug.Marker("provider.FS.Get(%s)", key.String()).BindError(&err) - defer g.End() - } - - if strings.ToLower(key.Scheme) != "file" { - return nil, errors.New("unsupported scheme '" + key.Scheme + "'") - } - - // Everything other than "Path" is ignored - path := filepath.Clean(filepath.Join(fp.Root, key.Path)) - - mpkey := &url.URL{Path: path} - if x, err := fp.mp.Get(mpkey); err == nil { - return x, nil - } - - fi, err := os.Stat(path) - if err != nil { - return nil, errors.Wrap(err, "failed to stat local resource") - } - - if fi.IsDir() { - return nil, errors.New("target is not a file") - } - - f, err := os.Open(path) - if err != nil { - return nil, errors.Wrap(err, "failed to open local resource") - } - defer f.Close() - - var x interface{} - dec := json.NewDecoder(f) - if err := dec.Decode(&x); err != nil { - return nil, errors.Wrap(err, "failed to parse JSON local resource") - } - - fp.mp.Set(path, x) - - return x, nil -} - -// Reset resets the in memory cache of JSON documents -func (fp *FS) Reset() error { - return fp.mp.Reset() -} diff --git a/vendor/github.com/lestrrat-go/jsref/provider/http.go b/vendor/github.com/lestrrat-go/jsref/provider/http.go deleted file mode 100644 index acb407b8850f..000000000000 --- a/vendor/github.com/lestrrat-go/jsref/provider/http.go +++ /dev/null @@ -1,65 +0,0 @@ -package provider - -import ( - "encoding/json" - "net/http" - "net/url" - "strings" - "time" - - "github.com/lestrrat-go/pdebug" - "github.com/pkg/errors" -) - -// NewFS creates a new Provider that looks for JSON documents -// from the internet over HTTP(s) -func NewHTTP() *HTTP { - return &HTTP{ - mp: NewMap(), - Client: &http.Client{ - Timeout: 5 * time.Second, - }, - } -} - -// Get fetches the document specified by the `key` argument, making -// a HTTP request if necessary. -// Note that once a document is read, it WILL be cached for the -// duration of this object, unless you call `Reset` -func (hp *HTTP) Get(key *url.URL) (interface{}, error) { - if pdebug.Enabled { - g := pdebug.Marker("HTTP.Get(%s)", key) - defer g.End() - } - - switch strings.ToLower(key.Scheme) { - case "http", "https": - default: - return nil, errors.New("key is not http/https URL") - } - - v, err := hp.mp.Get(key) - if err == nil { // Found! 
- return v, nil - } - - res, err := hp.Client.Get(key.String()) - if err != nil { - return nil, errors.Wrap(err, "failed to fetch HTTP resource") - } - defer res.Body.Close() - - dec := json.NewDecoder(res.Body) - - var x interface{} - if err := dec.Decode(&x); err != nil { - return nil, errors.Wrap(err, "failed to parse JSON from HTTP resource") - } - - return x, nil -} - -// Reset resets the in memory cache of JSON documents -func (hp *HTTP) Reset() error { - return hp.mp.Reset() -} diff --git a/vendor/github.com/lestrrat-go/jsref/provider/interface.go b/vendor/github.com/lestrrat-go/jsref/provider/interface.go deleted file mode 100644 index 4eaf7190a28f..000000000000 --- a/vendor/github.com/lestrrat-go/jsref/provider/interface.go +++ /dev/null @@ -1,21 +0,0 @@ -package provider - -import ( - "net/http" - "sync" -) - -type FS struct { - mp *Map - Root string -} - -type HTTP struct { - mp *Map - Client *http.Client -} - -type Map struct { - lock sync.Mutex - mapping map[string]interface{} -} diff --git a/vendor/github.com/lestrrat-go/jsref/provider/map.go b/vendor/github.com/lestrrat-go/jsref/provider/map.go deleted file mode 100644 index 48d65a6a4c48..000000000000 --- a/vendor/github.com/lestrrat-go/jsref/provider/map.go +++ /dev/null @@ -1,47 +0,0 @@ -package provider - -import ( - "net/url" - - "github.com/lestrrat-go/pdebug" - "github.com/pkg/errors" -) - -func NewMap() *Map { - return &Map{ - mapping: make(map[string]interface{}), - } -} - -func (mp *Map) Set(key string, v interface{}) error { - mp.lock.Lock() - defer mp.lock.Unlock() - - mp.mapping[key] = v - return nil -} - -func (mp *Map) Get(key *url.URL) (res interface{}, err error) { - if pdebug.Enabled { - g := pdebug.Marker("Map.Get(%s)", key).BindError(&err) - defer g.End() - } - - mp.lock.Lock() - defer mp.lock.Unlock() - - v, ok := mp.mapping[key.String()] - if !ok { - return nil, errors.New("not found") - } - - return v, nil -} - -func (mp *Map) Reset() error { - mp.lock.Lock() - defer mp.lock.Unlock() - - mp.mapping = make(map[string]interface{}) - return nil -} diff --git a/vendor/github.com/lestrrat-go/pdebug/.gitignore b/vendor/github.com/lestrrat-go/pdebug/.gitignore deleted file mode 100644 index daf913b1b347..000000000000 --- a/vendor/github.com/lestrrat-go/pdebug/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/lestrrat-go/pdebug/.travis.yml b/vendor/github.com/lestrrat-go/pdebug/.travis.yml deleted file mode 100644 index baecfce60a99..000000000000 --- a/vendor/github.com/lestrrat-go/pdebug/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -sudo: false -go: - - 1.6 - - 1.7 - - tip -install: - - go get -t -v ./... - - go get -t -tags debug0 -v ./... -script: - - go test -v ./... - - go test -tags debug ./... - - PDEBUG_TRACE=1 go test -tags debug ./... - - go test -tags debug0 ./... 
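The jsref resolver removed above only follows `$ref`s it can dereference: external documents come from `Provider`s registered on the `Resolver`. Besides the bundled `provider.FS`, `provider.Map`, and `provider.HTTP`, anything implementing `Get(*url.URL) (interface{}, error)` works. A sketch of a custom provider (the `mem://` scheme and the `staticProvider` name are invented for illustration; the interface and `Resolver` methods match the deleted `interface.go` and `jsref.go`):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/url"

	"github.com/lestrrat-go/jsref"
)

// staticProvider serves documents from a fixed map keyed by URL string.
type staticProvider struct {
	docs map[string]interface{}
}

func (s *staticProvider) Get(u *url.URL) (interface{}, error) {
	if doc, ok := s.docs[u.String()]; ok {
		return doc, nil
	}
	return nil, fmt.Errorf("no document for %s", u)
}

func main() {
	var v interface{}
	src := []byte(`{"foo": {"$ref": "mem://defs#/sub"}}`)
	if err := json.Unmarshal(src, &v); err != nil {
		log.Fatal(err)
	}

	res := jsref.New()
	res.AddProvider(&staticProvider{docs: map[string]interface{}{
		// The fragment is stripped before the provider is consulted.
		"mem://defs": map[string]interface{}{"sub": "resolved"},
	}})

	out, err := res.Resolve(v, "#/foo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // resolved
}
```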
diff --git a/vendor/github.com/lestrrat-go/pdebug/LICENSE b/vendor/github.com/lestrrat-go/pdebug/LICENSE deleted file mode 100644 index 20054b15434d..000000000000 --- a/vendor/github.com/lestrrat-go/pdebug/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 lestrrat - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/lestrrat-go/pdebug/README.md b/vendor/github.com/lestrrat-go/pdebug/README.md deleted file mode 100644 index 4f6d88959f8a..000000000000 --- a/vendor/github.com/lestrrat-go/pdebug/README.md +++ /dev/null @@ -1,95 +0,0 @@ -# go-pdebug - -[![Build Status](https://travis-ci.org/lestrrat-go/pdebug.svg?branch=master)](https://travis-ci.org/lestrrat-go/pdebug) - -[![GoDoc](https://godoc.org/github.com/lestrrat-go/pdebug?status.svg)](https://godoc.org/github.com/lestrrat-go/pdebug) - -Utilities for my print debugging fun. YMMV - -# Synopsis - -![optimized](https://pbs.twimg.com/media/CbiqhzLUUAIN_7o.png) - -# Description - -Building with `pdebug` declares a constant, `pdebug.Enabled` which you -can use to easily compile in/out depending on the presence of a build tag. - -```go -func Foo() { - // will only be available if you compile with `-tags debug` - if pdebug.Enabled { - pdebug.Printf("Starting Foo()! - } -} -``` - -Note that using `github.com/lestrrat-go/pdebug` and `-tags debug` only -compiles in the code. In order to actually show the debug trace, you need -to specify an environment variable: - -```shell -# For example, to show debug code during testing: -PDEBUG_TRACE=1 go test -tags debug -``` - -If you want to forcefully show the trace (which is handy when you're -debugging/testing), you can use the `debug0` tag instead: - -```shell -go test -tags debug0 -``` - -# Markers - -When you want to print debug a chain of function calls, you can use the -`Marker` functions: - -```go -func Foo() { - if pdebug.Enabled { - g := pdebug.Marker("Foo") - defer g.End() - } - - pdebug.Printf("Inside Foo()!") -} -``` - -This will cause all of the `Printf` calls to automatically indent -the output so it's visually easier to see where a certain trace log -is being generated. - -By default it will print something like: - -``` -|DEBUG| START Foo -|DEBUG| Inside Foo()! 
-|DEBUG| END Foo (1.23μs) -``` - -If you want to automatically show the error value you are returning -(but only if there is an error), you can use the `BindError` method: - -```go -func Foo() (err error) { - if pdebug.Enabled { - g := pdebug.Marker("Foo").BindError(&err) - defer g.End() - } - - pdebug.Printf("Inside Foo()!") - - return errors.New("boo") -} -``` - -This will print something like: - - -``` -|DEBUG| START Foo -|DEBUG| Inside Foo()! -|DEBUG| END Foo (1.23μs): ERROR boo -``` - diff --git a/vendor/github.com/lestrrat-go/pdebug/autoflag_off.go b/vendor/github.com/lestrrat-go/pdebug/autoflag_off.go deleted file mode 100644 index 3ca774591fd0..000000000000 --- a/vendor/github.com/lestrrat-go/pdebug/autoflag_off.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build debug - -package pdebug - -import ( - "os" - "strconv" -) - -var Trace = false -func init() { - if b, err := strconv.ParseBool(os.Getenv("PDEBUG_TRACE")); err == nil && b { - Trace = true - } -} \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/pdebug/autoflag_on.go b/vendor/github.com/lestrrat-go/pdebug/autoflag_on.go deleted file mode 100644 index f5f674db5d05..000000000000 --- a/vendor/github.com/lestrrat-go/pdebug/autoflag_on.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build debug0 - -package pdebug - -var Trace = true - diff --git a/vendor/github.com/lestrrat-go/pdebug/common.go b/vendor/github.com/lestrrat-go/pdebug/common.go deleted file mode 100644 index 95f11a007695..000000000000 --- a/vendor/github.com/lestrrat-go/pdebug/common.go +++ /dev/null @@ -1,43 +0,0 @@ -package pdebug - -import ( - "io" - "os" - "sync" - "time" -) - -type pdctx struct { - mutex sync.Mutex - indentL int - LogTime bool - Prefix string - Writer io.Writer -} - -var emptyMarkerGuard = &markerg{} - -type markerg struct { - indentg guard - ctx *pdctx - f string - args []interface{} - start time.Time - errptr *error -} - -var DefaultCtx = &pdctx{ - LogTime: true, - Prefix: "|DEBUG| ", - Writer: os.Stdout, -} - -type guard struct { - cb func() -} - -func (g *guard) End() { - if cb := g.cb; cb != nil { - cb() - } -} diff --git a/vendor/github.com/lestrrat-go/pdebug/common_test.go b/vendor/github.com/lestrrat-go/pdebug/common_test.go deleted file mode 100644 index 2589727304ce..000000000000 --- a/vendor/github.com/lestrrat-go/pdebug/common_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package pdebug - -import ( - "bytes" - "errors" - "io" - "regexp" - "testing" - - "github.com/stretchr/testify/assert" -) - -func setw(ctx *pdctx, w io.Writer) func() { - oldw := ctx.Writer - ctx.Writer = w - return func() { ctx.Writer = oldw } -} - -func TestPrintf(t *testing.T) { - buf := &bytes.Buffer{} - wg := setw(DefaultCtx, buf) - defer wg() - - Printf("Hello, World!") - - if Enabled && Trace { - re := regexp.MustCompile(`\|DEBUG\| \d+\.\d+ Hello, World!\n`) - if !assert.True(t, re.MatchString(buf.String()), "Simple Printf works") { - return - } - } else { - if !assert.Equal(t, "", buf.String(), "Simple Printf should be suppressed") { - return - } - } -} - -func TestMarker(t *testing.T) { - buf := &bytes.Buffer{} - wg := setw(DefaultCtx, buf) - defer wg() - - f2 := func() (err error) { - g := Marker("f2").BindError(&err) - defer g.End() - Printf("Hello, World!") - return errors.New("dummy error") - } - - f1 := func() { - g := Marker("f1") - defer g.End() - f2() - } - - f1() - - if Enabled && Trace { - re := regexp.MustCompile(`\|DEBUG\| \d+\.\d+ START f1\n\|DEBUG\| \d+\.\d+ START f2\n\|DEBUG\| \d+\.\d+ Hello, World!\n\|DEBUG\| \d+\.\d+ END f2 \(`) - if 
!assert.True(t, re.MatchString(buf.String()), "Markers should work") { - t.Logf("Expected '%v'", re) - t.Logf("Actual '%v'", buf.String()) - return - } - } else { - if !assert.Equal(t, "", buf.String(), "Markers should work") { - return - } - } -} - -func TestLegacyMarker(t *testing.T) { - buf := &bytes.Buffer{} - wg := setw(DefaultCtx, buf) - defer wg() - - f2 := func() (err error) { - g := IPrintf("START f2") - defer func() { - if err == nil { - g.IRelease("END f2") - } else { - g.IRelease("END f2: %s", err) - } - }() - Printf("Hello, World!") - return errors.New("dummy error") - } - - f1 := func() { - g := IPrintf("START f1") - defer g.IRelease("END f1") - f2() - } - - f1() - - if Enabled && Trace { - re := regexp.MustCompile(`\|DEBUG\| \d+\.\d+ START f1\n\|DEBUG\| \d+\.\d+ START f2\n\|DEBUG\| \d+\.\d+ Hello, World!\n\|DEBUG\| \d+\.\d+ END f2`) - if !assert.True(t, re.MatchString(buf.String()), "Markers should work") { - t.Logf("Expected '%v'", re) - t.Logf("Actual '%v'", buf.String()) - return - } - - // TODO: check for error and timestamp - } else { - if !assert.Equal(t, "", buf.String(), "Markers should work") { - return - } - } -} diff --git a/vendor/github.com/lestrrat-go/pdebug/debug0_test.go b/vendor/github.com/lestrrat-go/pdebug/debug0_test.go deleted file mode 100644 index 052fcca96231..000000000000 --- a/vendor/github.com/lestrrat-go/pdebug/debug0_test.go +++ /dev/null @@ -1,13 +0,0 @@ -//+build debug0,!debug - -package pdebug - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestDebug0Enabled(t *testing.T) { - assert.True(t, Enabled, "Enable is true") -} diff --git a/vendor/github.com/lestrrat-go/pdebug/debug_off.go b/vendor/github.com/lestrrat-go/pdebug/debug_off.go deleted file mode 100644 index 9f794b29747b..000000000000 --- a/vendor/github.com/lestrrat-go/pdebug/debug_off.go +++ /dev/null @@ -1,39 +0,0 @@ -//+build !debug,!debug0 - -package pdebug - -// Enabled is true if `-tags debug` or `-tags debug0` is used -// during compilation. Use this to "ifdef-out" debug blocks. -const Enabled = false - -// Trace is true if `-tags debug` is used AND the environment -// variable `PDEBUG_TRACE` is set to a `true` value (i.e., -// 1, true, etc), or `-tags debug0` is used. This allows you to -// compile-in the trace logs, but only show them when you -// set the environment variable -const Trace = false - -// IRelease is deprecated. Use Marker()/End() instead -func (g guard) IRelease(f string, args ...interface{}) {} - -// IPrintf is deprecated. Use Marker()/End() instead -func IPrintf(f string, args ...interface{}) guard { return guard{} } - -// Printf prints to standard out, just like a normal fmt.Printf, -// but respects the indentation level set by IPrintf/IRelease. -// Printf is no op unless you compile with the `debug` tag. -func Printf(f string, args ...interface{}) {} - -// Dump dumps the objects using go-spew. -// Dump is a no op unless you compile with the `debug` tag. -func Dump(v ...interface{}) {} - -// Marker marks the beginning of an indented block. The message -// you specify in the arguments is prefixed witha "START", and -// subsequent calls to Printf will be indented one level more. -// -// To reset this, you must call End() on the guard object that -// gets returned by Marker(). 
-func Marker(f string, args ...interface{}) *markerg { return emptyMarkerGuard } -func (g *markerg) BindError(_ *error) *markerg { return g } -func (g *markerg) End() {} diff --git a/vendor/github.com/lestrrat-go/pdebug/debug_on.go b/vendor/github.com/lestrrat-go/pdebug/debug_on.go deleted file mode 100644 index 064f3420ca8b..000000000000 --- a/vendor/github.com/lestrrat-go/pdebug/debug_on.go +++ /dev/null @@ -1,170 +0,0 @@ -// +build debug OR debug0 - -package pdebug - -import ( - "bytes" - "fmt" - "strings" - "time" - - "github.com/davecgh/go-spew/spew" -) - -const Enabled = true - -type Guard interface { - End() -} - -var emptyGuard = &guard{} - -func (ctx *pdctx) Unindent() { - ctx.mutex.Lock() - defer ctx.mutex.Unlock() - ctx.indentL-- -} - -func (ctx *pdctx) Indent() guard { - ctx.mutex.Lock() - ctx.indentL++ - ctx.mutex.Unlock() - - return guard{cb: ctx.Unindent} -} - -func (ctx *pdctx) preamble(buf *bytes.Buffer) { - if p := ctx.Prefix; len(p) > 0 { - buf.WriteString(p) - } - if ctx.LogTime { - fmt.Fprintf(buf, "%0.5f ", float64(time.Now().UnixNano()) / 1000000.0) - } - - for i := 0; i < ctx.indentL; i++ { - buf.WriteString(" ") - } -} - -func (ctx *pdctx) Printf(f string, args ...interface{}) { - if !strings.HasSuffix(f, "\n") { - f = f + "\n" - } - buf := bytes.Buffer{} - ctx.preamble(&buf) - fmt.Fprintf(&buf, f, args...) - buf.WriteTo(ctx.Writer) -} - -func Marker(f string, args ...interface{}) *markerg { - return DefaultCtx.Marker(f, args...) -} - -func (ctx *pdctx) Marker(f string, args ...interface{}) *markerg { - if !Trace { - return emptyMarkerGuard - } - - buf := &bytes.Buffer{} - ctx.preamble(buf) - buf.WriteString("START ") - fmt.Fprintf(buf, f, args...) - if buf.Len() > 0 { - if b := buf.Bytes(); b[buf.Len()-1] != '\n' { - buf.WriteRune('\n') - } - } - - buf.WriteTo(ctx.Writer) - - g := ctx.Indent() - return &markerg{ - indentg: g, - ctx: ctx, - f: f, - args: args, - start: time.Now(), - errptr: nil, - } -} - -func (g *markerg) BindError(errptr *error) *markerg { - if g.ctx == nil { - return g - } - g.ctx.mutex.Lock() - defer g.ctx.mutex.Unlock() - - g.errptr = errptr - return g -} - -func (g *markerg) End() { - if g.ctx == nil { - return - } - - g.indentg.End() // unindent - buf := &bytes.Buffer{} - g.ctx.preamble(buf) - fmt.Fprint(buf, "END ") - fmt.Fprintf(buf, g.f, g.args...) - fmt.Fprintf(buf, " (%s)", time.Since(g.start)) - if errptr := g.errptr; errptr != nil && *errptr != nil { - fmt.Fprintf(buf, ": ERROR: %s", *errptr) - } - - if buf.Len() > 0 { - if b := buf.Bytes(); b[buf.Len()-1] != '\n' { - buf.WriteRune('\n') - } - } - - buf.WriteTo(g.ctx.Writer) -} - -type legacyg struct { - guard - start time.Time -} - -var emptylegacyg = legacyg{} - -func (g legacyg) IRelease(f string, args ...interface{}) { - if !Trace { - return - } - g.End() - dur := time.Since(g.start) - Printf("%s (%s)", fmt.Sprintf(f, args...), dur) -} - -// IPrintf indents and then prints debug messages. Execute the callback -// to undo the indent -func IPrintf(f string, args ...interface{}) legacyg { - if !Trace { - return emptylegacyg - } - - DefaultCtx.Printf(f, args...) - g := legacyg{ - guard: DefaultCtx.Indent(), - start: time.Now(), - } - return g -} - -// Printf prints debug messages. Only available if compiled with "debug" tag -func Printf(f string, args ...interface{}) { - if !Trace { - return - } - DefaultCtx.Printf(f, args...) -} - -func Dump(v ...interface{}) { - if !Trace { - return - } - spew.Dump(v...) 
-} diff --git a/vendor/github.com/lestrrat-go/pdebug/debug_test.go b/vendor/github.com/lestrrat-go/pdebug/debug_test.go deleted file mode 100644 index 7e1077ee7596..000000000000 --- a/vendor/github.com/lestrrat-go/pdebug/debug_test.go +++ /dev/null @@ -1,30 +0,0 @@ -//+build debug,!debug0 - -package pdebug - -import ( - "os" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestDebugEnabled(t *testing.T) { - if !assert.True(t, Enabled, "Enable is true") { - return - } - - b, err := strconv.ParseBool(os.Getenv("PDEBUG_TRACE")) - if err == nil && b { - if !assert.True(t, Trace, "Trace is true") { - return - } - t.Logf("Trace is enabled") - } else { - if !assert.False(t, Trace, "Trace is false") { - return - } - t.Logf("Trace is disabled") - } -} \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/pdebug/doc.go b/vendor/github.com/lestrrat-go/pdebug/doc.go deleted file mode 100644 index d0566de384ab..000000000000 --- a/vendor/github.com/lestrrat-go/pdebug/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Package pdebug provides tools to produce debug logs the way the author -// (Daisuke Maki a.k.a. lestrrat) likes. All of the functions are no-ops -// unless you compile with the `-tags debug` option. -// -// When you compile your program with `-tags debug`, no trace is displayed, -// but the code enclosed within `if pdebug.Enabled { ... }` is compiled in. -// To show the debug trace, set the PDEBUG_TRACE environment variable to -// true (or 1, or whatever `strconv.ParseBool` parses to true) -// -// If you want to show the debug trace regardless of an environment variable, -// for example, perhaps while you are debugging or running tests, use the -// `-tags debug0` build tag instead. This will enable the debug trace -// forcefully -package pdebug - diff --git a/vendor/github.com/lestrrat-go/pdebug/nodebug_test.go b/vendor/github.com/lestrrat-go/pdebug/nodebug_test.go deleted file mode 100644 index ae09c76fa85c..000000000000 --- a/vendor/github.com/lestrrat-go/pdebug/nodebug_test.go +++ /dev/null @@ -1,14 +0,0 @@ -//+build !debug,!debug0 - -package pdebug - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestDisabled(t *testing.T) { - assert.False(t, Enabled, "Enable is false") - assert.False(t, Trace, "Trace is false") -} \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/structinfo/.gitignore b/vendor/github.com/lestrrat-go/structinfo/.gitignore deleted file mode 100644 index daf913b1b347..000000000000 --- a/vendor/github.com/lestrrat-go/structinfo/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/lestrrat-go/structinfo/.travis.yml b/vendor/github.com/lestrrat-go/structinfo/.travis.yml deleted file mode 100644 index 5b800297f00e..000000000000 --- a/vendor/github.com/lestrrat-go/structinfo/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -sudo: false -go: - - 1.5 - - tip diff --git a/vendor/github.com/lestrrat-go/structinfo/LICENSE b/vendor/github.com/lestrrat-go/structinfo/LICENSE deleted file mode 100644 index 20054b15434d..000000000000 --- a/vendor/github.com/lestrrat-go/structinfo/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 lestrrat - -Permission is hereby 
granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/lestrrat-go/structinfo/README.md b/vendor/github.com/lestrrat-go/structinfo/README.md deleted file mode 100644 index fbf6c877d683..000000000000 --- a/vendor/github.com/lestrrat-go/structinfo/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# structinfo - -[![Build Status](https://travis-ci.org/lestrrat-go/structinfo.svg?branch=master)](https://travis-ci.org/lestrrat-go/structinfo) - -[![GoDoc](https://godoc.org/github.com/lestrrat-go/structinfo?status.svg)](https://godoc.org/github.com/lestrrat-go/structinfo) - -Tools to inspect Go structs diff --git a/vendor/github.com/lestrrat-go/structinfo/structinfo.go b/vendor/github.com/lestrrat-go/structinfo/structinfo.go deleted file mode 100644 index 0a283ca88897..000000000000 --- a/vendor/github.com/lestrrat-go/structinfo/structinfo.go +++ /dev/null @@ -1,118 +0,0 @@ -// Package structinfo contains tools to inspect structs. - -package structinfo - -import ( - "reflect" - "sync" -) - -type jsonFieldMap struct { - lock sync.Mutex - fields map[string]string -} - -var type2jfm = map[reflect.Type]jsonFieldMap{} -var type2jfmMutex = sync.Mutex{} - -// JSONFieldsFromStruct returns the names of JSON fields associated -// with the given struct. Returns nil if v is not a struct -func JSONFieldsFromStruct(v reflect.Value) []string { - if v.Kind() != reflect.Struct { - return nil - } - - m := getType2jfm(v.Type()) - m.lock.Lock() - defer m.lock.Unlock() - - l := make([]string, 0, len(m.fields)) - for k := range m.fields { - l = append(l, k) - } - return l -} - -// StructFieldFromJSONName returns the struct field name on the -// given struct value. Empty value means the field is either not -// public, or does not exist. -// -// This can be used to map JSON field names to actual struct fields. 
-func StructFieldFromJSONName(v reflect.Value, name string) string { - if v.Kind() != reflect.Struct { - return "" - } - - m := getType2jfm(v.Type()) - m.lock.Lock() - defer m.lock.Unlock() - - s, ok := m.fields[name] - if !ok { - return "" - } - return s -} - -func getType2jfm(t reflect.Type) jsonFieldMap { - type2jfmMutex.Lock() - defer type2jfmMutex.Unlock() - - return getType2jfm_nolock(t) -} - -func getType2jfm_nolock(t reflect.Type) jsonFieldMap { - fm, ok := type2jfm[t] - if ok { - return fm - } - - fm = constructJfm(t) - type2jfm[t] = fm - return fm -} - -func constructJfm(t reflect.Type) jsonFieldMap { - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - - fm := jsonFieldMap{ - fields: make(map[string]string), - } - for i := 0; i < t.NumField(); i++ { - sf := t.Field(i) - if sf.Anonymous { // embedded! got to recurse - fm2 := getType2jfm_nolock(sf.Type) - for k, v := range fm2.fields { - fm.fields[k] = v - } - continue - } - - if sf.PkgPath != "" { // unexported - continue - } - - tag := sf.Tag.Get("json") - if tag == "-" { - continue - } - - if tag == "" || tag[0] == ',' { - fm.fields[sf.Name] = sf.Name - continue - } - - flen := 0 - for j := 0; j < len(tag); j++ { - if tag[j] == ',' { - break - } - flen = j - } - fm.fields[tag[:flen+1]] = sf.Name - } - - return fm -} \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/structinfo/structinfo_test.go b/vendor/github.com/lestrrat-go/structinfo/structinfo_test.go deleted file mode 100644 index 980c2654b7c3..000000000000 --- a/vendor/github.com/lestrrat-go/structinfo/structinfo_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package structinfo_test - -import ( - "reflect" - "testing" - - "github.com/lestrrat-go/structinfo" - "github.com/stretchr/testify/assert" -) - -type Quux struct { - Baz string `json:"baz"` -} - -type X struct { - private int - Quux - Foo string `json:"foo"` - Bar string `json:"bar,omitempty"` -} - -func TestStructFields(t *testing.T) { - fields := make(map[string]struct{}) - for _, name := range structinfo.JSONFieldsFromStruct(reflect.ValueOf(X{})) { - fields[name] = struct{}{} - } - - expected := map[string]struct{}{ - "foo": {}, - "bar": {}, - "baz": {}, - } - - if !assert.Equal(t, expected, fields, "expected fields match") { - return - } -} - -func TestLookupSructFieldFromJSONName(t *testing.T) { - rv := reflect.ValueOf(X{}) - - data := map[string]string{ - "foo": "Foo", - "bar": "Bar", - "baz": "Baz", - } - - for jsname, fname := range data { - fn := structinfo.StructFieldFromJSONName(rv, jsname) - if !assert.NotEqual(t, fn, "", "should find '%s'", jsname) { - return - } - - sf, ok := rv.Type().FieldByName(fn) - if !assert.True(t, ok, "should be able resolve '%s' (%s)", jsname, fn) { - return - } - - if !assert.Equal(t, sf.Name, fname, "'%s' should map to '%s'", jsname, fname) { - return - } - } -} diff --git a/vendor/github.com/lestrrat/go-jspointer/.travis.yml b/vendor/github.com/lestrrat/go-jspointer/.travis.yml index 21e0a8e8d7c0..5b800297f00e 100644 --- a/vendor/github.com/lestrrat/go-jspointer/.travis.yml +++ b/vendor/github.com/lestrrat/go-jspointer/.travis.yml @@ -1,5 +1,5 @@ language: go sudo: false go: - - 1.11 + - 1.5 - tip diff --git a/vendor/github.com/lestrrat/go-jspointer/README.md b/vendor/github.com/lestrrat/go-jspointer/README.md index e1a4fbcd01b0..8bce21bcaff3 100644 --- a/vendor/github.com/lestrrat/go-jspointer/README.md +++ b/vendor/github.com/lestrrat/go-jspointer/README.md @@ -1,8 +1,8 @@ # go-jspointer -[![Build 
Status](https://travis-ci.org/lestrrat-go/jspointer.svg?branch=master)](https://travis-ci.org/lestrrat-go/jspointer) +[![Build Status](https://travis-ci.org/lestrrat/go-jspointer.svg?branch=master)](https://travis-ci.org/lestrrat/go-jspointer) -[![GoDoc](https://godoc.org/github.com/lestrrat-go/jspointer?status.svg)](https://godoc.org/github.com/lestrrat-go/jspointer) +[![GoDoc](https://godoc.org/github.com/lestrrat/go-jspointer?status.svg)](https://godoc.org/github.com/lestrrat/go-jspointer) JSON pointer for Go @@ -26,9 +26,9 @@ This is almost a fork of https://github.com/xeipuuv/gojsonpointer. | Name | Notes | |:--------------------------------------------------------:|:---------------------------------| -| [go-jsval](https://github.com/lestrrat-go/jsval) | Validator generator | -| [go-jsschema](https://github.com/lestrrat-go/jsschema) | JSON Schema implementation | -| [go-jshschema](https://github.com/lestrrat-go/jshschema) | JSON Hyper Schema implementation | -| [go-jsref](https://github.com/lestrrat-go/jsref) | JSON Reference implementation | +| [go-jsval](https://github.com/lestrrat/go-jsval) | Validator generator | +| [go-jsschema](https://github.com/lestrrat/go-jsschema) | JSON Schema implementation | +| [go-jshschema](https://github.com/lestrrat/go-jshschema) | JSON Hyper Schema implementation | +| [go-jsref](https://github.com/lestrrat/go-jsref) | JSON Reference implementation | diff --git a/vendor/github.com/lestrrat/go-jspointer/bench/bench_test.go b/vendor/github.com/lestrrat/go-jspointer/bench/bench_test.go index 3ffe29fb4ded..c0350b280b93 100644 --- a/vendor/github.com/lestrrat/go-jspointer/bench/bench_test.go +++ b/vendor/github.com/lestrrat/go-jspointer/bench/bench_test.go @@ -6,7 +6,7 @@ import ( "encoding/json" "testing" - "github.com/lestrrat-go/jspointer" + "github.com/lestrrat/go-jspointer" "github.com/xeipuuv/gojsonpointer" ) diff --git a/vendor/github.com/lestrrat/go-jspointer/interface.go b/vendor/github.com/lestrrat/go-jspointer/interface.go index 7fe800233bb2..1e83d599556e 100644 --- a/vendor/github.com/lestrrat/go-jspointer/interface.go +++ b/vendor/github.com/lestrrat/go-jspointer/interface.go @@ -23,5 +23,5 @@ type ErrNotFound struct { // JSPointer represents a JSON pointer type JSPointer struct { raw string - tokens tokens + tokens []string } diff --git a/vendor/github.com/lestrrat/go-jspointer/jspointer.go b/vendor/github.com/lestrrat/go-jspointer/jspointer.go index c42b9613f9a3..1c1f88fd2a53 100644 --- a/vendor/github.com/lestrrat/go-jspointer/jspointer.go +++ b/vendor/github.com/lestrrat/go-jspointer/jspointer.go @@ -1,96 +1,79 @@ package jspointer import ( - "bytes" "encoding/json" "errors" "reflect" "strconv" + "strings" + "sync" - "github.com/lestrrat-go/structinfo" + "github.com/lestrrat/go-structinfo" ) -type tokens struct { - s string - positions [][2]int +var ctxPool = sync.Pool{ + New: moreCtx, } -func (t *tokens) size() int { - return len(t.positions) +func moreCtx() interface{} { + return &matchCtx{} } -func (t *tokens) get(i int) string { - p := t.positions[i] - return t.s[p[0]:p[1]] +func getCtx() *matchCtx { + return ctxPool.Get().(*matchCtx) +} + +func releaseCtx(ctx *matchCtx) { + ctx.err = nil + ctx.set = false + ctx.tokens = nil + ctx.result = nil + ctxPool.Put(ctx) } // New creates a new JSON pointer for given path spec. 
If the path fails // to be parsed, an error is returned func New(path string) (*JSPointer, error) { var p JSPointer - - if err := p.parse(path); err != nil { + dtokens, err := parse(path) + if err != nil { return nil, err } p.raw = path + p.tokens = dtokens return &p, nil } -func (p *JSPointer) parse(s string) error { +func parse(s string) ([]string, error) { if s == "" { - return nil + return nil, nil } if s[0] != Separator { - return ErrInvalidPointer + return nil, ErrInvalidPointer } - if len(s) < 2 { - return ErrInvalidPointer - } - - ntokens := 0 - for i := 0; i < len(s); i++ { - if s[i] == '/' { - ntokens++ - } - } - - positions := make([][2]int, 0, ntokens) - start := 1 - var buf bytes.Buffer - buf.WriteByte(s[0]) + prev := 0 + tokens := []string{} for i := 1; i < len(s); i++ { switch s[i] { case Separator: - buf.WriteByte(s[i]) - positions = append(positions, [2]int{start, buf.Len() - 1}) - start = i + 1 - case '~': - if len(s) == 1 { - buf.WriteByte(s[i]) - } else { - switch s[1] { - case '0': - buf.WriteByte('~') - case '1': - buf.WriteByte('/') - default: - buf.WriteByte(s[i]) - } - } - default: - buf.WriteByte(s[i]) + tokens = append(tokens, s[prev+1:i]) + prev = i } } - if start < buf.Len() { - positions = append(positions, [2]int{start, buf.Len()}) + if prev != len(s) { + tokens = append(tokens, s[prev+1:]) + } + + dtokens := make([]string, 0, len(tokens)) + for _, t := range tokens { + t = strings.Replace(strings.Replace(t, EncodedSlash, "/", -1), EncodedTilde, "~", -1) + dtokens = append(dtokens, t) } - p.tokens.s = buf.String() - p.tokens.positions = positions - return nil + return dtokens, nil } // String returns the stringified version of this JSON pointer @@ -101,10 +84,11 @@ func (p JSPointer) String() string { // Get applies the JSON pointer to the given item, and returns // the result. func (p JSPointer) Get(item interface{}) (interface{}, error) { - var ctx matchCtx + ctx := getCtx() + defer releaseCtx(ctx) ctx.raw = p.raw - ctx.tokens = &p.tokens + ctx.tokens = p.tokens ctx.apply(item) return ctx.result, ctx.err } @@ -112,11 +96,12 @@ func (p JSPointer) Get(item interface{}) (interface{}, error) { // Set applies the JSON pointer to the given item, and sets the // value accordingly. func (p JSPointer) Set(item interface{}, value interface{}) error { - var ctx matchCtx + ctx := getCtx() + defer releaseCtx(ctx) ctx.set = true ctx.raw = p.raw - ctx.tokens = &p.tokens + ctx.tokens = p.tokens ctx.setvalue = value ctx.apply(item) return ctx.err @@ -128,48 +113,25 @@ type matchCtx struct { result interface{} set bool setvalue interface{} - tokens *tokens + tokens []string } func (e ErrNotFound) Error() string { return "match to JSON pointer not found: " + e.Ptr } -type JSONGetter interface { - JSONGet(tok string) (interface{}, error) -} - var strType = reflect.TypeOf("") -var zeroval reflect.Value func (c *matchCtx) apply(item interface{}) { - if c.tokens.size() == 0 { + if len(c.tokens) == 0 { c.result = item return } + lastidx := len(c.tokens) - 1 node := item - lastidx := c.tokens.size() - 1 - for i := 0; i < c.tokens.size(); i++ { - token := c.tokens.get(i) - - if getter, ok := node.(JSONGetter); ok { - x, err := getter.JSONGet(token) - if err != nil { - c.err = ErrNotFound{Ptr: c.raw} - return - } - if i == lastidx { - c.result = x - return - } - node = x - continue - } + for tidx, token := range c.tokens { v := reflect.ValueOf(node) - - // Does this thing implement a JSONGet? 
- if v.Kind() == reflect.Ptr { v = v.Elem() } @@ -182,7 +144,7 @@ func (c *matchCtx) apply(item interface{}) { return } f := v.FieldByName(fn) - if i == lastidx { + if tidx == lastidx { if c.set { if !f.CanSet() { c.err = ErrCanNotSet @@ -214,12 +176,12 @@ func (c *matchCtx) apply(item interface{}) { vt = reflect.ValueOf(token) } n := v.MapIndex(vt) - if zeroval == n { + if (reflect.Value{}) == n { c.err = ErrNotFound{Ptr: c.raw} return } - if i == lastidx { + if tidx == lastidx { if c.set { v.SetMapIndex(vt, reflect.ValueOf(c.setvalue)) } else { @@ -242,7 +204,7 @@ func (c *matchCtx) apply(item interface{}) { return } - if i == lastidx { + if tidx == lastidx { if c.set { m[wantidx] = c.setvalue } else { diff --git a/vendor/github.com/lestrrat/go-jspointer/jspointer_test.go b/vendor/github.com/lestrrat/go-jspointer/jspointer_test.go index 50ba8e610d29..388c76261ee6 100644 --- a/vendor/github.com/lestrrat/go-jspointer/jspointer_test.go +++ b/vendor/github.com/lestrrat/go-jspointer/jspointer_test.go @@ -4,7 +4,7 @@ import ( "encoding/json" "testing" - "github.com/lestrrat-go/jspointer" + "github.com/lestrrat/go-jspointer" "github.com/stretchr/testify/assert" ) diff --git a/vendor/github.com/lestrrat/go-jsschema/.travis.yml b/vendor/github.com/lestrrat/go-jsschema/.travis.yml index a20c62692f00..ad9a6993d127 100644 --- a/vendor/github.com/lestrrat/go-jsschema/.travis.yml +++ b/vendor/github.com/lestrrat/go-jsschema/.travis.yml @@ -1,7 +1,7 @@ language: go sudo: false go: - - 1.11.x + - 1.7 - tip script: - go test -v ./... diff --git a/vendor/github.com/lestrrat/go-jsschema/README.md b/vendor/github.com/lestrrat/go-jsschema/README.md index 79031957e1dd..c3f78a31e776 100644 --- a/vendor/github.com/lestrrat/go-jsschema/README.md +++ b/vendor/github.com/lestrrat/go-jsschema/README.md @@ -1,8 +1,8 @@ # go-jsschema -[![Build Status](https://travis-ci.org/lestrrat-go/jsschema.svg?branch=master)](https://travis-ci.org/lestrrat-go/jsschema) +[![Build Status](https://travis-ci.org/lestrrat/go-jsschema.svg?branch=master)](https://travis-ci.org/lestrrat/go-jsschema) -[![GoDoc](https://godoc.org/github.com/lestrrat-go/jsschema?status.svg)](https://godoc.org/github.com/lestrrat-go/jsschema) +[![GoDoc](https://godoc.org/github.com/lestrrat/go-jsschema?status.svg)](https://godoc.org/github.com/lestrrat/go-jsschema) JSON Schema for Go @@ -14,8 +14,8 @@ package schema_test import ( "log" - "github.com/lestrrat-go/jsschema" - "github.com/lestrrat-go/jsschema/validator" + "github.com/lestrrat/go-jsschema" + "github.com/lestrrat/go-jsschema/validator" ) func Example() { @@ -47,13 +47,13 @@ This packages parses a JSON Schema file, and allows you to inspect, modify the schema, but does nothing more. If you want to validate using the JSON Schema that you read using this package, -look at [go-jsval](https://github.com/lestrrat-go/jsval), which allows you to +look at [go-jsval](https://github.com/lestrrat/go-jsval), which allows you to generate validators, so that you don't have to dynamically read in the JSON schema for each instance of your program. In the same lines, this package does not really care about loading external schemas from various locations (it's just easier to just gather all the schemas -in your local system). It *is* possible to do this via [go-jsref](https://github.com/lestrrat-go/jsref) +in your local system). It *is* possible to do this via [go-jsref](https://github.com/lestrrat/go-jsref) if you really want to do it. 
# BENCHMARKS @@ -86,7 +86,7 @@ PASS | Name | Notes | |:--------------------------------------------------------:|:---------------------------------| -| [go-jsval](https://github.com/lestrrat-go/jsval) | Validator generator | -| [go-jshschema](https://github.com/lestrrat-go/jshschema) | JSON Hyper Schema implementation | -| [go-jsref](https://github.com/lestrrat-go/jsref) | JSON Reference implementation | -| [go-jspointer](https://github.com/lestrrat-go/jspointer) | JSON Pointer implementations | +| [go-jsval](https://github.com/lestrrat/go-jsval) | Validator generator | +| [go-jshschema](https://github.com/lestrrat/go-jshschema) | JSON Hyper Schema implementation | +| [go-jsref](https://github.com/lestrrat/go-jsref) | JSON Reference implementation | +| [go-jspointer](https://github.com/lestrrat/go-jspointer) | JSON Pointer implementations | diff --git a/vendor/github.com/lestrrat/go-jsschema/benchmark_test.go b/vendor/github.com/lestrrat/go-jsschema/benchmark_test.go index df973b226d86..a774668f870a 100644 --- a/vendor/github.com/lestrrat/go-jsschema/benchmark_test.go +++ b/vendor/github.com/lestrrat/go-jsschema/benchmark_test.go @@ -6,8 +6,8 @@ import ( "strings" "testing" - schema "github.com/lestrrat-go/jsschema" - "github.com/lestrrat-go/jsschema/validator" + schema "github.com/lestrrat/go-jsschema" + "github.com/lestrrat/go-jsschema/validator" "github.com/xeipuuv/gojsonschema" ) diff --git a/vendor/github.com/lestrrat/go-jsschema/cmd/jsschema/jsschema.go b/vendor/github.com/lestrrat/go-jsschema/cmd/jsschema/jsschema.go index d1e39f97388f..e31a94aa335c 100644 --- a/vendor/github.com/lestrrat/go-jsschema/cmd/jsschema/jsschema.go +++ b/vendor/github.com/lestrrat/go-jsschema/cmd/jsschema/jsschema.go @@ -7,8 +7,8 @@ import ( "log" "os" - "github.com/lestrrat-go/jsschema" - "github.com/lestrrat-go/jsschema/validator" + "github.com/lestrrat/go-jsschema" + "github.com/lestrrat/go-jsschema/validator" ) func main() { diff --git a/vendor/github.com/lestrrat/go-jsschema/interface.go b/vendor/github.com/lestrrat/go-jsschema/interface.go index d47412b31531..416cfa7a7b09 100644 --- a/vendor/github.com/lestrrat/go-jsschema/interface.go +++ b/vendor/github.com/lestrrat/go-jsschema/interface.go @@ -5,7 +5,7 @@ import ( "regexp" "sync" - "github.com/lestrrat-go/jsref" + "github.com/lestrrat/go-jsref" ) const ( diff --git a/vendor/github.com/lestrrat/go-jsschema/marshal.go b/vendor/github.com/lestrrat/go-jsschema/marshal.go index 7a409cfb58e1..b08bc1c7d618 100644 --- a/vendor/github.com/lestrrat/go-jsschema/marshal.go +++ b/vendor/github.com/lestrrat/go-jsschema/marshal.go @@ -5,7 +5,7 @@ import ( "regexp" "strconv" - "github.com/lestrrat-go/pdebug" + "github.com/lestrrat/go-pdebug" "github.com/pkg/errors" ) diff --git a/vendor/github.com/lestrrat/go-jsschema/marshal_test.go b/vendor/github.com/lestrrat/go-jsschema/marshal_test.go index 584928bfed30..fc811d131711 100644 --- a/vendor/github.com/lestrrat/go-jsschema/marshal_test.go +++ b/vendor/github.com/lestrrat/go-jsschema/marshal_test.go @@ -5,8 +5,8 @@ import ( "strings" "testing" - "github.com/lestrrat-go/jsschema" - "github.com/lestrrat-go/jsschema/validator" + "github.com/lestrrat/go-jsschema" + "github.com/lestrrat/go-jsschema/validator" "github.com/stretchr/testify/assert" ) diff --git a/vendor/github.com/lestrrat/go-jsschema/schema.go b/vendor/github.com/lestrrat/go-jsschema/schema.go index 706ed7091292..8d2b2f9f7f37 100644 --- a/vendor/github.com/lestrrat/go-jsschema/schema.go +++ b/vendor/github.com/lestrrat/go-jsschema/schema.go @@ -8,9 +8,9 
@@ import ( "reflect" "strconv" - "github.com/lestrrat-go/jsref" - "github.com/lestrrat-go/jsref/provider" - "github.com/lestrrat-go/pdebug" + "github.com/lestrrat/go-jsref" + "github.com/lestrrat/go-jsref/provider" + "github.com/lestrrat/go-pdebug" "github.com/pkg/errors" ) diff --git a/vendor/github.com/lestrrat/go-jsschema/schema_example_test.go b/vendor/github.com/lestrrat/go-jsschema/schema_example_test.go index cd1dab975a18..9f69f228035a 100644 --- a/vendor/github.com/lestrrat/go-jsschema/schema_example_test.go +++ b/vendor/github.com/lestrrat/go-jsschema/schema_example_test.go @@ -3,8 +3,8 @@ package schema_test import ( "log" - "github.com/lestrrat-go/jsschema" - "github.com/lestrrat-go/jsschema/validator" + "github.com/lestrrat/go-jsschema" + "github.com/lestrrat/go-jsschema/validator" ) func Example() { diff --git a/vendor/github.com/lestrrat/go-jsschema/schema_test.go b/vendor/github.com/lestrrat/go-jsschema/schema_test.go index 499e8d11b26d..8c55f728f739 100644 --- a/vendor/github.com/lestrrat/go-jsschema/schema_test.go +++ b/vendor/github.com/lestrrat/go-jsschema/schema_test.go @@ -8,8 +8,8 @@ import ( "strings" "testing" - "github.com/lestrrat-go/jsschema" - "github.com/lestrrat-go/jsschema/validator" + "github.com/lestrrat/go-jsschema" + "github.com/lestrrat/go-jsschema/validator" "github.com/stretchr/testify/assert" ) diff --git a/vendor/github.com/lestrrat/go-jsschema/validator/validator.go b/vendor/github.com/lestrrat/go-jsschema/validator/validator.go index 365f01427593..b1930617733d 100644 --- a/vendor/github.com/lestrrat/go-jsschema/validator/validator.go +++ b/vendor/github.com/lestrrat/go-jsschema/validator/validator.go @@ -3,9 +3,9 @@ package validator import ( "sync" - "github.com/lestrrat-go/jsschema" - "github.com/lestrrat-go/jsval" - "github.com/lestrrat-go/jsval/builder" + "github.com/lestrrat/go-jsschema" + "github.com/lestrrat/go-jsval" + "github.com/lestrrat/go-jsval/builder" "github.com/pkg/errors" ) diff --git a/vendor/github.com/miekg/dns/.codecov.yml b/vendor/github.com/miekg/dns/.codecov.yml deleted file mode 100644 index f91e5c1fe57c..000000000000 --- a/vendor/github.com/miekg/dns/.codecov.yml +++ /dev/null @@ -1,8 +0,0 @@ -coverage: - status: - project: - default: - target: 40% - threshold: null - patch: false - changes: false diff --git a/vendor/github.com/miekg/dns/.travis.yml b/vendor/github.com/miekg/dns/.travis.yml index 542dd68c0ffe..1f056ab7ccc4 100644 --- a/vendor/github.com/miekg/dns/.travis.yml +++ b/vendor/github.com/miekg/dns/.travis.yml @@ -1,20 +1,7 @@ language: go sudo: false go: - - 1.9.x - - tip - -env: - - TESTS="-race -v -bench=. -coverprofile=coverage.txt -covermode=atomic" - - TESTS="-race -v ./..." - -before_install: - # don't use the miekg/dns when testing forks - - mkdir -p $GOPATH/src/github.com/miekg - - ln -s $TRAVIS_BUILD_DIR $GOPATH/src/github.com/miekg/ || true - + - 1.5 + - 1.6 script: - - go test $TESTS - -after_success: - - bash <(curl -s https://codecov.io/bash) + - go test -race -v -bench=. 
diff --git a/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/miekg/dns/CONTRIBUTORS index 5903779d81fa..f77e8a895f91 100644 --- a/vendor/github.com/miekg/dns/CONTRIBUTORS +++ b/vendor/github.com/miekg/dns/CONTRIBUTORS @@ -7,4 +7,3 @@ Marek Majkowski Peter van Dijk Omri Bahumi Alex Sergeyev -James Hartig diff --git a/vendor/github.com/miekg/dns/Gopkg.lock b/vendor/github.com/miekg/dns/Gopkg.lock deleted file mode 100644 index 0c73a64444f4..000000000000 --- a/vendor/github.com/miekg/dns/Gopkg.lock +++ /dev/null @@ -1,21 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - branch = "master" - name = "golang.org/x/crypto" - packages = ["ed25519","ed25519/internal/edwards25519"] - revision = "b080dc9a8c480b08e698fb1219160d598526310f" - -[[projects]] - branch = "master" - name = "golang.org/x/net" - packages = ["bpf","internal/iana","internal/socket","ipv4","ipv6"] - revision = "894f8ed5849b15b810ae41e9590a0d05395bba27" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "c4abc38abaeeeeb9be92455c9c02cae32841122b8982aaa067ef25bb8e86ff9d" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/miekg/dns/Gopkg.toml b/vendor/github.com/miekg/dns/Gopkg.toml deleted file mode 100644 index 2f655b2c7b3f..000000000000 --- a/vendor/github.com/miekg/dns/Gopkg.toml +++ /dev/null @@ -1,26 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - branch = "master" - name = "golang.org/x/crypto" diff --git a/vendor/github.com/miekg/dns/Makefile.fuzz b/vendor/github.com/miekg/dns/Makefile.fuzz deleted file mode 100644 index dc158c4acee1..000000000000 --- a/vendor/github.com/miekg/dns/Makefile.fuzz +++ /dev/null @@ -1,33 +0,0 @@ -# Makefile for fuzzing -# -# Use go-fuzz and needs the tools installed. -# See https://blog.cloudflare.com/dns-parser-meet-go-fuzzer/ -# -# Installing go-fuzz: -# $ make -f Makefile.fuzz get -# Installs: -# * github.com/dvyukov/go-fuzz/go-fuzz -# * get github.com/dvyukov/go-fuzz/go-fuzz-build - -all: build - -.PHONY: build -build: - go-fuzz-build -tags fuzz github.com/miekg/dns - -.PHONY: build-newrr -build-newrr: - go-fuzz-build -func FuzzNewRR -tags fuzz github.com/miekg/dns - -.PHONY: fuzz -fuzz: - go-fuzz -bin=dns-fuzz.zip -workdir=fuzz - -.PHONY: get -get: - go get github.com/dvyukov/go-fuzz/go-fuzz - go get github.com/dvyukov/go-fuzz/go-fuzz-build - -.PHONY: clean -clean: - rm *-fuzz.zip diff --git a/vendor/github.com/miekg/dns/Makefile.release b/vendor/github.com/miekg/dns/Makefile.release deleted file mode 100644 index 8fb748e8aaef..000000000000 --- a/vendor/github.com/miekg/dns/Makefile.release +++ /dev/null @@ -1,52 +0,0 @@ -# Makefile for releasing. -# -# The release is controlled from version.go. The version found there is -# used to tag the git repo, we're not building any artifects so there is nothing -# to upload to github. 
-# -# * Up the version in version.go -# * Run: make -f Makefile.release release -# * will *commit* your change with 'Release $VERSION' -# * push to github -# - -define GO -//+build ignore - -package main - -import ( - "fmt" - - "github.com/miekg/dns" -) - -func main() { - fmt.Println(dns.Version.String()) -} -endef - -$(file > version_release.go,$(GO)) -VERSION:=$(shell go run version_release.go) -TAG="v$(VERSION)" - -all: - @echo Use the \'release\' target to start a release $(VERSION) - rm -f version_release.go - -.PHONY: release -release: commit push - @echo Released $(VERSION) - rm -f version_release.go - -.PHONY: commit -commit: - @echo Committing release $(VERSION) - git commit -am"Release $(VERSION)" - git tag $(TAG) - -.PHONY: push -push: - @echo Pushing release $(VERSION) to master - git push --tags - git push diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 1ad23c75161d..83b4183eb80d 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -1,31 +1,29 @@ [![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns) -[![Code Coverage](https://img.shields.io/codecov/c/github/miekg/dns/master.svg)](https://codecov.io/github/miekg/dns?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/miekg/dns)](https://goreportcard.com/report/miekg/dns) -[![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns) # Alternative (more granular) approach to a DNS library > Less is more. -Complete and usable DNS library. All widely used Resource Records are supported, including the -DNSSEC types. It follows a lean and mean philosophy. If there is stuff you should know as a DNS -programmer there isn't a convenience function for it. Server side and client side programming is -supported, i.e. you can build servers and resolvers with it. +Complete and usable DNS library. All widely used Resource Records are +supported, including the DNSSEC types. It follows a lean and mean philosophy. +If there is stuff you should know as a DNS programmer there isn't a convenience +function for it. Server side and client side programming is supported, i.e. you +can build servers and resolvers with it. -We try to keep the "master" branch as sane as possible and at the bleeding edge of standards, -avoiding breaking changes wherever reasonable. We support the last two versions of Go. +We try to keep the "master" branch as sane as possible and at the bleeding edge +of standards, avoiding breaking changes wherever reasonable. We support the last +two versions of Go, currently: 1.5 and 1.6. # Goals * KISS; * Fast; -* Small API. If it's easy to code in Go, don't make a function for it. +* Small API, if its easy to code in Go, don't make a function for it. 
# Users A not-so-up-to-date-list-that-may-be-actually-current: -* https://github.com/coredns/coredns * https://cloudflare.com * https://github.com/abh/geodns * http://www.statdns.com/ @@ -52,18 +50,6 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://dnslookup.org * https://github.com/looterz/grimd * https://github.com/phamhongviet/serf-dns -* https://github.com/mehrdadrad/mylg -* https://github.com/bamarni/dockness -* https://github.com/fffaraz/microdns -* http://kelda.io -* https://github.com/ipdcode/hades (JD.COM) -* https://github.com/StackExchange/dnscontrol/ -* https://www.dnsperf.com/ -* https://dnssectest.net/ -* https://dns.apebits.com -* https://github.com/oif/apex -* https://github.com/jedisct1/dnscrypt-proxy -* https://github.com/jedisct1/rpdns Send pull request if you want to be listed here. @@ -76,7 +62,7 @@ Send pull request if you want to be listed here. * Parsing RRs ~ 100K RR/s, that's 5M records in about 50 seconds; * Server side programming (mimicking the net/http package); * Client side programming; -* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519; +* DNSSEC: signing, validating and key generation for DSA, RSA and ECDSA; * EDNS0, NSID, Cookies; * AXFR/IXFR; * TSIG, SIG(0); @@ -90,8 +76,8 @@ Miek Gieben - 2010-2012 - # Building -Building is done with the `go` tool. If you have setup your GOPATH correctly, the following should -work: +Building is done with the `go` tool. If you have setup your GOPATH +correctly, the following should work: go get github.com/miekg/dns go build github.com/miekg/dns @@ -152,13 +138,10 @@ Example programs can be found in the `github.com/miekg/exdns` repository. * 6975 - Algorithm Understanding in DNSSEC * 7043 - EUI48/EUI64 records * 7314 - DNS (EDNS) EXPIRE Option -* 7477 - CSYNC RR -* 7828 - edns-tcp-keepalive EDNS0 Option * 7553 - URI record -* 7858 - DNS over TLS: Initiation and Performance Considerations -* 7871 - EDNS0 Client Subnet +* 7858 - DNS over TLS: Initiation and Performance Considerations (draft) * 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies) -* 8080 - EdDSA for DNSSEC +* xxxx - EDNS0 DNS Update Lease (draft) ## Loosely based upon diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go index dd6b512afb66..1302e4e04c6d 100644 --- a/vendor/github.com/miekg/dns/client.go +++ b/vendor/github.com/miekg/dns/client.go @@ -4,62 +4,132 @@ package dns import ( "bytes" - "context" "crypto/tls" "encoding/binary" - "fmt" "io" - "io/ioutil" "net" - "net/http" - "strings" "time" ) -const ( - dnsTimeout time.Duration = 2 * time.Second - tcpIdleTimeout time.Duration = 8 * time.Second - - dohMimeType = "application/dns-message" -) +const dnsTimeout time.Duration = 2 * time.Second +const tcpIdleTimeout time.Duration = 8 * time.Second // A Conn represents a connection to a DNS server. type Conn struct { net.Conn // a net.Conn holding the connection UDPSize uint16 // minimum receive buffer for UDP messages - TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified + rtt time.Duration + t time.Time tsigRequestMAC string } // A Client defines parameters for a DNS client. 
type Client struct { - Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP) - UDPSize uint16 // minimum receive buffer for UDP messages - TLSConfig *tls.Config // TLS connection configuration - Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more - // Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout, - // WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and - // Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext) - Timeout time.Duration - DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero + Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP) + UDPSize uint16 // minimum receive buffer for UDP messages + TLSConfig *tls.Config // TLS connection configuration + Timeout time.Duration // a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout and WriteTimeout when non-zero + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - overridden by Timeout when that value is non-zero ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero - HTTPClient *http.Client // The http.Client to use for DNS-over-HTTPS - TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass group singleflight } // Exchange performs a synchronous UDP query. It sends the message m to the address -// contained in a and waits for a reply. Exchange does not retry a failed query, nor +// contained in a and waits for an reply. Exchange does not retry a failed query, nor // will it fall back to TCP in case of truncation. // See client.Exchange for more information on setting larger buffer sizes. func Exchange(m *Msg, a string) (r *Msg, err error) { - client := Client{Net: "udp"} - r, _, err = client.Exchange(m, a) + var co *Conn + co, err = DialTimeout("udp", a, dnsTimeout) + if err != nil { + return nil, err + } + + defer co.Close() + + opt := m.IsEdns0() + // If EDNS0 is used use that for size. + if opt != nil && opt.UDPSize() >= MinMsgSize { + co.UDPSize = opt.UDPSize() + } + + co.SetWriteDeadline(time.Now().Add(dnsTimeout)) + if err = co.WriteMsg(m); err != nil { + return nil, err + } + + co.SetReadDeadline(time.Now().Add(dnsTimeout)) + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } return r, err } +// ExchangeConn performs a synchronous query. It sends the message m via the connection +// c and waits for a reply. The connection c is not closed by ExchangeConn. 
+// This function is going away, but can easily be mimicked: +// +// co := &dns.Conn{Conn: c} // c is your net.Conn +// co.WriteMsg(m) +// in, _ := co.ReadMsg() +// co.Close() +// +func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { + println("dns: this function is deprecated") + co := new(Conn) + co.Conn = c + if err = co.WriteMsg(m); err != nil { + return nil, err + } + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, err +} + +// Exchange performs an synchronous query. It sends the message m to the address +// contained in a and waits for an reply. Basic use pattern with a *dns.Client: +// +// c := new(dns.Client) +// in, rtt, err := c.Exchange(message, "127.0.0.1:53") +// +// Exchange does not retry a failed query, nor will it fall back to TCP in +// case of truncation. +// It is up to the caller to create a message that allows for larger responses to be +// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger +// buffer, see SetEdns0. Messsages without an OPT RR will fallback to the historic limit +// of 512 bytes. +func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + if !c.SingleInflight { + return c.exchange(m, a) + } + // This adds a bunch of garbage, TODO(miek). + t := "nop" + if t1, ok := TypeToString[m.Question[0].Qtype]; ok { + t = t1 + } + cl := "nop" + if cl1, ok := ClassToString[m.Question[0].Qclass]; ok { + cl = cl1 + } + r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) { + return c.exchange(m, a) + }) + if err != nil { + return r, rtt, err + } + if shared { + return r.Copy(), rtt, nil + } + return r, rtt, nil +} + func (c *Client) dialTimeout() time.Duration { if c.Timeout != 0 { return c.Timeout @@ -84,97 +154,37 @@ func (c *Client) writeTimeout() time.Duration { return dnsTimeout } -// Dial connects to the address on the named network. -func (c *Client) Dial(address string) (conn *Conn, err error) { - // create a new dialer with the appropriate timeout - var d net.Dialer - if c.Dialer == nil { - d = net.Dialer{Timeout:c.getTimeoutForRequest(c.dialTimeout())} - } else { - d = net.Dialer(*c.Dialer) - } - +func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + var co *Conn network := "udp" - useTLS := false + tls := false switch c.Net { case "tcp-tls": network = "tcp" - useTLS = true + tls = true case "tcp4-tls": network = "tcp4" - useTLS = true + tls = true case "tcp6-tls": network = "tcp6" - useTLS = true + tls = true default: if c.Net != "" { network = c.Net } } - conn = new(Conn) - if useTLS { - conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig) - } else { - conn.Conn, err = d.Dial(network, address) - } - if err != nil { - return nil, err - } - return conn, nil -} - -// Exchange performs a synchronous query. It sends the message m to the address -// contained in a and waits for a reply. Basic use pattern with a *dns.Client: -// -// c := new(dns.Client) -// in, rtt, err := c.Exchange(message, "127.0.0.1:53") -// -// Exchange does not retry a failed query, nor will it fall back to TCP in -// case of truncation. -// It is up to the caller to create a message that allows for larger responses to be -// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger -// buffer, see SetEdns0. 
Messages without an OPT RR will fallback to the historic limit -// of 512 bytes -// To specify a local address or a timeout, the caller has to set the `Client.Dialer` -// attribute appropriately -func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) { - if !c.SingleInflight { - if c.Net == "https" { - // TODO(tmthrgd): pipe timeouts into exchangeDOH - return c.exchangeDOH(context.TODO(), m, address) - } - - return c.exchange(m, address) - } - - t := "nop" - if t1, ok := TypeToString[m.Question[0].Qtype]; ok { - t = t1 - } - cl := "nop" - if cl1, ok := ClassToString[m.Question[0].Qclass]; ok { - cl = cl1 + var deadline time.Time + if c.Timeout != 0 { + deadline = time.Now().Add(c.Timeout) } - r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) { - if c.Net == "https" { - // TODO(tmthrgd): pipe timeouts into exchangeDOH - return c.exchangeDOH(context.TODO(), m, address) - } - return c.exchange(m, address) - }) - if r != nil && shared { - r = r.Copy() + if tls { + co, err = DialTimeoutWithTLS(network, a, c.TLSConfig, c.dialTimeout()) + } else { + co, err = DialTimeout(network, a, c.dialTimeout()) } - return r, rtt, err -} - -func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { - var co *Conn - - co, err = c.Dial(a) if err != nil { return nil, 0, err @@ -192,88 +202,22 @@ func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro } co.TsigSecret = c.TsigSecret - t := time.Now() - // write with the appropriate write timeout - co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout()))) + co.SetWriteDeadline(deadlineOrTimeout(deadline, c.writeTimeout())) if err = co.WriteMsg(m); err != nil { return nil, 0, err } - co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout()))) + co.SetReadDeadline(deadlineOrTimeout(deadline, c.readTimeout())) r, err = co.ReadMsg() if err == nil && r.Id != m.Id { err = ErrId } - rtt = time.Since(t) - return r, rtt, err -} - -func (c *Client) exchangeDOH(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) { - p, err := m.Pack() - if err != nil { - return nil, 0, err - } - - req, err := http.NewRequest(http.MethodPost, a, bytes.NewReader(p)) - if err != nil { - return nil, 0, err - } - - req.Header.Set("Content-Type", dohMimeType) - req.Header.Set("Accept", dohMimeType) - - hc := http.DefaultClient - if c.HTTPClient != nil { - hc = c.HTTPClient - } - - if ctx != context.Background() && ctx != context.TODO() { - req = req.WithContext(ctx) - } - - t := time.Now() - - resp, err := hc.Do(req) - if err != nil { - return nil, 0, err - } - defer closeHTTPBody(resp.Body) - - if resp.StatusCode != http.StatusOK { - return nil, 0, fmt.Errorf("dns: server returned HTTP %d error: %q", resp.StatusCode, resp.Status) - } - - if ct := resp.Header.Get("Content-Type"); ct != dohMimeType { - return nil, 0, fmt.Errorf("dns: unexpected Content-Type %q; expected %q", ct, dohMimeType) - } - - p, err = ioutil.ReadAll(resp.Body) - if err != nil { - return nil, 0, err - } - - rtt = time.Since(t) - - r = new(Msg) - if err := r.Unpack(p); err != nil { - return r, 0, err - } - - // TODO: TSIG? Is it even supported over DoH? - - return r, rtt, nil -} - -func closeHTTPBody(r io.ReadCloser) error { - io.Copy(ioutil.Discard, io.LimitReader(r, 8<<20)) - return r.Close() + return r, co.rtt, err } // ReadMsg reads a message from the connection co. 
-// If the received message contains a TSIG record the transaction signature -// is verified. This method always tries to return the message, however if an -// error is returned there are no guarantees that the returned message is a -// valid representation of the packet read. +// If the received message contains a TSIG record the transaction +// signature is verified. func (co *Conn) ReadMsg() (*Msg, error) { p, err := co.ReadMsgHeader(nil) if err != nil { @@ -282,10 +226,13 @@ func (co *Conn) ReadMsg() (*Msg, error) { m := new(Msg) if err := m.Unpack(p); err != nil { - // If an error was returned, we still want to allow the user to use + // If ErrTruncated was returned, we still want to allow the user to use // the message, but naively they can just check err if they don't want - // to use an erroneous message - return m, err + // to use a truncated message + if err == ErrTruncated { + return m, err + } + return nil, err } if t := m.IsTsig(); t != nil { if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { @@ -318,6 +265,7 @@ func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) { } p = make([]byte, l) n, err = tcpRead(r, p) + co.rtt = time.Since(co.t) default: if co.UDPSize > MinMsgSize { p = make([]byte, co.UDPSize) @@ -325,6 +273,7 @@ func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) { p = make([]byte, MinMsgSize) } n, err = co.Read(p) + co.rtt = time.Since(co.t) } if err != nil { @@ -351,18 +300,6 @@ func tcpMsgLen(t io.Reader) (int, error) { if err != nil { return 0, err } - - // As seen with my local router/switch, returns 1 byte on the above read, - // resulting a a ShortRead. Just write it out (instead of loop) and read the - // other byte. - if n == 1 { - n1, err := t.Read(p[1:]) - if err != nil { - return 0, err - } - n += n1 - } - if n != 2 { return 0, ErrShortRead } @@ -437,6 +374,7 @@ func (co *Conn) WriteMsg(m *Msg) (err error) { if err != nil { return err } + co.t = time.Now() if _, err = co.Write(out); err != nil { return err } @@ -462,28 +400,10 @@ func (co *Conn) Write(p []byte) (n int, err error) { n, err := io.Copy(w, bytes.NewReader(p)) return int(n), err } - n, err = co.Conn.Write(p) + n, err = co.Conn.(*net.UDPConn).Write(p) return n, err } -// Return the appropriate timeout for a specific request -func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration { - var requestTimeout time.Duration - if c.Timeout != 0 { - requestTimeout = c.Timeout - } else { - requestTimeout = timeout - } - // net.Dialer.Timeout has priority if smaller than the timeouts computed so - // far - if c.Dialer != nil && c.Dialer.Timeout != 0 { - if c.Dialer.Timeout < requestTimeout { - requestTimeout = c.Dialer.Timeout - } - } - return requestTimeout -} - // Dial connects to the address on the named network. func Dial(network, address string) (conn *Conn, err error) { conn = new(Conn) @@ -494,43 +414,10 @@ func Dial(network, address string) (conn *Conn, err error) { return conn, nil } -// ExchangeContext performs a synchronous UDP query, like Exchange. It -// additionally obeys deadlines from the passed Context. -func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) { - client := Client{Net: "udp"} - r, _, err = client.ExchangeContext(ctx, m, a) - // ignorint rtt to leave the original ExchangeContext API unchanged, but - // this function will go away - return r, err -} - -// ExchangeConn performs a synchronous query. It sends the message m via the connection -// c and waits for a reply. The connection c is not closed by ExchangeConn. 
-// This function is going away, but can easily be mimicked: -// -// co := &dns.Conn{Conn: c} // c is your net.Conn -// co.WriteMsg(m) -// in, _ := co.ReadMsg() -// co.Close() -// -func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { - println("dns: ExchangeConn: this function is deprecated") - co := new(Conn) - co.Conn = c - if err = co.WriteMsg(m); err != nil { - return nil, err - } - r, err = co.ReadMsg() - if err == nil && r.Id != m.Id { - err = ErrId - } - return r, err -} - // DialTimeout acts like Dial but takes a timeout. func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) { - client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}} - conn, err = client.Dial(address) + conn = new(Conn) + conn.Conn, err = net.DialTimeout(network, address, timeout) if err != nil { return nil, err } @@ -539,12 +426,8 @@ func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, er // DialWithTLS connects to the address on the named network with TLS. func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) { - if !strings.HasSuffix(network, "-tls") { - network += "-tls" - } - client := Client{Net: network, TLSConfig: tlsConfig} - conn, err = client.Dial(address) - + conn = new(Conn) + conn.Conn, err = tls.Dial(network, address, tlsConfig) if err != nil { return nil, err } @@ -553,34 +436,20 @@ func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, er // DialTimeoutWithTLS acts like DialWithTLS but takes a timeout. func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) { - if !strings.HasSuffix(network, "-tls") { - network += "-tls" - } - client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig} - conn, err = client.Dial(address) + var dialer net.Dialer + dialer.Timeout = timeout + + conn = new(Conn) + conn.Conn, err = tls.DialWithDialer(&dialer, network, address, tlsConfig) if err != nil { return nil, err } return conn, nil } -// ExchangeContext acts like Exchange, but honors the deadline on the provided -// context, if present. If there is both a context deadline and a configured -// timeout on the client, the earliest of the two takes effect. -func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) { - if !c.SingleInflight && c.Net == "https" { - return c.exchangeDOH(ctx, m, a) - } - - var timeout time.Duration - if deadline, ok := ctx.Deadline(); !ok { - timeout = 0 - } else { - timeout = deadline.Sub(time.Now()) +func deadlineOrTimeout(deadline time.Time, timeout time.Duration) time.Time { + if deadline.IsZero() { + return time.Now().Add(timeout) } - // not passing the context to the underlying calls, as the API does not support - // context. For timeouts you should set up Client.Dialer and call Client.Exchange. 
- // TODO(tmthrgd): this is a race condition - c.Dialer = &net.Dialer{Timeout: timeout} - return c.Exchange(m, a) + return deadline } diff --git a/vendor/github.com/miekg/dns/client_test.go b/vendor/github.com/miekg/dns/client_test.go index 020e141bb9a3..850bcfcda61e 100644 --- a/vendor/github.com/miekg/dns/client_test.go +++ b/vendor/github.com/miekg/dns/client_test.go @@ -1,45 +1,19 @@ package dns import ( - "context" "crypto/tls" "fmt" "net" "strconv" - "strings" - "sync" "testing" "time" ) -func TestDialUDP(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer(":0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSOA) - - c := new(Client) - conn, err := c.Dial(addrstr) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - if conn == nil { - t.Fatalf("conn is nil") - } -} - func TestClientSync(t *testing.T) { HandleFunc("miek.nl.", HelloServer) defer HandleRemove("miek.nl.") - s, addrstr, err := RunLocalUDPServer(":0") + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") if err != nil { t.Fatalf("unable to run test server: %v", err) } @@ -51,12 +25,9 @@ func TestClientSync(t *testing.T) { c := new(Client) r, _, err := c.Exchange(m, addrstr) if err != nil { - t.Fatalf("failed to exchange: %v", err) - } - if r == nil { - t.Fatal("response is nil") + t.Errorf("failed to exchange: %v", err) } - if r.Rcode != RcodeSuccess { + if r != nil && r.Rcode != RcodeSuccess { t.Errorf("failed to get an valid answer\n%v", r) } // And now with plain Exchange(). @@ -69,42 +40,7 @@ func TestClientSync(t *testing.T) { } } -func TestClientLocalAddress(t *testing.T) { - HandleFunc("miek.nl.", HelloServerEchoAddrPort) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer(":0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSOA) - - c := new(Client) - laddr := net.UDPAddr{IP: net.ParseIP("0.0.0.0"), Port: 12345, Zone: ""} - c.Dialer = &net.Dialer{LocalAddr: &laddr} - r, _, err := c.Exchange(m, addrstr) - if err != nil { - t.Fatalf("failed to exchange: %v", err) - } - if r != nil && r.Rcode != RcodeSuccess { - t.Errorf("failed to get an valid answer\n%v", r) - } - if len(r.Extra) != 1 { - t.Errorf("failed to get additional answers\n%v", r) - } - txt := r.Extra[0].(*TXT) - if txt == nil { - t.Errorf("invalid TXT response\n%v", txt) - } - if len(txt.Txt) != 1 || !strings.Contains(txt.Txt[0], ":12345") { - t.Errorf("invalid TXT response\n%v", txt.Txt) - } -} - -func TestClientTLSSyncV4(t *testing.T) { +func TestClientTLSSync(t *testing.T) { HandleFunc("miek.nl.", HelloServer) defer HandleRemove("miek.nl.") @@ -117,7 +53,7 @@ func TestClientTLSSyncV4(t *testing.T) { Certificates: []tls.Certificate{cert}, } - s, addrstr, err := RunLocalTLSServer(":0", &config) + s, addrstr, err := RunLocalTLSServer("127.0.0.1:0", &config) if err != nil { t.Fatalf("unable to run test server: %v", err) } @@ -127,8 +63,6 @@ func TestClientTLSSyncV4(t *testing.T) { m.SetQuestion("miek.nl.", TypeSOA) c := new(Client) - - // test tcp-tls c.Net = "tcp-tls" c.TLSConfig = &tls.Config{ InsecureSkipVerify: true, @@ -136,38 +70,18 @@ func TestClientTLSSyncV4(t *testing.T) { r, _, err := c.Exchange(m, addrstr) if err != nil { - t.Fatalf("failed to exchange: %v", err) - } - if r == nil { - t.Fatal("response is nil") - } - if r.Rcode != RcodeSuccess { - 
t.Errorf("failed to get an valid answer\n%v", r) - } - - // test tcp4-tls - c.Net = "tcp4-tls" - c.TLSConfig = &tls.Config{ - InsecureSkipVerify: true, - } - - r, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Fatalf("failed to exchange: %v", err) - } - if r == nil { - t.Fatal("response is nil") + t.Errorf("failed to exchange: %v", err) } - if r.Rcode != RcodeSuccess { + if r != nil && r.Rcode != RcodeSuccess { t.Errorf("failed to get an valid answer\n%v", r) } } -func TestClientSyncBadID(t *testing.T) { - HandleFunc("miek.nl.", HelloServerBadID) +func TestClientSyncBadId(t *testing.T) { + HandleFunc("miek.nl.", HelloServerBadId) defer HandleRemove("miek.nl.") - s, addrstr, err := RunLocalUDPServer(":0") + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") if err != nil { t.Fatalf("unable to run test server: %v", err) } @@ -190,7 +104,7 @@ func TestClientEDNS0(t *testing.T) { HandleFunc("miek.nl.", HelloServer) defer HandleRemove("miek.nl.") - s, addrstr, err := RunLocalUDPServer(":0") + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") if err != nil { t.Fatalf("unable to run test server: %v", err) } @@ -204,11 +118,11 @@ func TestClientEDNS0(t *testing.T) { c := new(Client) r, _, err := c.Exchange(m, addrstr) if err != nil { - t.Fatalf("failed to exchange: %v", err) + t.Errorf("failed to exchange: %v", err) } if r != nil && r.Rcode != RcodeSuccess { - t.Errorf("failed to get a valid answer\n%v", r) + t.Errorf("failed to get an valid answer\n%v", r) } } @@ -237,7 +151,7 @@ func TestClientEDNS0Local(t *testing.T) { HandleFunc("miek.nl.", handler) defer HandleRemove("miek.nl.") - s, addrstr, err := RunLocalUDPServer(":0") + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") if err != nil { t.Fatalf("unable to run test server: %s", err) } @@ -255,14 +169,12 @@ func TestClientEDNS0Local(t *testing.T) { c := new(Client) r, _, err := c.Exchange(m, addrstr) if err != nil { - t.Fatalf("failed to exchange: %s", err) + t.Errorf("failed to exchange: %s", err) } - if r == nil { - t.Fatal("response is nil") - } - if r.Rcode != RcodeSuccess { - t.Fatal("failed to get a valid answer") + if r != nil && r.Rcode != RcodeSuccess { + t.Error("failed to get a valid answer") + t.Logf("%v\n", r) } txt := r.Extra[0].(*TXT).Txt[0] @@ -274,11 +186,41 @@ func TestClientEDNS0Local(t *testing.T) { got := r.Extra[1].(*OPT).Option[0].(*EDNS0_LOCAL).String() if got != optStr1 { t.Errorf("failed to get local edns0 answer; got %s, expected %s", got, optStr1) + t.Logf("%v\n", r) } got = r.Extra[1].(*OPT).Option[1].(*EDNS0_LOCAL).String() if got != optStr2 { t.Errorf("failed to get local edns0 answer; got %s, expected %s", got, optStr2) + t.Logf("%v\n", r) + } +} + +// ExampleTsigSecret_updateLeaseTSIG shows how to update a lease signed with TSIG +func ExampleTsigSecret_updateLeaseTSIG() { + m := new(Msg) + m.SetUpdate("t.local.ip6.io.") + rr, _ := NewRR("t.local.ip6.io. 30 A 127.0.0.1") + rrs := make([]RR, 1) + rrs[0] = rr + m.Insert(rrs) + + leaseRr := new(OPT) + leaseRr.Hdr.Name = "." 
+ leaseRr.Hdr.Rrtype = TypeOPT + e := new(EDNS0_UL) + e.Code = EDNS0UL + e.Lease = 120 + leaseRr.Option = append(leaseRr.Option, e) + m.Extra = append(m.Extra, leaseRr) + + c := new(Client) + m.SetTsig("polvi.", HmacMD5, 300, time.Now().Unix()) + c.TsigSecret = map[string]string{"polvi.": "pRZgBrBvI4NAHZYhxmhs/Q=="} + + _, _, err := c.Exchange(m, "127.0.0.1:53") + if err != nil { + panic(err) } } @@ -287,7 +229,7 @@ func TestClientConn(t *testing.T) { defer HandleRemove("miek.nl.") // This uses TCP just to make it slightly different than TestClientSync - s, addrstr, err := RunLocalTCPServer(":0") + s, addrstr, err := RunLocalTCPServer("127.0.0.1:0") if err != nil { t.Fatalf("unable to run test server: %v", err) } @@ -306,9 +248,6 @@ func TestClientConn(t *testing.T) { t.Errorf("failed to exchange: %v", err) } r, err := cn.ReadMsg() - if err != nil { - t.Errorf("failed to get a valid answer: %v", err) - } if r == nil || r.Rcode != RcodeSuccess { t.Errorf("failed to get an valid answer\n%v", r) } @@ -322,9 +261,6 @@ func TestClientConn(t *testing.T) { if buf == nil { t.Errorf("failed to get an valid answer\n%v", r) } - if err != nil { - t.Errorf("failed to get a valid answer: %v", err) - } if int(h.Bits&0xF) != RcodeSuccess { t.Errorf("failed to get an valid answer in ReadMsgHeader\n%v", r) } @@ -458,18 +394,18 @@ func TestTruncatedMsg(t *testing.T) { t.Errorf("error should not be ErrTruncated from question cutoff unpack: %v", err) } - // Finally, if we only have the header, we don't return an error. + // Finally, if we only have the header, we should still return an error buf1 = buf[:12] r = new(Msg) - if err = r.Unpack(buf1); err != nil { - t.Errorf("from header-only unpack should not return an error: %v", err) + if err = r.Unpack(buf1); err == nil || err != ErrTruncated { + t.Errorf("error not ErrTruncated from header-only unpack: %v", err) } } func TestTimeout(t *testing.T) { // Set up a dummy UDP server that won't respond - addr, err := net.ResolveUDPAddr("udp", ":0") + addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0") if err != nil { t.Fatalf("unable to resolve local udp address: %v", err) } @@ -486,7 +422,7 @@ func TestTimeout(t *testing.T) { // Use a channel + timeout to ensure we don't get stuck if the // Client Timeout is not working properly - done := make(chan struct{}, 2) + done := make(chan struct{}) timeout := time.Millisecond allowable := timeout + (10 * time.Millisecond) @@ -498,115 +434,19 @@ func TestTimeout(t *testing.T) { c := &Client{Timeout: timeout} _, _, err := c.Exchange(m, addrstr) if err == nil { - t.Error("no timeout using Client.Exchange") + t.Error("no timeout using Client") } done <- struct{}{} }() - go func() { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - c := &Client{} - _, _, err := c.ExchangeContext(ctx, m, addrstr) - if err == nil { - t.Error("no timeout using Client.ExchangeContext") - } - done <- struct{}{} - }() - - // Wait for both the Exchange and ExchangeContext tests to be done. 
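// A compact sketch of the Client.Timeout behaviour this test exercises: a
// single duration bounds the whole exchange (the address is a placeholder).
//
//	c := &Client{Timeout: 500 * time.Millisecond}
//	m := new(Msg)
//	m.SetQuestion("example.org.", TypeSOA)
//	_, _, err := c.Exchange(m, "192.0.2.1:53") // errs once nothing answers in time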
- for i := 0; i < 2; i++ { - select { - case <-done: - case <-time.After(abortAfter): - } + select { + case <-done: + case <-time.After(abortAfter): } length := time.Since(start) if length > allowable { - t.Errorf("exchange took longer %v than specified Timeout %v", length, allowable) - } -} - -// Check that responses from deduplicated requests aren't shared between callers -func TestConcurrentExchanges(t *testing.T) { - cases := make([]*Msg, 2) - cases[0] = new(Msg) - cases[1] = new(Msg) - cases[1].Truncated = true - for _, m := range cases { - block := make(chan struct{}) - waiting := make(chan struct{}) - - handler := func(w ResponseWriter, req *Msg) { - r := m.Copy() - r.SetReply(req) - - waiting <- struct{}{} - <-block - w.WriteMsg(r) - } - - HandleFunc("miek.nl.", handler) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer(":0") - if err != nil { - t.Fatalf("unable to run test server: %s", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSRV) - c := &Client{ - SingleInflight: true, - } - r := make([]*Msg, 2) - - var wg sync.WaitGroup - wg.Add(len(r)) - for i := 0; i < len(r); i++ { - go func(i int) { - defer wg.Done() - r[i], _, _ = c.Exchange(m.Copy(), addrstr) - if r[i] == nil { - t.Errorf("response %d is nil", i) - } - }(i) - } - select { - case <-waiting: - case <-time.After(time.Second): - t.FailNow() - } - close(block) - wg.Wait() - - if r[0] == r[1] { - t.Errorf("got same response, expected non-shared responses") - } + t.Errorf("exchange took longer (%v) than specified Timeout (%v)", length, timeout) } } - -func TestDoHExchange(t *testing.T) { - const addrstr = "https://dns.cloudflare.com/dns-query" - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSOA) - - cl := &Client{Net: "https"} - - r, _, err := cl.Exchange(m, addrstr) - if err != nil { - t.Fatalf("failed to exchange: %v", err) - } - - if r == nil || r.Rcode != RcodeSuccess { - t.Errorf("failed to get an valid answer\n%v", r) - } - - t.Log(r) - - // TODO: proper tests for this -} diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go index f13cfa30cb54..cfa9ad0b228e 100644 --- a/vendor/github.com/miekg/dns/clientconfig.go +++ b/vendor/github.com/miekg/dns/clientconfig.go @@ -2,7 +2,6 @@ package dns import ( "bufio" - "io" "os" "strconv" "strings" @@ -26,13 +25,8 @@ func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) { return nil, err } defer file.Close() - return ClientConfigFromReader(file) -} - -// ClientConfigFromReader works like ClientConfigFromFile but takes an io.Reader as argument -func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) { c := new(ClientConfig) - scanner := bufio.NewScanner(resolvconf) + scanner := bufio.NewScanner(file) c.Servers = make([]string, 0) c.Search = make([]string, 0) c.Port = "53" @@ -79,10 +73,8 @@ func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) { switch { case len(s) >= 6 && s[:6] == "ndots:": n, _ := strconv.Atoi(s[6:]) - if n < 0 { - n = 0 - } else if n > 15 { - n = 15 + if n < 1 { + n = 1 } c.Ndots = n case len(s) >= 8 && s[:8] == "timeout:": @@ -91,7 +83,7 @@ func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) { n = 1 } c.Timeout = n - case len(s) >= 9 && s[:9] == "attempts:": + case len(s) >= 8 && s[:9] == "attempts:": n, _ := strconv.Atoi(s[9:]) if n < 1 { n = 1 @@ -105,35 +97,3 @@ func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) { } return c, nil } - -// NameList returns all 
of the names that should be queried based on the -// config. It is based off of go's net/dns name building, but it does not -// check the length of the resulting names. -func (c *ClientConfig) NameList(name string) []string { - // if this domain is already fully qualified, no append needed. - if IsFqdn(name) { - return []string{name} - } - - // Check to see if the name has more labels than Ndots. Do this before making - // the domain fully qualified. - hasNdots := CountLabel(name) > c.Ndots - // Make the domain fully qualified. - name = Fqdn(name) - - // Make a list of names based off search. - names := []string{} - - // If name has enough dots, try that first. - if hasNdots { - names = append(names, name) - } - for _, s := range c.Search { - names = append(names, Fqdn(name+s)) - } - // If we didn't have enough dots, try after suffixes. - if !hasNdots { - names = append(names, name) - } - return names -} diff --git a/vendor/github.com/miekg/dns/clientconfig_test.go b/vendor/github.com/miekg/dns/clientconfig_test.go index ad5d7d086330..63bc5c814b33 100644 --- a/vendor/github.com/miekg/dns/clientconfig_test.go +++ b/vendor/github.com/miekg/dns/clientconfig_test.go @@ -4,7 +4,6 @@ import ( "io/ioutil" "os" "path/filepath" - "strings" "testing" ) @@ -21,76 +20,6 @@ nameserver 10.28.10.2 nameserver 11.28.10.1` // <- NOTE: NO newline. func testConfig(t *testing.T, data string) { - cc, err := ClientConfigFromReader(strings.NewReader(data)) - if err != nil { - t.Errorf("error parsing resolv.conf: %v", err) - } - if l := len(cc.Servers); l != 2 { - t.Errorf("incorrect number of nameservers detected: %d", l) - } - if l := len(cc.Search); l != 1 { - t.Errorf("domain directive not parsed correctly: %v", cc.Search) - } else { - if cc.Search[0] != "somedomain.com" { - t.Errorf("domain is unexpected: %v", cc.Search[0]) - } - } -} - -func TestNameserver(t *testing.T) { testConfig(t, normal) } -func TestMissingFinalNewLine(t *testing.T) { testConfig(t, missingNewline) } - -func TestNdots(t *testing.T) { - ndotsVariants := map[string]int{ - "options ndots:0": 0, - "options ndots:1": 1, - "options ndots:15": 15, - "options ndots:16": 15, - "options ndots:-1": 0, - "": 1, - } - - for data := range ndotsVariants { - cc, err := ClientConfigFromReader(strings.NewReader(data)) - if err != nil { - t.Errorf("error parsing resolv.conf: %v", err) - } - if cc.Ndots != ndotsVariants[data] { - t.Errorf("Ndots not properly parsed: (Expected: %d / Was: %d)", ndotsVariants[data], cc.Ndots) - } - } -} - -func TestClientConfigFromReaderAttempts(t *testing.T) { - testCases := []struct { - data string - expected int - }{ - {data: "options attempts:0", expected: 1}, - {data: "options attempts:1", expected: 1}, - {data: "options attempts:15", expected: 15}, - {data: "options attempts:16", expected: 16}, - {data: "options attempts:-1", expected: 1}, - {data: "options attempt:", expected: 2}, - } - - for _, test := range testCases { - test := test - t.Run(strings.Replace(test.data, ":", " ", -1), func(t *testing.T) { - t.Parallel() - - cc, err := ClientConfigFromReader(strings.NewReader(test.data)) - if err != nil { - t.Errorf("error parsing resolv.conf: %v", err) - } - if cc.Attempts != test.expected { - t.Errorf("A attempts not properly parsed: (Expected: %d / Was: %d)", test.expected, cc.Attempts) - } - }) - } -} - -func TestReadFromFile(t *testing.T) { tempDir, err := ioutil.TempDir("", "") if err != nil { t.Fatalf("tempDir: %v", err) @@ -98,7 +27,7 @@ func TestReadFromFile(t *testing.T) { defer os.RemoveAll(tempDir) path := 
filepath.Join(tempDir, "resolv.conf") - if err := ioutil.WriteFile(path, []byte(normal), 0644); err != nil { + if err := ioutil.WriteFile(path, []byte(data), 0644); err != nil { t.Fatalf("writeFile: %v", err) } cc, err := ClientConfigFromFile(path) @@ -117,65 +46,5 @@ func TestReadFromFile(t *testing.T) { } } -func TestNameListNdots1(t *testing.T) { - cfg := ClientConfig{ - Ndots: 1, - } - // fqdn should be only result returned - names := cfg.NameList("miek.nl.") - if len(names) != 1 { - t.Errorf("NameList returned != 1 names: %v", names) - } else if names[0] != "miek.nl." { - t.Errorf("NameList didn't return sent fqdn domain: %v", names[0]) - } - - cfg.Search = []string{ - "test", - } - // Sent domain has NDots and search - names = cfg.NameList("miek.nl") - if len(names) != 2 { - t.Errorf("NameList returned != 2 names: %v", names) - } else if names[0] != "miek.nl." { - t.Errorf("NameList didn't return sent domain first: %v", names[0]) - } else if names[1] != "miek.nl.test." { - t.Errorf("NameList didn't return search last: %v", names[1]) - } -} -func TestNameListNdots2(t *testing.T) { - cfg := ClientConfig{ - Ndots: 2, - } - - // Sent domain has less than NDots and search - cfg.Search = []string{ - "test", - } - names := cfg.NameList("miek.nl") - - if len(names) != 2 { - t.Errorf("NameList returned != 2 names: %v", names) - } else if names[0] != "miek.nl.test." { - t.Errorf("NameList didn't return search first: %v", names[0]) - } else if names[1] != "miek.nl." { - t.Errorf("NameList didn't return sent domain last: %v", names[1]) - } -} - -func TestNameListNdots0(t *testing.T) { - cfg := ClientConfig{ - Ndots: 0, - } - cfg.Search = []string{ - "test", - } - // Sent domain has less than NDots and search - names := cfg.NameList("miek") - if len(names) != 2 { - t.Errorf("NameList returned != 2 names: %v", names) - } else if names[0] != "miek." { - t.Errorf("NameList didn't return search first: %v", names[0]) - } else if names[1] != "miek.test." { - t.Errorf("NameList didn't return sent domain last: %v", names[1]) - } -} +func TestNameserver(t *testing.T) { testConfig(t, normal) } +func TestMissingFinalNewLine(t *testing.T) { testConfig(t, missingNewline) } diff --git a/vendor/github.com/miekg/dns/compress_generate.go b/vendor/github.com/miekg/dns/compress_generate.go deleted file mode 100644 index 9a136c414ad7..000000000000 --- a/vendor/github.com/miekg/dns/compress_generate.go +++ /dev/null @@ -1,198 +0,0 @@ -//+build ignore - -// compression_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will look to see if there are (compressible) names, if so it will add that -// type to compressionLenHelperType and comressionLenSearchType which "fake" the -// compression so that Len() is fast. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" -) - -var packageHdr = ` -// Code generated by "go run compress_generate.go"; DO NOT EDIT. - -package dns - -` - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. 
-func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - var domainTypes []string // Types that have a domain name in them (either compressible or not). - var cdomainTypes []string // Types that have a compressible domain name in them (subset of domainType) -Names: - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - st, _ := getTypeStruct(o.Type(), scope) - if st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - for i := 1; i < st.NumFields(); i++ { - if _, ok := st.Field(i).Type().(*types.Slice); ok { - if st.Tag(i) == `dns:"domain-name"` { - domainTypes = append(domainTypes, o.Name()) - continue Names - } - if st.Tag(i) == `dns:"cdomain-name"` { - cdomainTypes = append(cdomainTypes, o.Name()) - domainTypes = append(domainTypes, o.Name()) - continue Names - } - continue - } - - switch { - case st.Tag(i) == `dns:"domain-name"`: - domainTypes = append(domainTypes, o.Name()) - continue Names - case st.Tag(i) == `dns:"cdomain-name"`: - cdomainTypes = append(cdomainTypes, o.Name()) - domainTypes = append(domainTypes, o.Name()) - continue Names - } - } - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - // compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names - - fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR, initLen int) int {\n") - fmt.Fprint(b, "currentLen := initLen\n") - fmt.Fprint(b, "switch x := r.(type) {\n") - for _, name := range domainTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "case *%s:\n", name) - for i := 1; i < st.NumFields(); i++ { - out := func(s string) { - fmt.Fprintf(b, "currentLen -= len(x.%s) + 1\n", st.Field(i).Name()) - fmt.Fprintf(b, "currentLen += compressionLenHelper(c, x.%s, currentLen)\n", st.Field(i).Name()) - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"domain-name"`: - fallthrough - case `dns:"cdomain-name"`: - // For HIP we need to slice over the elements in this slice. - fmt.Fprintf(b, `for i := range x.%s { - currentLen -= len(x.%s[i]) + 1 -} -`, st.Field(i).Name(), st.Field(i).Name()) - fmt.Fprintf(b, `for i := range x.%s { - currentLen += compressionLenHelper(c, x.%s[i], currentLen) -} -`, st.Field(i).Name(), st.Field(i).Name()) - } - continue - } - - switch { - case st.Tag(i) == `dns:"cdomain-name"`: - fallthrough - case st.Tag(i) == `dns:"domain-name"`: - out(st.Field(i).Name()) - } - } - } - fmt.Fprintln(b, "}\nreturn currentLen - initLen\n}\n\n") - - // compressionLenSearchType - search cdomain-tags types for compressible names. 
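// Since the generator above is tagged "+build ignore" it never builds into
// the package; it is run on demand, e.g. (assumed invocation):
//
//	go run compress_generate.go   // regenerates zcompress.go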
- - fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool, int) {\n") - fmt.Fprint(b, "switch x := r.(type) {\n") - for _, name := range cdomainTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "case *%s:\n", name) - j := 1 - for i := 1; i < st.NumFields(); i++ { - out := func(s string, j int) { - fmt.Fprintf(b, "k%d, ok%d, sz%d := compressionLenSearch(c, x.%s)\n", j, j, j, st.Field(i).Name()) - } - - // There are no slice types with names that can be compressed. - - switch { - case st.Tag(i) == `dns:"cdomain-name"`: - out(st.Field(i).Name(), j) - j++ - } - } - k := "k1" - ok := "ok1" - sz := "sz1" - for i := 2; i < j; i++ { - k += fmt.Sprintf(" + k%d", i) - ok += fmt.Sprintf(" && ok%d", i) - sz += fmt.Sprintf(" + sz%d", i) - } - fmt.Fprintf(b, "return %s, %s, %s\n", k, ok, sz) - } - fmt.Fprintln(b, "}\nreturn 0, false, 0\n}\n\n") - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - f, err := os.Create("zcompress.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/dane.go b/vendor/github.com/miekg/dns/dane.go deleted file mode 100644 index 8c4a14ef1906..000000000000 --- a/vendor/github.com/miekg/dns/dane.go +++ /dev/null @@ -1,43 +0,0 @@ -package dns - -import ( - "crypto/sha256" - "crypto/sha512" - "crypto/x509" - "encoding/hex" - "errors" -) - -// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records. -func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) { - switch matchingType { - case 0: - switch selector { - case 0: - return hex.EncodeToString(cert.Raw), nil - case 1: - return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil - } - case 1: - h := sha256.New() - switch selector { - case 0: - h.Write(cert.Raw) - return hex.EncodeToString(h.Sum(nil)), nil - case 1: - h.Write(cert.RawSubjectPublicKeyInfo) - return hex.EncodeToString(h.Sum(nil)), nil - } - case 2: - h := sha512.New() - switch selector { - case 0: - h.Write(cert.Raw) - return hex.EncodeToString(h.Sum(nil)), nil - case 1: - h.Write(cert.RawSubjectPublicKeyInfo) - return hex.EncodeToString(h.Sum(nil)), nil - } - } - return "", errors.New("dns: bad MatchingType or Selector") -} diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go index 14e18b0b38f6..cf456165f435 100644 --- a/vendor/github.com/miekg/dns/defaults.go +++ b/vendor/github.com/miekg/dns/defaults.go @@ -13,12 +13,9 @@ const hexDigit = "0123456789abcdef" // SetReply creates a reply message from a request message. func (dns *Msg) SetReply(request *Msg) *Msg { dns.Id = request.Id + dns.RecursionDesired = request.RecursionDesired // Copy rd bit dns.Response = true - dns.Opcode = request.Opcode - if dns.Opcode == OpcodeQuery { - dns.RecursionDesired = request.RecursionDesired // Copy rd bit - dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit - } + dns.Opcode = OpcodeQuery dns.Rcode = RcodeSuccess if len(request.Question) > 0 { dns.Question = make([]Question, 1) @@ -105,11 +102,11 @@ func (dns *Msg) SetAxfr(z string) *Msg { // SetTsig appends a TSIG RR to the message. // This is only a skeleton TSIG RR that is added as the last RR in the // additional section. The Tsig is calculated when the message is being send. 
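// A one-line sketch of the CertificateToDANE helper deleted above; the
// certificate is assumed to come from an established TLS session. Selector 1
// picks the SubjectPublicKeyInfo, matching type 1 a SHA-256 digest.
//
//	hexDigest, err := CertificateToDANE(1, 1, cert) // hex for a TLSA/SMIMEA RR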
-func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg { +func (dns *Msg) SetTsig(z, algo string, fudge, timesigned int64) *Msg { t := new(TSIG) t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0} t.Algorithm = algo - t.Fudge = fudge + t.Fudge = 300 t.TimeSigned = uint64(timesigned) t.OrigId = dns.Id dns.Extra = append(dns.Extra, t) @@ -273,11 +270,8 @@ func (t Type) String() string { // String returns the string representation for the class c. func (c Class) String() string { - if s, ok := ClassToString[uint16(c)]; ok { - // Only emit mnemonics when they are unambiguous, specically ANY is in both. - if _, ok := StringToType[s]; !ok { - return s - } + if c1, ok := ClassToString[uint16(c)]; ok { + return c1 } return "CLASS" + strconv.Itoa(int(c)) } diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go index e7557f51a815..b3292287ce75 100644 --- a/vendor/github.com/miekg/dns/dns.go +++ b/vendor/github.com/miekg/dns/dns.go @@ -6,12 +6,9 @@ const ( year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits. defaultTtl = 3600 // Default internal TTL. - // DefaultMsgSize is the standard default for messages larger than 512 bytes. - DefaultMsgSize = 4096 - // MinMsgSize is the minimal size of a DNS packet. - MinMsgSize = 512 - // MaxMsgSize is the largest possible DNS packet. - MaxMsgSize = 65535 + DefaultMsgSize = 4096 // DefaultMsgSize is the standard default for messages larger than 512 bytes. + MinMsgSize = 512 // MinMsgSize is the minimal size of a DNS packet. + MaxMsgSize = 65535 // MaxMsgSize is the largest possible DNS packet. ) // Error represents a DNS error. @@ -55,6 +52,16 @@ func (h *RR_Header) Header() *RR_Header { return h } // Just to implement the RR interface. func (h *RR_Header) copy() RR { return nil } +func (h *RR_Header) copyHeader() *RR_Header { + r := new(RR_Header) + r.Name = h.Name + r.Rrtype = h.Rrtype + r.Class = h.Class + r.Ttl = h.Ttl + r.Rdlength = h.Rdlength + return r +} + func (h *RR_Header) String() string { var s string diff --git a/vendor/github.com/miekg/dns/dns_bench_test.go b/vendor/github.com/miekg/dns/dns_bench_test.go index 7bf8bd21104c..bccc3d5404d4 100644 --- a/vendor/github.com/miekg/dns/dns_bench_test.go +++ b/vendor/github.com/miekg/dns/dns_bench_test.go @@ -17,26 +17,7 @@ func BenchmarkMsgLength(b *testing.B) { return msg } name1 := "12345678901234567890123456789012345.12345678.123." - rrMx := testRR(name1 + " 3600 IN MX 10 " + name1) - msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) - b.StartTimer() - for i := 0; i < b.N; i++ { - msg.Len() - } -} - -func BenchmarkMsgLengthNoCompression(b *testing.B) { - b.StopTimer() - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - return msg - } - name1 := "12345678901234567890123456789012345.12345678.123." - rrMx := testRR(name1 + " 3600 IN MX 10 " + name1) + rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) b.StartTimer() for i := 0; i < b.N; i++ { @@ -55,7 +36,7 @@ func BenchmarkMsgLengthPack(b *testing.B) { return msg } name1 := "12345678901234567890123456789012345.12345678.123." 
- rrMx := testRR(name1 + " 3600 IN MX 10 " + name1) + rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -96,11 +77,11 @@ func BenchmarkCopy(b *testing.B) { b.ReportAllocs() m := new(Msg) m.SetQuestion("miek.nl.", TypeA) - rr := testRR("miek.nl. 2311 IN A 127.0.0.1") + rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") m.Answer = []RR{rr} - rr = testRR("miek.nl. 2311 IN NS 127.0.0.1") + rr, _ = NewRR("miek.nl. 2311 IN NS 127.0.0.1") m.Ns = []RR{rr} - rr = testRR("miek.nl. 2311 IN A 127.0.0.1") + rr, _ = NewRR("miek.nl. 2311 IN A 127.0.0.1") m.Extra = []RR{rr} b.ResetTimer() @@ -158,7 +139,7 @@ func BenchmarkUnpackMX(b *testing.B) { } func BenchmarkPackAAAAA(b *testing.B) { - aaaa := testRR(". IN A ::1") + aaaa, _ := NewRR(". IN A ::1") buf := make([]byte, aaaa.len()) b.ReportAllocs() @@ -169,7 +150,7 @@ func BenchmarkPackAAAAA(b *testing.B) { } func BenchmarkUnpackAAAA(b *testing.B) { - aaaa := testRR(". IN A ::1") + aaaa, _ := NewRR(". IN A ::1") buf := make([]byte, aaaa.len()) PackRR(aaaa, buf, 0, nil, false) @@ -192,7 +173,7 @@ func BenchmarkPackMsg(b *testing.B) { return msg } name1 := "12345678901234567890123456789012345.12345678.123." - rrMx := testRR(name1 + " 3600 IN MX 10 " + name1) + rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) buf := make([]byte, 512) b.ReportAllocs() @@ -213,7 +194,7 @@ func BenchmarkUnpackMsg(b *testing.B) { return msg } name1 := "12345678901234567890123456789012345.12345678.123." - rrMx := testRR(name1 + " 3600 IN MX 10 " + name1) + rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) msgBuf, _ := msg.Pack() b.ReportAllocs() diff --git a/vendor/github.com/miekg/dns/dns_test.go b/vendor/github.com/miekg/dns/dns_test.go index 3c9d910d4722..ad68533fd554 100644 --- a/vendor/github.com/miekg/dns/dns_test.go +++ b/vendor/github.com/miekg/dns/dns_test.go @@ -1,7 +1,6 @@ package dns import ( - "bytes" "encoding/hex" "net" "testing" @@ -127,30 +126,199 @@ func TestBailiwick(t *testing.T) { } } +func TestPack(t *testing.T) { + rr := []string{"US. 86400 IN NSEC 0-.us. NS SOA RRSIG NSEC DNSKEY TYPE65534"} + m := new(Msg) + var err error + m.Answer = make([]RR, 1) + for _, r := range rr { + m.Answer[0], err = NewRR(r) + if err != nil { + t.Errorf("failed to create RR: %v", err) + continue + } + if _, err := m.Pack(); err != nil { + t.Errorf("packing failed: %v", err) + } + } + x := new(Msg) + ns, _ := NewRR("pool.ntp.org. 390 IN NS a.ntpns.org") + ns.(*NS).Ns = "a.ntpns.org" + x.Ns = append(m.Ns, ns) + x.Ns = append(m.Ns, ns) + x.Ns = append(m.Ns, ns) + // This crashes due to the fact the a.ntpns.org isn't a FQDN + // How to recover() from a remove panic()? + if _, err := x.Pack(); err == nil { + t.Error("packing should fail") + } + x.Answer = make([]RR, 1) + x.Answer[0], err = NewRR(rr[0]) + if _, err := x.Pack(); err == nil { + t.Error("packing should fail") + } + x.Question = make([]Question, 1) + x.Question[0] = Question{";sd#edddds鍛↙赏‘℅∥↙xzztsestxssweewwsssstx@s@Z嵌e@cn.pool.ntp.org.", TypeA, ClassINET} + if _, err := x.Pack(); err == nil { + t.Error("packing should fail") + } +} + func TestPackNAPTR(t *testing.T) { for _, n := range []string{ `apple.com. IN NAPTR 100 50 "se" "SIP+D2U" "" _sip._udp.apple.com.`, `apple.com. IN NAPTR 90 50 "se" "SIP+D2T" "" _sip._tcp.apple.com.`, `apple.com. 
IN NAPTR 50 50 "se" "SIPS+D2T" "" _sips._tcp.apple.com.`, } { - rr := testRR(n) + rr, _ := NewRR(n) msg := make([]byte, rr.len()) if off, err := PackRR(rr, msg, 0, nil, false); err != nil { t.Errorf("packing failed: %v", err) t.Errorf("length %d, need more than %d", rr.len(), off) + } else { + t.Logf("buf size needed: %d", off) + } + } +} + +func TestCompressLength(t *testing.T) { + m := new(Msg) + m.SetQuestion("miek.nl", TypeMX) + ul := m.Len() + m.Compress = true + if ul != m.Len() { + t.Fatalf("should be equal") + } +} + +// Does the predicted length match final packed length? +func TestMsgCompressLength(t *testing.T) { + makeMsg := func(question string, ans, ns, e []RR) *Msg { + msg := new(Msg) + msg.SetQuestion(Fqdn(question), TypeANY) + msg.Answer = append(msg.Answer, ans...) + msg.Ns = append(msg.Ns, ns...) + msg.Extra = append(msg.Extra, e...) + msg.Compress = true + return msg + } + + name1 := "12345678901234567890123456789012345.12345678.123." + rrA, _ := NewRR(name1 + " 3600 IN A 192.0.2.1") + rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) + tests := []*Msg{ + makeMsg(name1, []RR{rrA}, nil, nil), + makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)} + + for _, msg := range tests { + predicted := msg.Len() + buf, err := msg.Pack() + if err != nil { + t.Error(err) + } + if predicted < len(buf) { + t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", + msg.Question[0].Name, len(msg.Answer), predicted, len(buf)) } } } +func TestMsgLength(t *testing.T) { + makeMsg := func(question string, ans, ns, e []RR) *Msg { + msg := new(Msg) + msg.SetQuestion(Fqdn(question), TypeANY) + msg.Answer = append(msg.Answer, ans...) + msg.Ns = append(msg.Ns, ns...) + msg.Extra = append(msg.Extra, e...) + return msg + } + + name1 := "12345678901234567890123456789012345.12345678.123." + rrA, _ := NewRR(name1 + " 3600 IN A 192.0.2.1") + rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) + tests := []*Msg{ + makeMsg(name1, []RR{rrA}, nil, nil), + makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)} + + for _, msg := range tests { + predicted := msg.Len() + buf, err := msg.Pack() + if err != nil { + t.Error(err) + } + if predicted < len(buf) { + t.Errorf("predicted length is wrong: predicted %s (len=%d), actual %d", + msg.Question[0].Name, predicted, len(buf)) + } + } +} + +func TestMsgLength2(t *testing.T) { + // Serialized replies + var testMessages = []string{ + // google.com. IN A? + "064e81800001000b0004000506676f6f676c6503636f6d0000010001c00c00010001000000050004adc22986c00c00010001000000050004adc22987c00c00010001000000050004adc22988c00c00010001000000050004adc22989c00c00010001000000050004adc2298ec00c00010001000000050004adc22980c00c00010001000000050004adc22981c00c00010001000000050004adc22982c00c00010001000000050004adc22983c00c00010001000000050004adc22984c00c00010001000000050004adc22985c00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc0d800010001000000050004d8ef200ac0ea00010001000000050004d8ef220ac0fc00010001000000050004d8ef240ac10e00010001000000050004d8ef260a0000290500000000050000", + // amazon.com. IN A? 
(reply has no EDNS0 record) + // TODO(miek): this one is off-by-one, need to find out why + //"6de1818000010004000a000806616d617a6f6e03636f6d0000010001c00c000100010000000500044815c2d4c00c000100010000000500044815d7e8c00c00010001000000050004b02062a6c00c00010001000000050004cdfbf236c00c000200010000000500140570646e733408756c747261646e73036f726700c00c000200010000000500150570646e733508756c747261646e7304696e666f00c00c000200010000000500160570646e733608756c747261646e7302636f02756b00c00c00020001000000050014036e7331037033310664796e656374036e657400c00c00020001000000050006036e7332c0cfc00c00020001000000050006036e7333c0cfc00c00020001000000050006036e7334c0cfc00c000200010000000500110570646e733108756c747261646e73c0dac00c000200010000000500080570646e7332c127c00c000200010000000500080570646e7333c06ec0cb00010001000000050004d04e461fc0eb00010001000000050004cc0dfa1fc0fd00010001000000050004d04e471fc10f00010001000000050004cc0dfb1fc12100010001000000050004cc4a6c01c121001c000100000005001020010502f3ff00000000000000000001c13e00010001000000050004cc4a6d01c13e001c0001000000050010261000a1101400000000000000000001", + // yahoo.com. IN A? + "fc2d81800001000300070008057961686f6f03636f6d0000010001c00c00010001000000050004628afd6dc00c00010001000000050004628bb718c00c00010001000000050004cebe242dc00c00020001000000050006036e7336c00cc00c00020001000000050006036e7338c00cc00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7335c00cc07b0001000100000005000444b48310c08d00010001000000050004448eff10c09f00010001000000050004cb54dd35c0b100010001000000050004628a0b9dc0c30001000100000005000477a0f77cc05700010001000000050004ca2bdfaac06900010001000000050004caa568160000290500000000050000", + // microsoft.com. IN A? + "f4368180000100020005000b096d6963726f736f667403636f6d0000010001c00c0001000100000005000440040b25c00c0001000100000005000441373ac9c00c0002000100000005000e036e7331046d736674036e657400c00c00020001000000050006036e7332c04fc00c00020001000000050006036e7333c04fc00c00020001000000050006036e7334c04fc00c00020001000000050006036e7335c04fc04b000100010000000500044137253ec04b001c00010000000500102a010111200500000000000000010001c0650001000100000005000440043badc065001c00010000000500102a010111200600060000000000010001c07700010001000000050004d5c7b435c077001c00010000000500102a010111202000000000000000010001c08900010001000000050004cf2e4bfec089001c00010000000500102404f800200300000000000000010001c09b000100010000000500044137e28cc09b001c00010000000500102a010111200f000100000000000100010000290500000000050000", + // google.com. IN MX? + "724b8180000100050004000b06676f6f676c6503636f6d00000f0001c00c000f000100000005000c000a056173706d78016cc00cc00c000f0001000000050009001404616c7431c02ac00c000f0001000000050009001e04616c7432c02ac00c000f0001000000050009002804616c7433c02ac00c000f0001000000050009003204616c7434c02ac00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7331c00cc02a00010001000000050004adc2421bc02a001c00010000000500102a00145040080c01000000000000001bc04200010001000000050004adc2461bc05700010001000000050004adc2451bc06c000100010000000500044a7d8f1bc081000100010000000500044a7d191bc0ca00010001000000050004d8ef200ac09400010001000000050004d8ef220ac0a600010001000000050004d8ef240ac0b800010001000000050004d8ef260a0000290500000000050000", + // reddit.com. IN A? 
+ "12b98180000100080000000c0672656464697403636f6d0000020001c00c0002000100000005000f046175733204616b616d036e657400c00c000200010000000500070475736534c02dc00c000200010000000500070475737733c02dc00c000200010000000500070475737735c02dc00c00020001000000050008056173696131c02dc00c00020001000000050008056173696139c02dc00c00020001000000050008056e73312d31c02dc00c0002000100000005000a076e73312d313935c02dc02800010001000000050004c30a242ec04300010001000000050004451f1d39c05600010001000000050004451f3bc7c0690001000100000005000460073240c07c000100010000000500046007fb81c090000100010000000500047c283484c090001c00010000000500102a0226f0006700000000000000000064c0a400010001000000050004c16c5b01c0a4001c000100000005001026001401000200000000000000000001c0b800010001000000050004c16c5bc3c0b8001c0001000000050010260014010002000000000000000000c30000290500000000050000", + } + + for i, hexData := range testMessages { + // we won't fail the decoding of the hex + input, _ := hex.DecodeString(hexData) + + m := new(Msg) + m.Unpack(input) + m.Compress = true + lenComp := m.Len() + b, _ := m.Pack() + pacComp := len(b) + m.Compress = false + lenUnComp := m.Len() + b, _ = m.Pack() + pacUnComp := len(b) + if pacComp+1 != lenComp { + t.Errorf("msg.Len(compressed)=%d actual=%d for test %d", lenComp, pacComp, i) + } + if pacUnComp+1 != lenUnComp { + t.Errorf("msg.Len(uncompressed)=%d actual=%d for test %d", lenUnComp, pacUnComp, i) + } + } +} + +func TestMsgLengthCompressionMalformed(t *testing.T) { + // SOA with empty hostmaster, which is illegal + soa := &SOA{Hdr: RR_Header{Name: ".", Rrtype: TypeSOA, Class: ClassINET, Ttl: 12345}, + Ns: ".", + Mbox: "", + Serial: 0, + Refresh: 28800, + Retry: 7200, + Expire: 604800, + Minttl: 60} + m := new(Msg) + m.Compress = true + m.Ns = []RR{soa} + m.Len() // Should not crash. +} + func TestToRFC3597(t *testing.T) { - a := testRR("miek.nl. IN A 10.0.1.1") + a, _ := NewRR("miek.nl. IN A 10.0.1.1") x := new(RFC3597) x.ToRFC3597(a) if x.String() != `miek.nl. 3600 CLASS1 TYPE1 \# 4 0a000101` { t.Errorf("string mismatch, got: %s", x) } - b := testRR("miek.nl. IN MX 10 mx.miek.nl.") + b, _ := NewRR("miek.nl. IN MX 10 mx.miek.nl.") x.ToRFC3597(b) if x.String() != `miek.nl. 3600 CLASS1 TYPE15 \# 14 000a026d78046d69656b026e6c00` { t.Errorf("string mismatch, got: %s", x) @@ -172,9 +340,8 @@ func TestNoRdataPack(t *testing.T) { func TestNoRdataUnpack(t *testing.T) { data := make([]byte, 1024) for typ, fn := range TypeToRR { - if typ == TypeSOA || typ == TypeTSIG || typ == TypeTKEY { + if typ == TypeSOA || typ == TypeTSIG { // SOA, TSIG will not be seen (like this) in dyn. updates? - // TKEY requires length fields to be present for the Key and OtherData fields continue } r := fn() @@ -185,9 +352,11 @@ func TestNoRdataUnpack(t *testing.T) { t.Errorf("failed to pack RR: %v", err) continue } - if _, _, err := UnpackRR(data[:off], 0); err != nil { + rr, _, err := UnpackRR(data[:off], 0) + if err != nil { t.Errorf("failed to unpack RR with zero rdata: %s: %v", TypeToString[typ], err) } + t.Log(rr) } } @@ -208,7 +377,7 @@ func TestRdataOverflow(t *testing.T) { } func TestCopy(t *testing.T) { - rr := testRR("miek.nl. 2311 IN A 127.0.0.1") // Weird TTL to avoid catching TTL + rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") // Weird TTL to avoid catching TTL rr1 := Copy(rr) if rr.String() != rr1.String() { t.Fatalf("Copy() failed %s != %s", rr.String(), rr1.String()) @@ -218,9 +387,9 @@ func TestCopy(t *testing.T) { func TestMsgCopy(t *testing.T) { m := new(Msg) m.SetQuestion("miek.nl.", TypeA) - rr := testRR("miek.nl. 
2311 IN A 127.0.0.1") + rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") m.Answer = []RR{rr} - rr = testRR("miek.nl. 2311 IN NS 127.0.0.1") + rr, _ = NewRR("miek.nl. 2311 IN NS 127.0.0.1") m.Ns = []RR{rr} m1 := m.Copy() @@ -228,12 +397,12 @@ func TestMsgCopy(t *testing.T) { t.Fatalf("Msg.Copy() failed %s != %s", m.String(), m1.String()) } - m1.Answer[0] = testRR("somethingelse.nl. 2311 IN A 127.0.0.1") + m1.Answer[0], _ = NewRR("somethingelse.nl. 2311 IN A 127.0.0.1") if m.String() == m1.String() { t.Fatalf("Msg.Copy() failed; change to copy changed template %s", m.String()) } - rr = testRR("miek.nl. 2311 IN A 127.0.0.2") + rr, _ = NewRR("miek.nl. 2311 IN A 127.0.0.2") m1.Answer = append(m1.Answer, rr) if m1.Ns[0].String() == m1.Answer[1].String() { t.Fatalf("Msg.Copy() failed; append changed underlying array %s", m1.Ns[0].String()) @@ -259,62 +428,6 @@ func TestMsgPackBuffer(t *testing.T) { t.Errorf("packet %d failed to unpack", i) continue } + t.Logf("packet %d %s", i, m.String()) } } - -// Make sure we can decode a TKEY packet from the string, modify the RR, and then pack it again. -func TestTKEY(t *testing.T) { - // An example TKEY RR captured. There is no known accepted standard text format for a TKEY - // record so we do this from a hex string instead of from a text readable string. - tkeyStr := "0737362d6d732d370932322d3332633233332463303439663961662d633065612d313165372d363839362d6463333937396666656666640000f900ff0000000000d2086773732d747369670059fd01f359fe53730003000000b8a181b53081b2a0030a0100a10b06092a864882f712010202a2819d04819a60819706092a864886f71201020202006f8187308184a003020105a10302010fa2783076a003020112a26f046db29b1b1d2625da3b20b49dafef930dd1e9aad335e1c5f45dcd95e0005d67a1100f3e573d70506659dbed064553f1ab890f68f65ae10def0dad5b423b39f240ebe666f2886c5fe03819692d29182bbed87b83e1f9d16b7334ec16a3c4fc5ad4a990088e0be43f0c6957916f5fe60000" - tkeyBytes, err := hex.DecodeString(tkeyStr) - if err != nil { - t.Fatal("unable to decode TKEY string ", err) - } - // Decode the RR - rr, tkeyLen, unPackErr := UnpackRR(tkeyBytes, 0) - if unPackErr != nil { - t.Fatal("unable to decode TKEY RR", unPackErr) - } - // Make sure it's a TKEY record - if rr.Header().Rrtype != TypeTKEY { - t.Fatal("Unable to decode TKEY") - } - // Make sure we get back the same length - if rr.len() != len(tkeyBytes) { - t.Fatalf("Lengths don't match %d != %d", rr.len(), len(tkeyBytes)) - } - // make space for it with some fudge room - msg := make([]byte, tkeyLen+1000) - offset, packErr := PackRR(rr, msg, 0, nil, false) - if packErr != nil { - t.Fatal("unable to pack TKEY RR", packErr) - } - if offset != len(tkeyBytes) { - t.Fatalf("mismatched TKEY RR size %d != %d", len(tkeyBytes), offset) - } - if bytes.Compare(tkeyBytes, msg[0:offset]) != 0 { - t.Fatal("mismatched TKEY data after rewriting bytes") - } - t.Logf("got TKEY of: " + rr.String()) - // Now add some bytes to this and make sure we can encode OtherData properly - tkey := rr.(*TKEY) - tkey.OtherData = "abcd" - tkey.OtherLen = 2 - offset, packErr = PackRR(tkey, msg, 0, nil, false) - if packErr != nil { - t.Fatal("unable to pack TKEY RR after modification", packErr) - } - if offset != (len(tkeyBytes) + 2) { - t.Fatalf("mismatched TKEY RR size %d != %d", offset, len(tkeyBytes)+2) - } - t.Logf("modified to TKEY of: " + rr.String()) - - // Make sure we can parse our string output - tkey.Hdr.Class = ClassINET // https://github.com/miekg/dns/issues/577 - newRR, newError := NewRR(tkey.String()) - if newError != nil { - t.Fatalf("unable to parse TKEY string: %s", newError) 
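// A compact sketch of the PackRR/UnpackRR round trip the removed test relied
// on; the buffer headroom is illustrative only:
//
//	buf := make([]byte, rr.len()+1000)
//	off, err := PackRR(rr, buf, 0, nil, false)
//	rr2, _, err := UnpackRR(buf[:off], 0)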
- } - t.Log("got reparsed TKEY of newRR: " + newRR.String()) -} diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go index 7e6bac4287e6..f5f3fbdd899f 100644 --- a/vendor/github.com/miekg/dns/dnssec.go +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -19,8 +19,6 @@ import ( "sort" "strings" "time" - - "golang.org/x/crypto/ed25519" ) // DNSSEC encryption algorithm codes. @@ -40,14 +38,12 @@ const ( ECCGOST ECDSAP256SHA256 ECDSAP384SHA384 - ED25519 - ED448 INDIRECT uint8 = 252 PRIVATEDNS uint8 = 253 // Private (experimental keys) PRIVATEOID uint8 = 254 ) -// AlgorithmToString is a map of algorithm IDs to algorithm names. +// Map for algorithm names. var AlgorithmToString = map[uint8]string{ RSAMD5: "RSAMD5", DH: "DH", @@ -60,27 +56,23 @@ var AlgorithmToString = map[uint8]string{ ECCGOST: "ECC-GOST", ECDSAP256SHA256: "ECDSAP256SHA256", ECDSAP384SHA384: "ECDSAP384SHA384", - ED25519: "ED25519", - ED448: "ED448", INDIRECT: "INDIRECT", PRIVATEDNS: "PRIVATEDNS", PRIVATEOID: "PRIVATEOID", } -// StringToAlgorithm is the reverse of AlgorithmToString. +// Map of algorithm strings. var StringToAlgorithm = reverseInt8(AlgorithmToString) -// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's. +// Map of algorithm crypto hashes. var AlgorithmToHash = map[uint8]crypto.Hash{ RSAMD5: crypto.MD5, // Deprecated in RFC 6725 - DSA: crypto.SHA1, RSASHA1: crypto.SHA1, RSASHA1NSEC3SHA1: crypto.SHA1, RSASHA256: crypto.SHA256, ECDSAP256SHA256: crypto.SHA256, ECDSAP384SHA384: crypto.SHA384, RSASHA512: crypto.SHA512, - ED25519: crypto.Hash(0), } // DNSSEC hashing algorithm codes. @@ -93,7 +85,7 @@ const ( SHA512 // Experimental ) -// HashToString is a map of hash IDs to names. +// Map for hash names. var HashToString = map[uint8]string{ SHA1: "SHA1", SHA256: "SHA256", @@ -102,7 +94,7 @@ var HashToString = map[uint8]string{ SHA512: "SHA512", } -// StringToHash is a map of names to hash IDs. +// Map of hash strings. var StringToHash = reverseInt8(HashToString) // DNSKEY flag values. @@ -216,6 +208,9 @@ func (k *DNSKEY) ToDS(h uint8) *DS { // "|" denotes concatenation // DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key. + // digest buffer + digest := append(owner, wire...) // another copy + var hash crypto.Hash switch h { case SHA1: @@ -231,8 +226,7 @@ func (k *DNSKEY) ToDS(h uint8) *DS { } s := hash.New() - s.Write(owner) - s.Write(wire) + s.Write(digest) ds.Digest = hex.EncodeToString(s.Sum(nil)) return ds } @@ -240,7 +234,7 @@ func (k *DNSKEY) ToDS(h uint8) *DS { // ToCDNSKEY converts a DNSKEY record to a CDNSKEY record. func (k *DNSKEY) ToCDNSKEY() *CDNSKEY { c := &CDNSKEY{DNSKEY: *k} - c.Hdr = k.Hdr + c.Hdr = *k.Hdr.copyHeader() c.Hdr.Rrtype = TypeCDNSKEY return c } @@ -248,7 +242,7 @@ func (k *DNSKEY) ToCDNSKEY() *CDNSKEY { // ToCDS converts a DS record to a CDS record. func (d *DS) ToCDS() *CDS { c := &CDS{DS: *d} - c.Hdr = d.Hdr + c.Hdr = *d.Hdr.copyHeader() c.Hdr.Rrtype = TypeCDS return c } @@ -303,39 +297,23 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { if err != nil { return err } + signdata = append(signdata, wire...) hash, ok := AlgorithmToHash[rr.Algorithm] if !ok { return ErrAlg } - switch rr.Algorithm { - case ED25519: - // ed25519 signs the raw message and performs hashing internally. - // All other supported signature schemes operate over the pre-hashed - // message, and thus ed25519 must be handled separately here. 
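// An abbreviated sketch of the sign/verify flow implemented here; the key
// material and rrset are assumed to exist already:
//
//	sig := new(RRSIG)
//	sig.Hdr = RR_Header{"example.org.", TypeRRSIG, ClassINET, 3600, 0}
//	sig.Algorithm = RSASHA256
//	// ...TypeCovered, Labels, OrigTtl, Expiration, Inception, KeyTag, SignerName...
//	err := sig.Sign(signer, rrset) // signer is a crypto.Signer
//	err = sig.Verify(key, rrset)   // key is the matching *DNSKEY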
- // - // The raw message is passed directly into sign and crypto.Hash(0) is - // used to signal to the crypto.Signer that the data has not been hashed. - signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm) - if err != nil { - return err - } - - rr.Signature = toBase64(signature) - default: - h := hash.New() - h.Write(signdata) - h.Write(wire) + h := hash.New() + h.Write(signdata) - signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm) - if err != nil { - return err - } - - rr.Signature = toBase64(signature) + signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm) + if err != nil { + return err } + rr.Signature = toBase64(signature) + return nil } @@ -376,9 +354,6 @@ func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, // signature = append(signature, intToBytes(r1, 20)...) // signature = append(signature, intToBytes(s1, 20)...) // rr.Signature = signature - - case ED25519: - return signature, nil } return nil, ErrAlg @@ -440,6 +415,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { if err != nil { return err } + signeddata = append(signeddata, wire...) sigbuf := rr.sigBuf() // Get the binary signature data if rr.Algorithm == PRIVATEDNS { // PRIVATEOID @@ -462,7 +438,6 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { h := hash.New() h.Write(signeddata) - h.Write(wire) return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf) case ECDSAP256SHA256, ECDSAP384SHA384: @@ -477,23 +452,11 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { h := hash.New() h.Write(signeddata) - h.Write(wire) if ecdsa.Verify(pubkey, h.Sum(nil), r, s) { return nil } return ErrSig - case ED25519: - pubkey := k.publicKeyED25519() - if pubkey == nil { - return ErrKey - } - - if ed25519.Verify(pubkey, append(signeddata, wire...), sigbuf) { - return nil - } - return ErrSig - default: return ErrAlg } @@ -542,20 +505,20 @@ func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey { explen = uint16(keybuf[1])<<8 | uint16(keybuf[2]) keyoff = 3 } - if explen > 4 { - // Larger exponent than supported by the crypto package. - return nil - } pubkey := new(rsa.PublicKey) pubkey.N = big.NewInt(0) + shift := uint64((explen - 1) * 8) expo := uint64(0) - for i := 0; i < int(explen); i++ { - expo <<= 8 - expo |= uint64(keybuf[keyoff+i]) - } - if expo > 1<<31-1 { - // Larger exponent than supported by the crypto package. + for i := int(explen - 1); i > 0; i-- { + expo += uint64(keybuf[keyoff+i]) << shift + shift -= 8 + } + // Remainder + expo += uint64(keybuf[keyoff]) + if expo > 2<<31 { + // Larger expo than supported. 
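+		// (rsa.PublicKey.E is a plain int, so arbitrarily large
+		// exponents cannot be represented; such keys are rejected.)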
+ // println("dns: F5 primes (or larger) are not supported") return nil } pubkey.E = int(expo) @@ -616,17 +579,6 @@ func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey { return pubkey } -func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - if len(keybuf) != ed25519.PublicKeySize { - return nil - } - return keybuf -} - type wireSlice [][]byte func (p wireSlice) Len() int { return len(p) } @@ -664,10 +616,6 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) { switch x := r1.(type) { case *NS: x.Ns = strings.ToLower(x.Ns) - case *MD: - x.Md = strings.ToLower(x.Md) - case *MF: - x.Mf = strings.ToLower(x.Mf) case *CNAME: x.Target = strings.ToLower(x.Target) case *SOA: @@ -686,18 +634,6 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) { x.Email = strings.ToLower(x.Email) case *MX: x.Mx = strings.ToLower(x.Mx) - case *RP: - x.Mbox = strings.ToLower(x.Mbox) - x.Txt = strings.ToLower(x.Txt) - case *AFSDB: - x.Hostname = strings.ToLower(x.Hostname) - case *RT: - x.Host = strings.ToLower(x.Host) - case *SIG: - x.SignerName = strings.ToLower(x.SignerName) - case *PX: - x.Map822 = strings.ToLower(x.Map822) - x.Mapx400 = strings.ToLower(x.Mapx400) case *NAPTR: x.Replacement = strings.ToLower(x.Replacement) case *KX: diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go index 33e913ac527d..229a079370b7 100644 --- a/vendor/github.com/miekg/dns/dnssec_keygen.go +++ b/vendor/github.com/miekg/dns/dnssec_keygen.go @@ -8,8 +8,6 @@ import ( "crypto/rand" "crypto/rsa" "math/big" - - "golang.org/x/crypto/ed25519" ) // Generate generates a DNSKEY of the given bit size. @@ -40,10 +38,6 @@ func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) { if bits != 384 { return nil, ErrKeySize } - case ED25519: - if bits != 256 { - return nil, ErrKeySize - } } switch k.Algorithm { @@ -81,13 +75,6 @@ func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) { } k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y) return priv, nil - case ED25519: - pub, priv, err := ed25519.GenerateKey(rand.Reader) - if err != nil { - return nil, err - } - k.setPublicKeyED25519(pub) - return priv, nil default: return nil, ErrAlg } @@ -130,30 +117,21 @@ func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool { return true } -// Set the public key for Ed25519 -func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool { - if _K == nil { - return false - } - k.PublicKey = toBase64(_K) - return true -} - // Set the public key (the values E and N) for RSA // RFC 3110: Section 2. RSA Public KEY Resource Records func exponentToBuf(_E int) []byte { var buf []byte - i := big.NewInt(int64(_E)).Bytes() - if len(i) < 256 { - buf = make([]byte, 1, 1+len(i)) - buf[0] = uint8(len(i)) + i := big.NewInt(int64(_E)) + if len(i.Bytes()) < 256 { + buf = make([]byte, 1) + buf[0] = uint8(len(i.Bytes())) } else { - buf = make([]byte, 3, 3+len(i)) + buf = make([]byte, 3) buf[0] = 0 - buf[1] = uint8(len(i) >> 8) - buf[2] = uint8(len(i)) + buf[1] = uint8(len(i.Bytes()) >> 8) + buf[2] = uint8(len(i.Bytes())) } - buf = append(buf, i...) + buf = append(buf, i.Bytes()...) 
return buf } diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go index e2d9d8f924f3..c0b54dc7640a 100644 --- a/vendor/github.com/miekg/dns/dnssec_keyscan.go +++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go @@ -1,7 +1,6 @@ package dns import ( - "bytes" "crypto" "crypto/dsa" "crypto/ecdsa" @@ -10,14 +9,12 @@ import ( "math/big" "strconv" "strings" - - "golang.org/x/crypto/ed25519" ) // NewPrivateKey returns a PrivateKey by parsing the string s. // s should be in the same form of the BIND private key files. func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) { - if s == "" || s[len(s)-1] != '\n' { // We need a closing newline + if s[len(s)-1] != '\n' { // We need a closing newline return k.ReadPrivateKey(strings.NewReader(s+"\n"), "") } return k.ReadPrivateKey(strings.NewReader(s), "") @@ -39,7 +36,7 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er return nil, ErrPrivKey } // TODO(mg): check if the pubkey matches the private key - algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8) + algo, err := strconv.Atoi(strings.SplitN(m["algorithm"], " ", 2)[0]) if err != nil { return nil, ErrPrivKey } @@ -89,8 +86,6 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er } priv.PublicKey = *pub return priv, nil - case ED25519: - return readPrivateKeyED25519(m) default: return nil, ErrPrivKey } @@ -171,56 +166,13 @@ func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) { return p, nil } -func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) { - var p ed25519.PrivateKey - // TODO: validate that the required flags are present - for k, v := range m { - switch k { - case "privatekey": - p1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - if len(p1) != 32 { - return nil, ErrPrivKey - } - // RFC 8080 and Golang's x/crypto/ed25519 differ as to how the - // private keys are represented. RFC 8080 specifies that private - // keys be stored solely as the seed value (p1 above) while the - // ed25519 package represents them as the seed value concatenated - // to the public key, which is derived from the seed value. - // - // ed25519.GenerateKey reads exactly 32 bytes from the passed in - // io.Reader and uses them as the seed. It also derives the - // public key and produces a compatible private key. - _, p, err = ed25519.GenerateKey(bytes.NewReader(p1)) - if err != nil { - return nil, err - } - case "created", "publish", "activate": - /* not used in Go (yet) */ - } - } - return p, nil -} - // parseKey reads a private key from r. It returns a map[string]string, // with the key-value pairs, or an error when the file is not correct. func parseKey(r io.Reader, file string) (map[string]string, error) { - s, cancel := scanInit(r) + s := scanInit(r) m := make(map[string]string) c := make(chan lex) k := "" - defer func() { - cancel() - // zlexer can send up to two tokens, the next one and possibly 1 remainders. - // Do a non-blocking read. 
- _, ok := <-c - _, ok = <-c - if !ok { - // too bad - } - }() // Start the lexer go klexer(s, c) for l := range c { diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go index 46f3215c8fb4..56f3ea934f63 100644 --- a/vendor/github.com/miekg/dns/dnssec_privkey.go +++ b/vendor/github.com/miekg/dns/dnssec_privkey.go @@ -7,8 +7,6 @@ import ( "crypto/rsa" "math/big" "strconv" - - "golang.org/x/crypto/ed25519" ) const format = "Private-key-format: v1.3\n" @@ -81,12 +79,6 @@ func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string { "Private_value(x): " + priv + "\n" + "Public_value(y): " + pub + "\n" - case ed25519.PrivateKey: - private := toBase64(p[:32]) - return format + - "Algorithm: " + algorithm + "\n" + - "PrivateKey: " + private + "\n" - default: return "" } diff --git a/vendor/github.com/miekg/dns/dnssec_test.go b/vendor/github.com/miekg/dns/dnssec_test.go index 2dea4e50710f..ca085ed3b889 100644 --- a/vendor/github.com/miekg/dns/dnssec_test.go +++ b/vendor/github.com/miekg/dns/dnssec_test.go @@ -8,8 +8,6 @@ import ( "strings" "testing" "time" - - "golang.org/x/crypto/ed25519" ) func getKey() *DNSKEY { @@ -37,6 +35,57 @@ func getSoa() *SOA { return soa } +func TestGenerateEC(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + key := new(DNSKEY) + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Name = "miek.nl." + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = ECDSAP256SHA256 + privkey, _ := key.Generate(256) + t.Log(key.String()) + t.Log(key.PrivateKeyString(privkey)) +} + +func TestGenerateDSA(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + key := new(DNSKEY) + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Name = "miek.nl." + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = DSA + privkey, _ := key.Generate(1024) + t.Log(key.String()) + t.Log(key.PrivateKeyString(privkey)) +} + +func TestGenerateRSA(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + key := new(DNSKEY) + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Name = "miek.nl." 
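+	// As in the EC and DSA tests above, the Flags value 256 set below marks
+	// a DNSKEY with the Zone Key bit (a ZSK; 257 would also set the SEP bit
+	// used for KSKs), and Protocol must be 3, the only value RFC 4034 allows.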
+ key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + privkey, _ := key.Generate(1024) + t.Log(key.String()) + t.Log(key.PrivateKeyString(privkey)) +} + func TestSecure(t *testing.T) { soa := getSoa() @@ -162,9 +211,10 @@ func TestSignVerify(t *testing.T) { continue } if err := sig.Verify(key, []RR{r}); err != nil { - t.Errorf("failure to validate: %s", r.Header().Name) + t.Error("failure to validate") continue } + t.Logf("validated: %s", r.Header().Name) } } @@ -198,7 +248,9 @@ func Test65534(t *testing.T) { } if err := sig.Verify(key, []RR{t6}); err != nil { t.Error(err) - t.Errorf("failure to validate %s", t6.Header().Name) + t.Error("failure to validate") + } else { + t.Logf("validated: %s", t6.Header().Name) } } @@ -329,7 +381,7 @@ Created: 20110302104537 Publish: 20110302104537 Activate: 20110302104537` - xk := testRR(pub) + xk, _ := NewRR(pub) k := xk.(*DNSKEY) p, err := k.NewPrivateKey(priv) if err != nil { @@ -380,7 +432,10 @@ func TestSignVerifyECDSA(t *testing.T) { Algorithm: 14 (ECDSAP384SHA384) PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` - eckey := testRR(pub) + eckey, err := NewRR(pub) + if err != nil { + t.Fatal(err) + } privkey, err := eckey.(*DNSKEY).NewPrivateKey(priv) if err != nil { t.Fatal(err) @@ -393,7 +448,7 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` if ds.Digest != "72d7b62976ce06438e9c0bf319013cf801f09ecc84b8d7e9495f27e305c6a9b0563a9b5f4d288405c3008a946df983d6" { t.Fatal("wrong DS Digest") } - a := testRR("www.example.net. 3600 IN A 192.0.2.1") + a, _ := NewRR("www.example.net. 3600 IN A 192.0.2.1") sig := new(RRSIG) sig.Hdr = RR_Header{"example.net.", TypeRRSIG, ClassINET, 14400, 0} sig.Expiration, _ = StringToTime("20100909102025") @@ -418,52 +473,6 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` } func TestSignVerifyECDSA2(t *testing.T) { - srv1 := testRR("srv.miek.nl. IN SRV 1000 800 0 web1.miek.nl.") - srv := srv1.(*SRV) - - // With this key - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = ECDSAP256SHA256 - privkey, err := key.Generate(256) - if err != nil { - t.Fatal("failure to generate key") - } - - // Fill in the values of the Sig, before signing - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.TypeCovered = srv.Hdr.Rrtype - sig.Labels = uint8(CountLabel(srv.Hdr.Name)) // works for all 3 - sig.OrigTtl = srv.Hdr.Ttl - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.KeyTag = key.KeyTag() // Get the keyfrom the Key - sig.SignerName = key.Hdr.Name - sig.Algorithm = ECDSAP256SHA256 - - if sig.Sign(privkey.(*ecdsa.PrivateKey), []RR{srv}) != nil { - t.Fatal("failure to sign the record") - } - - err = sig.Verify(key, []RR{srv}) - if err != nil { - t.Errorf("failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v", - key.String(), - srv.String(), - sig.String(), - key.PrivateKeyString(privkey), - err, - ) - } -} - -func TestSignVerifyEd25519(t *testing.T) { srv1, err := NewRR("srv.miek.nl. 
IN SRV 1000 800 0 web1.miek.nl.") if err != nil { t.Fatal(err) @@ -478,7 +487,7 @@ func TestSignVerifyEd25519(t *testing.T) { key.Hdr.Ttl = 14400 key.Flags = 256 key.Protocol = 3 - key.Algorithm = ED25519 + key.Algorithm = ECDSAP256SHA256 privkey, err := key.Generate(256) if err != nil { t.Fatal("failure to generate key") @@ -494,9 +503,9 @@ func TestSignVerifyEd25519(t *testing.T) { sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" sig.KeyTag = key.KeyTag() // Get the keyfrom the Key sig.SignerName = key.Hdr.Name - sig.Algorithm = ED25519 + sig.Algorithm = ECDSAP256SHA256 - if sig.Sign(privkey.(ed25519.PrivateKey), []RR{srv}) != nil { + if sig.Sign(privkey.(*ecdsa.PrivateKey), []RR{srv}) != nil { t.Fatal("failure to sign the record") } @@ -521,7 +530,10 @@ func TestRFC6605P256(t *testing.T) { exPriv := `Private-key-format: v1.2 Algorithm: 13 (ECDSAP256SHA256) PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=` - rrDNSKEY := testRR(exDNSKEY) + rrDNSKEY, err := NewRR(exDNSKEY) + if err != nil { + t.Fatal(err) + } priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv) if err != nil { t.Fatal(err) @@ -530,7 +542,10 @@ PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=` exDS := `example.net. 3600 IN DS 55648 13 2 ( b4c8c1fe2e7477127b27115656ad6256f424625bf5c1 e2770ce6d6e37df61d17 )` - rrDS := testRR(exDS) + rrDS, err := NewRR(exDS) + if err != nil { + t.Fatal(err) + } ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA256) if !reflect.DeepEqual(ourDS, rrDS.(*DS)) { t.Errorf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS)) @@ -541,9 +556,15 @@ PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=` 20100909100439 20100812100439 55648 example.net. qx6wLYqmh+l9oCKTN6qIc+bw6ya+KJ8oMz0YP107epXA yGmt+3SNruPFKG7tZoLBLlUzGGus7ZwmwWep666VCw== )` - rrA := testRR(exA) - rrRRSIG := testRR(exRRSIG) - if err := rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { + rrA, err := NewRR(exA) + if err != nil { + t.Fatal(err) + } + rrRRSIG, err := NewRR(exRRSIG) + if err != nil { + t.Fatal(err) + } + if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { t.Errorf("failure to validate the spec RRSIG: %v", err) } @@ -583,7 +604,10 @@ func TestRFC6605P384(t *testing.T) { exPriv := `Private-key-format: v1.2 Algorithm: 14 (ECDSAP384SHA384) PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` - rrDNSKEY := testRR(exDNSKEY) + rrDNSKEY, err := NewRR(exDNSKEY) + if err != nil { + t.Fatal(err) + } priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv) if err != nil { t.Fatal(err) @@ -593,7 +617,10 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` 72d7b62976ce06438e9c0bf319013cf801f09ecc84b8 d7e9495f27e305c6a9b0563a9b5f4d288405c3008a94 6df983d6 )` - rrDS := testRR(exDS) + rrDS, err := NewRR(exDS) + if err != nil { + t.Fatal(err) + } ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA384) if !reflect.DeepEqual(ourDS, rrDS.(*DS)) { t.Fatalf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS)) @@ -605,8 +632,11 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` /L5hDKIvGDyI1fcARX3z65qrmPsVz73QD1Mr5CEqOiLP 95hxQouuroGCeZOvzFaxsT8Glr74hbavRKayJNuydCuz WTSSPdz7wnqXL5bdcJzusdnI0RSMROxxwGipWcJm )` - rrA := testRR(exA) - rrRRSIG := testRR(exRRSIG) + rrA, err := NewRR(exA) + if err != nil { + t.Fatal(err) + } + rrRRSIG, err := NewRR(exRRSIG) if err != nil { t.Fatal(err) } @@ -641,144 +671,6 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` } } -// rfc8080 6.1 -func 
TestRFC8080Ed25519Example1(t *testing.T) { - exDNSKEY := `example.com. 3600 IN DNSKEY 257 3 15 ( - l02Woi0iS8Aa25FQkUd9RMzZHJpBoRQwAQEX1SxZJA4= )` - exPriv := `Private-key-format: v1.2 -Algorithm: 15 (ED25519) -PrivateKey: ODIyNjAzODQ2MjgwODAxMjI2NDUxOTAyMDQxNDIyNjI=` - rrDNSKEY, err := NewRR(exDNSKEY) - if err != nil { - t.Fatal(err) - } - priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv) - if err != nil { - t.Fatal(err) - } - - exDS := `example.com. 3600 IN DS 3613 15 2 ( - 3aa5ab37efce57f737fc1627013fee07bdf241bd10f3b1964ab55c78e79 - a304b )` - rrDS, err := NewRR(exDS) - if err != nil { - t.Fatal(err) - } - ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA256) - if !reflect.DeepEqual(ourDS, rrDS.(*DS)) { - t.Fatalf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS)) - } - - exMX := `example.com. 3600 IN MX 10 mail.example.com.` - exRRSIG := `example.com. 3600 IN RRSIG MX 15 2 3600 ( - 1440021600 1438207200 3613 example.com. ( - oL9krJun7xfBOIWcGHi7mag5/hdZrKWw15jPGrHpjQeRAvTdszaPD+QLs3f - x8A4M3e23mRZ9VrbpMngwcrqNAg== ) )` - rrMX, err := NewRR(exMX) - if err != nil { - t.Fatal(err) - } - rrRRSIG, err := NewRR(exRRSIG) - if err != nil { - t.Fatal(err) - } - if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrMX}); err != nil { - t.Errorf("failure to validate the spec RRSIG: %v", err) - } - - ourRRSIG := &RRSIG{ - Hdr: RR_Header{ - Ttl: rrMX.Header().Ttl, - }, - KeyTag: rrDNSKEY.(*DNSKEY).KeyTag(), - SignerName: rrDNSKEY.(*DNSKEY).Hdr.Name, - Algorithm: rrDNSKEY.(*DNSKEY).Algorithm, - } - ourRRSIG.Expiration, _ = StringToTime("20150819220000") - ourRRSIG.Inception, _ = StringToTime("20150729220000") - err = ourRRSIG.Sign(priv.(ed25519.PrivateKey), []RR{rrMX}) - if err != nil { - t.Fatal(err) - } - - if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrMX}); err != nil { - t.Errorf("failure to validate our RRSIG: %v", err) - } - - if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) { - t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG)) - } -} - -// rfc8080 6.1 -func TestRFC8080Ed25519Example2(t *testing.T) { - exDNSKEY := `example.com. 3600 IN DNSKEY 257 3 15 ( - zPnZ/QwEe7S8C5SPz2OfS5RR40ATk2/rYnE9xHIEijs= )` - exPriv := `Private-key-format: v1.2 -Algorithm: 15 (ED25519) -PrivateKey: DSSF3o0s0f+ElWzj9E/Osxw8hLpk55chkmx0LYN5WiY=` - rrDNSKEY, err := NewRR(exDNSKEY) - if err != nil { - t.Fatal(err) - } - priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv) - if err != nil { - t.Fatal(err) - } - - exDS := `example.com. 3600 IN DS 35217 15 2 ( - 401781b934e392de492ec77ae2e15d70f6575a1c0bc59c5275c04ebe80c - 6614c )` - rrDS, err := NewRR(exDS) - if err != nil { - t.Fatal(err) - } - ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA256) - if !reflect.DeepEqual(ourDS, rrDS.(*DS)) { - t.Fatalf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS)) - } - - exMX := `example.com. 3600 IN MX 10 mail.example.com.` - exRRSIG := `example.com. 3600 IN RRSIG MX 15 2 3600 ( - 1440021600 1438207200 35217 example.com. 
( - zXQ0bkYgQTEFyfLyi9QoiY6D8ZdYo4wyUhVioYZXFdT410QPRITQSqJSnzQ - oSm5poJ7gD7AQR0O7KuI5k2pcBg== ) )` - rrMX, err := NewRR(exMX) - if err != nil { - t.Fatal(err) - } - rrRRSIG, err := NewRR(exRRSIG) - if err != nil { - t.Fatal(err) - } - if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrMX}); err != nil { - t.Errorf("failure to validate the spec RRSIG: %v", err) - } - - ourRRSIG := &RRSIG{ - Hdr: RR_Header{ - Ttl: rrMX.Header().Ttl, - }, - KeyTag: rrDNSKEY.(*DNSKEY).KeyTag(), - SignerName: rrDNSKEY.(*DNSKEY).Hdr.Name, - Algorithm: rrDNSKEY.(*DNSKEY).Algorithm, - } - ourRRSIG.Expiration, _ = StringToTime("20150819220000") - ourRRSIG.Inception, _ = StringToTime("20150729220000") - err = ourRRSIG.Sign(priv.(ed25519.PrivateKey), []RR{rrMX}) - if err != nil { - t.Fatal(err) - } - - if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrMX}); err != nil { - t.Errorf("failure to validate our RRSIG: %v", err) - } - - if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) { - t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG)) - } -} - func TestInvalidRRSet(t *testing.T) { goodRecords := make([]RR, 2) goodRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} @@ -839,22 +731,3 @@ func TestInvalidRRSet(t *testing.T) { t.Fatal("Verification did not return ErrRRset with inconsistent records") } } - -// Issue #688 - RSA exponent unpacked in reverse -func TestRsaExponentUnpack(t *testing.T) { - zskRrDnskey, _ := NewRR("isc.org. 7200 IN DNSKEY 256 3 5 AwEAAcdkaRUlsRD4gcF63PpPJJ1E6kOIb3yn/UHptVsPEQtEbgJ2y20O eix4unpwoQkz+bIAd2rrOU/95wgV530x0/qqKwBLWoGkxdcnNcvVT4hl 3SOTZy1VjwkAfyayHPU8VisXqJGbB3KWevBZlb6AtrXzFu8AHuBeeAAe /fOgreCh") - kskRrDnskey, _ := NewRR("isc.org. 7200 IN DNSKEY 257 3 5 BEAAAAOhHQDBrhQbtphgq2wQUpEQ5t4DtUHxoMVFu2hWLDMvoOMRXjGr hhCeFvAZih7yJHf8ZGfW6hd38hXG/xylYCO6Krpbdojwx8YMXLA5/kA+ u50WIL8ZR1R6KTbsYVMf/Qx5RiNbPClw+vT+U8eXEJmO20jIS1ULgqy3 47cBB1zMnnz/4LJpA0da9CbKj3A254T515sNIMcwsB8/2+2E63/zZrQz Bkj0BrN/9Bexjpiks3jRhZatEsXn3dTy47R09Uix5WcJt+xzqZ7+ysyL KOOedS39Z7SDmsn2eA0FKtQpwA6LXeG2w+jxmw3oA8lVUgEf/rzeC/bB yBNsO70aEFTd") - kskRrRrsig, _ := NewRR("isc.org. 7200 IN RRSIG DNSKEY 5 2 7200 20180627230244 20180528230244 12892 isc.org. ebKBlhYi1hPGTdPg6zSwvprOIkoFMs+WIhMSjoYW6/K5CS9lDDFdK4cu TgXJRT3etrltTuJiFe2HRpp+7t5cKLy+CeJZVzqrCz200MoHiFuLI9yI DJQGaS5YYCiFbw5+jUGU6aUhZ7Y5/YufeqATkRZzdrKwgK+zri8LPw9T WLoVJPAOW7GR0dgxl9WKmO7Fzi9P8BZR3NuwLV7329X94j+4zyswaw7q e5vif0ybzFveODLsEi/E0a2rTXc4QzzyM0fSVxRkVQyQ7ifIPP4ohnnT d5qpPUbE8xxBzTdWR/TaKADC5aCFkppG9lVAq5CPfClii2949X5RYzy1 rxhuSA==") - zskRrRrsig, _ := NewRR("isc.org. 7200 IN RRSIG DNSKEY 5 2 7200 20180627230244 20180528230244 19923 isc.org. RgCfzUeq4RJPGoe9RRB6cWf6d/Du+tHK5SxI5QL1waA3O5qVtQKFkY1C dq/yyVjwzfjD9F62TObujOaktv8X80ZMcNPmgHbvK1xOqelMBWv5hxj3 xRe+QQObLZ5NPfHFsphQKXvwgO5Sjk8py2B2iCr3BHCZ8S38oIfuSrQx sn8=") - - zsk, ksk := zskRrDnskey.(*DNSKEY), kskRrDnskey.(*DNSKEY) - zskSig, kskSig := zskRrRrsig.(*RRSIG), kskRrRrsig.(*RRSIG) - - if e := zskSig.Verify(zsk, []RR{zsk, ksk}); e != nil { - t.Fatalf("cannot verify RRSIG with keytag [%d]. Cause [%s]", zsk.KeyTag(), e.Error()) - } - - if e := kskSig.Verify(ksk, []RR{zsk, ksk}); e != nil { - t.Fatalf("cannot verify RRSIG with keytag [%d]. 
Cause [%s]", ksk.KeyTag(), e.Error()) - } -} diff --git a/vendor/github.com/miekg/dns/dnsutil/util.go b/vendor/github.com/miekg/dns/dnsutil/util.go index 76ac4de66f6f..9ed03f2969c0 100644 --- a/vendor/github.com/miekg/dns/dnsutil/util.go +++ b/vendor/github.com/miekg/dns/dnsutil/util.go @@ -11,7 +11,7 @@ import ( "github.com/miekg/dns" ) -// AddOrigin adds origin to s if s is not already a FQDN. +// AddDomain adds origin to s if s is not already a FQDN. // Note that the result may not be a FQDN. If origin does not end // with a ".", the result won't either. // This implements the zonefile convention (specified in RFC 1035, @@ -20,9 +20,7 @@ import ( func AddOrigin(s, origin string) string { // ("foo.", "origin.") -> "foo." (already a FQDN) // ("foo", "origin.") -> "foo.origin." - // ("foo", "origin") -> "foo.origin" - // ("foo", ".") -> "foo." (Same as dns.Fqdn()) - // ("foo.", ".") -> "foo." (Same as dns.Fqdn()) + // ("foo"), "origin" -> "foo.origin" // ("@", "origin.") -> "origin." (@ represents the apex (bare) domain) // ("", "origin.") -> "origin." (not obvious) // ("foo", "") -> "foo" (not obvious) @@ -36,34 +34,32 @@ func AddOrigin(s, origin string) string { if s == "@" || len(s) == 0 { return origin // Expand apex. } + if origin == "." { - return dns.Fqdn(s) + return s + origin // AddOrigin(s, ".") is an expensive way to add a ".". } return s + "." + origin // The simple case. } // TrimDomainName trims origin from s if s is a subdomain. -// This function will never return "", but returns "@" instead (@ represents the apex domain). +// This function will never return "", but returns "@" instead (@ represents the apex (bare) domain). func TrimDomainName(s, origin string) string { // An apex (bare) domain is always returned as "@". // If the return value ends in a ".", the domain was not the suffix. // origin can end in "." or not. Either way the results should be the same. if len(s) == 0 { - return "@" + return "@" // Return the apex (@) rather than "". } // Someone is using TrimDomainName(s, ".") to remove a dot if it exists. if origin == "." { return strings.TrimSuffix(s, origin) } - original := s - s = dns.Fqdn(s) - origin = dns.Fqdn(origin) - + // Dude, you aren't even if the right subdomain! if !dns.IsSubDomain(origin, s) { - return original + return s } slabels := dns.Split(s) diff --git a/vendor/github.com/miekg/dns/dnsutil/util_test.go b/vendor/github.com/miekg/dns/dnsutil/util_test.go index 6754789bc6ee..0f1ecec8e08a 100644 --- a/vendor/github.com/miekg/dns/dnsutil/util_test.go +++ b/vendor/github.com/miekg/dns/dnsutil/util_test.go @@ -10,8 +10,6 @@ func TestAddOrigin(t *testing.T) { {"@", "example.com.", "example.com."}, {"foo", "example.com.", "foo.example.com."}, {"foo.", "example.com.", "foo."}, - {"example.com", ".", "example.com."}, - {"example.com.", ".", "example.com."}, // Oddball tests: // In general origin should not be "" or "." but at least // these tests verify we don't crash and will keep results @@ -28,15 +26,16 @@ func TestAddOrigin(t *testing.T) { for _, test := range tests { actual := AddOrigin(test.e1, test.e2) if test.expected != actual { - t.Errorf("AddOrigin(%#v, %#v) expected %#v, got %#v\n", test.e1, test.e2, test.expected, actual) + t.Errorf("AddOrigin(%#v, %#v) expected %#v, go %#v\n", test.e1, test.e2, test.expected, actual) } } } func TestTrimDomainName(t *testing.T) { + // Basic tests. // Try trimming "example.com" and "example.com." from typical use cases. 
- testsEx := []struct{ experiment, expected string }{ + var tests_examplecom = []struct{ experiment, expected string }{ {"foo.example.com", "foo"}, {"foo.example.com.", "foo"}, {".foo.example.com", ".foo"}, @@ -52,10 +51,10 @@ func TestTrimDomainName(t *testing.T) { {".foo.ronco.com.", ".foo.ronco.com."}, } for _, dom := range []string{"example.com", "example.com."} { - for i, test := range testsEx { + for i, test := range tests_examplecom { actual := TrimDomainName(test.experiment, dom) if test.expected != actual { - t.Errorf("%d TrimDomainName(%#v, %#v): expected %v, got %v\n", i, test.experiment, dom, test.expected, actual) + t.Errorf("%d TrimDomainName(%#v, %#v): expected (%v) got (%v)\n", i, test.experiment, dom, test.expected, actual) } } } @@ -64,7 +63,7 @@ func TestTrimDomainName(t *testing.T) { // These tests shouldn't be needed but I was wary of off-by-one errors. // In theory, these can't happen because there are no single-letter TLDs, // but it is good to exercise the code this way. - tests := []struct{ experiment, expected string }{ + var tests = []struct{ experiment, expected string }{ {"", "@"}, {".", "."}, {"a.b.c.d.e.f.", "a.b.c.d.e"}, @@ -106,7 +105,7 @@ func TestTrimDomainName(t *testing.T) { for i, test := range tests { actual := TrimDomainName(test.experiment, dom) if test.expected != actual { - t.Errorf("%d TrimDomainName(%#v, %#v): expected %v, got %v\n", i, test.experiment, dom, test.expected, actual) + t.Errorf("%d TrimDomainName(%#v, %#v): expected (%v) got (%v)\n", i, test.experiment, dom, test.expected, actual) } } } @@ -115,16 +114,17 @@ func TestTrimDomainName(t *testing.T) { // These test cases provide both origin, s, and the expected result. // If you find a bug in the wild, this is probably the easiest place // to add it as a test case. - var testsWild = []struct{ e1, e2, expected string }{ + var tests_wild = []struct{ e1, e2, expected string }{ {"mathoverflow.net.", ".", "mathoverflow.net"}, {"mathoverflow.net", ".", "mathoverflow.net"}, {"", ".", "@"}, {"@", ".", "@"}, } - for i, test := range testsWild { + for i, test := range tests_wild { actual := TrimDomainName(test.e1, test.e2) if test.expected != actual { - t.Errorf("%d TrimDomainName(%#v, %#v): expected %v, got %v\n", i, test.e1, test.e2, test.expected, actual) + t.Errorf("%d TrimDomainName(%#v, %#v): expected (%v) got (%v)\n", i, test.e1, test.e2, test.expected, actual) } } + } diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go index 0389d7248ef1..f3555e433992 100644 --- a/vendor/github.com/miekg/dns/doc.go +++ b/vendor/github.com/miekg/dns/doc.go @@ -1,7 +1,7 @@ /* Package dns implements a full featured interface to the Domain Name System. Server- and client-side programming is supported. The package allows complete control over what is sent out to the DNS. The package API follows the less-is-more principle, by presenting a small, clean interface. The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers, @@ -14,7 +14,7 @@ Basic usage pattern for creating a new resource record: r := new(dns.MX) r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, - Class: dns.ClassINET, Ttl: 3600} + Class: dns.ClassINET, Ttl: 3600} r.Preference = 10 r.Mx = "mx.miek.nl." @@ -22,16 +22,16 @@ Or directly from a string: mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") -Or when the default origin (.)
and TTL (3600) and class (IN) suit you: +Or when the default TTL (3600) and class (IN) suit you: - mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl") + mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.") Or even: mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") In the DNS, messages are exchanged; these messages contain resource -records (sets). Use pattern for creating a message: +records (sets). Use pattern for creating a message: m := new(dns.Msg) m.SetQuestion("miek.nl.", dns.TypeMX) @@ -51,7 +51,7 @@ The following is slightly more verbose, but more flexible: m1.Question = make([]dns.Question, 1) m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} After creating a message it can be sent. Basic use pattern for synchronous querying the DNS at a server configured on 127.0.0.1 and port 53: @@ -63,23 +63,7 @@ class) is as easy as setting: c.SingleInflight = true -More advanced options are available using a net.Dialer and the corresponding API. -For example it is possible to set a timeout, or to specify a source IP address -and port to use for the connection: - - c := new(dns.Client) - laddr := net.UDPAddr{ - IP: net.ParseIP("[::1]"), - Port: 12345, - Zone: "", - } - c.Dialer := &net.Dialer{ - Timeout: 200 * time.Millisecond, - LocalAddr: &laddr, - } - in, rtt, err := c.Exchange(m1, "8.8.8.8:53") - If these "advanced" features are not needed, a simple UDP query can be sent, with: in, err := dns.Exchange(m1, "127.0.0.1:53") @@ -168,11 +152,6 @@ Basic use pattern when querying with a TSIG name "axfr." (note that these key na must be fully qualified - as they are domain names) and the base64 secret "so6ZGir4GPAqINNh9U5c3A==": -If an incoming message contains a TSIG record it MUST be the last record in -the additional section (RFC2845 3.2). This means that you should make the -call to SetTsig last, right before executing the query. If you make any -changes to the RRset after calling SetTsig() the signature will be incorrect. - c := new(dns.Client) c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} m := new(dns.Msg) @@ -224,7 +203,7 @@ RFC 6895 sets aside a range of type codes for private use. This range is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these can be used, before requesting an official type code from IANA. -see http://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more +see http://miek.nl/posts/2014/Sep/21/Private%20RRs%20and%20IDN%20in%20Go%20DNS/ for more information. EDNS0 diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go index 55059eb14a5a..7a58aa9b17b6 100644 --- a/vendor/github.com/miekg/dns/edns.go +++ b/vendor/github.com/miekg/dns/edns.go @@ -4,27 +4,25 @@ import ( "encoding/binary" "encoding/hex" "errors" - "fmt" "net" "strconv" ) // EDNS0 Option codes.
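// These codes identify the options that can ride along in an OPT
// pseudo-RR. A minimal client-side sketch (assumed usage, mirroring the
// package documentation): request DNSSEC records with a 4096-byte UDP
// buffer:
//
//	m := new(Msg)
//	m.SetQuestion("miek.nl.", TypeMX)
//	m.SetEdns0(4096, true) // attaches an OPT RR and sets the DO bit
//	if opt := m.IsEdns0(); opt != nil {
//		_ = opt.Do() // true
//	}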
const ( - EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 - EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt - EDNS0NSID = 0x3 // nsid (See RFC 5001) - EDNS0DAU = 0x5 // DNSSEC Algorithm Understood - EDNS0DHU = 0x6 // DS Hash Understood - EDNS0N3U = 0x7 // NSEC3 Hash Understood - EDNS0SUBNET = 0x8 // client-subnet (See RFC 7871) - EDNS0EXPIRE = 0x9 // EDNS0 expire - EDNS0COOKIE = 0xa // EDNS0 Cookie - EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (See RFC 7828) - EDNS0PADDING = 0xc // EDNS0 padding (See RFC 7830) - EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891) - EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891) - _DO = 1 << 15 // DNSSEC OK + EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 + EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt + EDNS0NSID = 0x3 // nsid (RFC5001) + EDNS0DAU = 0x5 // DNSSEC Algorithm Understood + EDNS0DHU = 0x6 // DS Hash Understood + EDNS0N3U = 0x7 // NSEC3 Hash Understood + EDNS0SUBNET = 0x8 // client-subnet (RFC7871) + EDNS0EXPIRE = 0x9 // EDNS0 expire + EDNS0COOKIE = 0xa // EDNS0 Cookie + EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET + EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891) + EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891) + _DO = 1 << 15 // dnssec ok ) // OPT is the EDNS0 RR appended to messages to convey extra (meta) information. @@ -57,6 +55,9 @@ func (rr *OPT) String() string { } case *EDNS0_SUBNET: s += "\n; SUBNET: " + o.String() + if o.(*EDNS0_SUBNET).DraftOption { + s += " (draft)" + } case *EDNS0_COOKIE: s += "\n; COOKIE: " + o.String() case *EDNS0_UL: @@ -71,8 +72,6 @@ s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String() case *EDNS0_LOCAL: s += "\n; LOCAL OPT: " + o.String() - case *EDNS0_PADDING: - s += "\n; PADDING: " + o.String() } } return s @@ -129,18 +128,8 @@ func (rr *OPT) Do() bool { } // SetDo sets the DO (DNSSEC OK) bit. -// If we pass an argument, set the DO bit to that value. -// It is possible to pass 2 or more arguments. Any arguments after the 1st is silently ignored. -func (rr *OPT) SetDo(do ...bool) { - if len(do) == 1 { - if do[0] { - rr.Hdr.Ttl |= _DO - } else { - rr.Hdr.Ttl &^= _DO - } - } else { - rr.Hdr.Ttl |= _DO - } +func (rr *OPT) SetDo() { + rr.Hdr.Ttl |= _DO } // EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it. @@ -156,7 +145,7 @@ type EDNS0 interface { String() string } -// EDNS0_NSID option is used to retrieve a nameserver +// The nsid EDNS0 option is used to retrieve a nameserver // identifier. When sending a request Nsid must be set to the empty string // The identifier is an opaque string encoded as hex. // Basic use pattern for creating an nsid option: @@ -181,13 +170,12 @@ func (e *EDNS0_NSID) pack() ([]byte, error) { return h, nil } -// Option implements the EDNS0 interface. -func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code. +func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil } func (e *EDNS0_NSID) String() string { return string(e.Nsid) } // EDNS0_SUBNET is the subnet option that is used to give the remote nameserver -// an idea of where the client lives. See RFC 7871.
It can then give back a different +// answer depending on the location or network topology. // Basic use pattern for creating a subnet option: // @@ -197,25 +185,31 @@ func (e *EDNS0_NSID) String() string { return string(e.Nsid) } // e := new(dns.EDNS0_SUBNET) // e.Code = dns.EDNS0SUBNET // e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 // e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6 // e.SourceScope = 0 // e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4 // // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6 // o.Option = append(o.Option, e) // -// This code will parse all the available bits when unpacking (up to optlen). -// When packing it will apply SourceNetmask. If you need more advanced logic, -// patches welcome and good luck. +// Note: the spec (draft-ietf-dnsop-edns-client-subnet-00) has some insane logic +// for which netmask applies to the address. This code will parse all the +// available bits when unpacking (up to optlen). When packing it will apply +// SourceNetmask. If you need more advanced logic, patches welcome and good luck. type EDNS0_SUBNET struct { Code uint16 // Always EDNS0SUBNET Family uint16 // 1 for IP, 2 for IP6 SourceNetmask uint8 SourceScope uint8 Address net.IP + DraftOption bool // Set to true if using the old (0x50fa) option code } -// Option implements the EDNS0 interface. -func (e *EDNS0_SUBNET) Option() uint16 { return EDNS0SUBNET } +func (e *EDNS0_SUBNET) Option() uint16 { + if e.DraftOption { + return EDNS0SUBNETDRAFT + } + return EDNS0SUBNET +} func (e *EDNS0_SUBNET) pack() ([]byte, error) { b := make([]byte, 4) @@ -223,12 +217,6 @@ func (e *EDNS0_SUBNET) pack() ([]byte, error) { b[2] = e.SourceNetmask b[3] = e.SourceScope switch e.Family { - case 0: - // "dig" sets AddressFamily to 0 if SourceNetmask is also 0 - // We might don't need to complain either - if e.SourceNetmask != 0 { - return nil, errors.New("dns: bad address family") - } case 1: if e.SourceNetmask > net.IPv4len*8 { return nil, errors.New("dns: bad netmask") @@ -263,13 +251,6 @@ func (e *EDNS0_SUBNET) unpack(b []byte) error { e.SourceNetmask = b[2] e.SourceScope = b[3] switch e.Family { - case 0: - // "dig" sets AddressFamily to 0 if SourceNetmask is also 0 - // It's okay to accept such a packet - if e.SourceNetmask != 0 { - return errors.New("dns: bad address family") - } - e.Address = net.IPv4(0, 0, 0, 0) case 1: if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 { return errors.New("dns: bad netmask") @@ -308,7 +289,7 @@ func (e *EDNS0_SUBNET) String() (s string) { return } -// The EDNS0_COOKIE option is used to add a DNS Cookie to a message. +// The Cookie EDNS0 option // // o := new(dns.OPT) // o.Hdr.Name = "." @@ -339,7 +320,6 @@ func (e *EDNS0_COOKIE) pack() ([]byte, error) { return h, nil } -// Option implements the EDNS0 interface. func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE } func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil } func (e *EDNS0_COOKIE) String() string { return e.Cookie } @@ -361,7 +341,6 @@ type EDNS0_UL struct { Lease uint32 } -// Option implements the EDNS0 interface. func (e *EDNS0_UL) Option() uint16 { return EDNS0UL } func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) } @@ -391,7 +370,6 @@ type EDNS0_LLQ struct { LeaseLife uint32 } -// Option implements the EDNS0 interface.
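// Pulling the EDNS0_SUBNET pattern documented above together, a complete
// sketch (assumed usage; 192.0.2.0/24 is only a documentation-range example):
//
//	o := new(OPT)
//	o.Hdr = RR_Header{Name: ".", Rrtype: TypeOPT}
//	e := &EDNS0_SUBNET{Code: EDNS0SUBNET, Family: 1,
//		SourceNetmask: 24, Address: net.ParseIP("192.0.2.0").To4()}
//	o.Option = append(o.Option, e)
//	m := new(Msg)
//	m.SetQuestion("example.com.", TypeA)
//	m.Extra = append(m.Extra, o)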
func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ } func (e *EDNS0_LLQ) pack() ([]byte, error) { @@ -423,13 +401,11 @@ func (e *EDNS0_LLQ) String() string { return s } -// EDNS0_DUA implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975. type EDNS0_DAU struct { Code uint16 // Always EDNS0DAU AlgCode []uint8 } -// Option implements the EDNS0 interface. func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } @@ -446,13 +422,11 @@ func (e *EDNS0_DAU) String() string { return s } -// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975. type EDNS0_DHU struct { Code uint16 // Always EDNS0DHU AlgCode []uint8 } -// Option implements the EDNS0 interface. func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } @@ -469,13 +443,11 @@ func (e *EDNS0_DHU) String() string { return s } -// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975. type EDNS0_N3U struct { Code uint16 // Always EDNS0N3U AlgCode []uint8 } -// Option implements the EDNS0 interface. func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } @@ -493,13 +465,11 @@ func (e *EDNS0_N3U) String() string { return s } -// EDNS0_EXPIRE implementes the EDNS0 option as described in RFC 7314. type EDNS0_EXPIRE struct { Code uint16 // Always EDNS0EXPIRE Expire uint32 } -// Option implements the EDNS0 interface. func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } @@ -538,7 +508,6 @@ type EDNS0_LOCAL struct { Data []byte } -// Option implements the EDNS0 interface. func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } func (e *EDNS0_LOCAL) String() string { return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) @@ -561,70 +530,3 @@ func (e *EDNS0_LOCAL) unpack(b []byte) error { } return nil } - -// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep -// the TCP connection alive. See RFC 7828. -type EDNS0_TCP_KEEPALIVE struct { - Code uint16 // Always EDNSTCPKEEPALIVE - Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present; - Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order. -} - -// Option implements the EDNS0 interface. 
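// A sketch of attaching experimental data with the EDNS0_LOCAL option
// defined above (assumed usage; the payload is made up and the code is
// drawn from the local/experimental range):
//
//	o := new(OPT)
//	o.Hdr = RR_Header{Name: ".", Rrtype: TypeOPT}
//	o.Option = append(o.Option, &EDNS0_LOCAL{Code: EDNS0LOCALSTART, Data: []byte{0xde, 0xad}})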
-func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE } - -func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) { - if e.Timeout != 0 && e.Length != 2 { - return nil, errors.New("dns: timeout specified but length is not 2") - } - if e.Timeout == 0 && e.Length != 0 { - return nil, errors.New("dns: timeout not specified but length is not 0") - } - b := make([]byte, 4+e.Length) - binary.BigEndian.PutUint16(b[0:], e.Code) - binary.BigEndian.PutUint16(b[2:], e.Length) - if e.Length == 2 { - binary.BigEndian.PutUint16(b[4:], e.Timeout) - } - return b, nil -} - -func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Length = binary.BigEndian.Uint16(b[2:4]) - if e.Length != 0 && e.Length != 2 { - return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10)) - } - if e.Length == 2 { - if len(b) < 6 { - return ErrBuf - } - e.Timeout = binary.BigEndian.Uint16(b[4:6]) - } - return nil -} - -func (e *EDNS0_TCP_KEEPALIVE) String() (s string) { - s = "use tcp keep-alive" - if e.Length == 0 { - s += ", timeout omitted" - } else { - s += fmt.Sprintf(", timeout %dms", e.Timeout*100) - } - return -} - -// EDNS0_PADDING option is used to add padding to a request/response. The default -// value of padding SHOULD be 0x0 but other values MAY be used, for instance if -// compression is applied before encryption which may break signatures. -type EDNS0_PADDING struct { - Padding []byte -} - -// Option implements the EDNS0 interface. -func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING } -func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil } -func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil } -func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) } diff --git a/vendor/github.com/miekg/dns/edns_test.go b/vendor/github.com/miekg/dns/edns_test.go index f7cf15754098..5fd75abb4559 100644 --- a/vendor/github.com/miekg/dns/edns_test.go +++ b/vendor/github.com/miekg/dns/edns_test.go @@ -7,46 +7,10 @@ func TestOPTTtl(t *testing.T) { e.Hdr.Name = "." e.Hdr.Rrtype = TypeOPT - // verify the default setting of DO=0 if e.Do() { t.Errorf("DO bit should be zero") } - // There are 6 possible invocations of SetDo(): - // - // 1. Starting with DO=0, using SetDo() - // 2. Starting with DO=0, using SetDo(true) - // 3. Starting with DO=0, using SetDo(false) - // 4. Starting with DO=1, using SetDo() - // 5. Starting with DO=1, using SetDo(true) - // 6. 
Starting with DO=1, using SetDo(false) - - // verify that invoking SetDo() sets DO=1 (TEST #1) - e.SetDo() - if !e.Do() { - t.Errorf("DO bit should be non-zero") - } - // verify that using SetDo(true) works when DO=1 (TEST #5) - e.SetDo(true) - if !e.Do() { - t.Errorf("DO bit should still be non-zero") - } - // verify that we can use SetDo(false) to set DO=0 (TEST #6) - e.SetDo(false) - if e.Do() { - t.Errorf("DO bit should be zero") - } - // verify that if we call SetDo(false) when DO=0 that it is unchanged (TEST #3) - e.SetDo(false) - if e.Do() { - t.Errorf("DO bit should still be zero") - } - // verify that using SetDo(true) works for DO=0 (TEST #2) - e.SetDo(true) - if !e.Do() { - t.Errorf("DO bit should be non-zero") - } - // verify that using SetDo() works for DO=1 (TEST #4) e.SetDo() if !e.Do() { t.Errorf("DO bit should be non-zero") @@ -63,6 +27,6 @@ func TestOPTTtl(t *testing.T) { e.SetExtendedRcode(42) if e.ExtendedRcode() != 42 { t.Errorf("set 42, expected %d, got %d", 42, e.ExtendedRcode()) } } diff --git a/vendor/github.com/miekg/dns/fuzz.go b/vendor/github.com/miekg/dns/fuzz.go deleted file mode 100644 index a8a09184d404..000000000000 --- a/vendor/github.com/miekg/dns/fuzz.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build fuzz - -package dns - -func Fuzz(data []byte) int { - msg := new(Msg) - - if err := msg.Unpack(data); err != nil { - return 0 - } - if _, err := msg.Pack(); err != nil { - return 0 - } - - return 1 -} - -func FuzzNewRR(data []byte) int { - if _, err := NewRR(string(data)); err != nil { - return 0 - } - return 1 -} diff --git a/vendor/github.com/miekg/dns/fuzz_test.go b/vendor/github.com/miekg/dns/fuzz_test.go new file mode 100644 index 000000000000..255869730e68 --- /dev/null +++ b/vendor/github.com/miekg/dns/fuzz_test.go @@ -0,0 +1,25 @@ +package dns + +import "testing" + +func TestFuzzString(t *testing.T) { + testcases := []string{"", " MINFO ", " RP ", " NSEC 0 0", " \" NSEC 0 0\"", " \" MINFO \"", + ";a ", ";a����������", + " NSAP O ", " NSAP N ", + " TYPE4 TYPE6a789a3bc0045c8a5fb42c7d1bd998f5444 IN 9579b47d46817afbd17273e6", + " TYPE45 3 3 4147994 TYPE\\(\\)\\)\\(\\)\\(\\(\\)\\(\\)\\)\\)\\(\\)\\(\\)\\(\\(\\R 948\"\")\\(\\)\\)\\)\\(\\ ", + "$GENERATE 0-3 ${441189,5039418474430,o}", + "$INCLUDE 00 TYPE00000000000n ", + "$INCLUDE PE4 TYPE061463623/727071511 \\(\\)\\$GENERATE 6-462/0", + } + for i, tc := range testcases { + rr, err := NewRR(tc) + if err == nil { + // rr can be nil because we can (for instance) just parse a comment + if rr == nil { + continue + } + t.Fatalf("parsed malformed RR %d: %s", i, rr.String()) + } + } +} diff --git a/vendor/github.com/miekg/dns/idn/code_points.go b/vendor/github.com/miekg/dns/idn/code_points.go new file mode 100644 index 000000000000..129c3742f588 --- /dev/null +++ b/vendor/github.com/miekg/dns/idn/code_points.go @@ -0,0 +1,2346 @@ +package idn + +const ( + propertyUnknown property = iota // unknown character property + propertyPVALID // allowed to be used in IDNs + propertyCONTEXTJ // invisible or problematic characters (join controls) + propertyCONTEXTO // invisible or problematic characters (others) + propertyDISALLOWED // should not be included in IDNs + propertyUNASSIGNED // code points that are not designated in the Unicode Standard +) + +// property stores the property of a code point, as described in RFC 5892, +// section 1 +type property int + +// codePoints list all code points in Unicode Character Database (UCD) Format +// according to RFC
5892, appendix B.1. Thanks to libidn2 (GNU) - +// http://www.gnu.org/software/libidn/libidn2/ +var codePoints = []struct { + start rune + end rune + state property +}{ + {0x0000, 0x002C, propertyDISALLOWED}, // <control>..COMMA + {0x002D, 0x0, propertyPVALID}, // HYPHEN-MINUS + {0x002E, 0x002F, propertyDISALLOWED}, // FULL STOP..SOLIDUS + {0x0030, 0x0039, propertyPVALID}, // DIGIT ZERO..DIGIT NINE + {0x003A, 0x0060, propertyDISALLOWED}, // COLON..GRAVE ACCENT + {0x0061, 0x007A, propertyPVALID}, // LATIN SMALL LETTER A..LATIN SMALL LETTER Z + {0x007B, 0x00B6, propertyDISALLOWED}, // LEFT CURLY BRACKET..PILCROW SIGN + {0x00B7, 0x0, propertyCONTEXTO}, // MIDDLE DOT + {0x00B8, 0x00DE, propertyDISALLOWED}, // CEDILLA..LATIN CAPITAL LETTER THORN + {0x00DF, 0x00F6, propertyPVALID}, // LATIN SMALL LETTER SHARP S..LATIN SMALL LETT + {0x00F7, 0x0, propertyDISALLOWED}, // DIVISION SIGN + {0x00F8, 0x00FF, propertyPVALID}, // LATIN SMALL LETTER O WITH STROKE..LATIN SMAL + {0x0100, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH MACRON + {0x0101, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH MACRON + {0x0102, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE + {0x0103, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE + {0x0104, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH OGONEK + {0x0105, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH OGONEK + {0x0106, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH ACUTE + {0x0107, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH ACUTE + {0x0108, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CIRCUMFLEX + {0x0109, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CIRCUMFLEX + {0x010A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH DOT ABOVE + {0x010B, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH DOT ABOVE + {0x010C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CARON + {0x010D, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CARON + {0x010E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH CARON + {0x010F, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CARON + {0x0110, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH STROKE + {0x0111, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH STROKE + {0x0112, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH MACRON + {0x0113, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH MACRON + {0x0114, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH BREVE + {0x0115, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH BREVE + {0x0116, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOT ABOVE + {0x0117, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOT ABOVE + {0x0118, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH OGONEK + {0x0119, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH OGONEK + {0x011A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CARON + {0x011B, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CARON + {0x011C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH CIRCUMFLEX + {0x011D, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CIRCUMFLEX + {0x011E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH BREVE + {0x011F, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH BREVE + {0x0120, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH DOT ABOVE + {0x0121, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH DOT ABOVE + {0x0122, 0x0, propertyDISALLOWED},
// LATIN CAPITAL LETTER G WITH CEDILLA + {0x0123, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CEDILLA + {0x0124, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH CIRCUMFLEX + {0x0125, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH CIRCUMFLEX + {0x0126, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH STROKE + {0x0127, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH STROKE + {0x0128, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH TILDE + {0x0129, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH TILDE + {0x012A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH MACRON + {0x012B, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH MACRON + {0x012C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH BREVE + {0x012D, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH BREVE + {0x012E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH OGONEK + {0x012F, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH OGONEK + {0x0130, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DOT ABOVE + {0x0131, 0x0, propertyPVALID}, // LATIN SMALL LETTER DOTLESS I + {0x0132, 0x0134, propertyDISALLOWED}, // LATIN CAPITAL LIGATURE IJ..LATIN CAPITAL LET + {0x0135, 0x0, propertyPVALID}, // LATIN SMALL LETTER J WITH CIRCUMFLEX + {0x0136, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH CEDILLA + {0x0137, 0x0138, propertyPVALID}, // LATIN SMALL LETTER K WITH CEDILLA..LATIN SMA + {0x0139, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH ACUTE + {0x013A, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH ACUTE + {0x013B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CEDILLA + {0x013C, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH CEDILLA + {0x013D, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CARON + {0x013E, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH CARON + {0x013F, 0x0141, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH MIDDLE DOT..LATI + {0x0142, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH STROKE + {0x0143, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH ACUTE + {0x0144, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH ACUTE + {0x0145, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CEDILLA + {0x0146, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CEDILLA + {0x0147, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CARON + {0x0148, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CARON + {0x0149, 0x014A, propertyDISALLOWED}, // LATIN SMALL LETTER N PRECEDED BY APOSTROPHE. 
+ {0x014B, 0x0, propertyPVALID}, // LATIN SMALL LETTER ENG + {0x014C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MACRON + {0x014D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH MACRON + {0x014E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH BREVE + {0x014F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH BREVE + {0x0150, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOUBLE ACUTE + {0x0151, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOUBLE ACUTE + {0x0152, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LIGATURE OE + {0x0153, 0x0, propertyPVALID}, // LATIN SMALL LIGATURE OE + {0x0154, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH ACUTE + {0x0155, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH ACUTE + {0x0156, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH CEDILLA + {0x0157, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH CEDILLA + {0x0158, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH CARON + {0x0159, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH CARON + {0x015A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH ACUTE + {0x015B, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH ACUTE + {0x015C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CIRCUMFLEX + {0x015D, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CIRCUMFLEX + {0x015E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CEDILLA + {0x015F, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CEDILLA + {0x0160, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CARON + {0x0161, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CARON + {0x0162, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CEDILLA + {0x0163, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CEDILLA + {0x0164, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CARON + {0x0165, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CARON + {0x0166, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH STROKE + {0x0167, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH STROKE + {0x0168, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE + {0x0169, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE + {0x016A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH MACRON + {0x016B, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH MACRON + {0x016C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH BREVE + {0x016D, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH BREVE + {0x016E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH RING ABOVE + {0x016F, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH RING ABOVE + {0x0170, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOUBLE ACUTE + {0x0171, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOUBLE ACUTE + {0x0172, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH OGONEK + {0x0173, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH OGONEK + {0x0174, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH CIRCUMFLEX + {0x0175, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH CIRCUMFLEX + {0x0176, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH CIRCUMFLEX + {0x0177, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH CIRCUMFLEX + {0x0178, 0x0179, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DIAERESIS..LATIN + {0x017A, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH ACUTE + {0x017B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DOT ABOVE + {0x017C, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DOT 
ABOVE + {0x017D, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH CARON + {0x017E, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH CARON + {0x017F, 0x0, propertyDISALLOWED}, // LATIN SMALL LETTER LONG S + {0x0180, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH STROKE + {0x0181, 0x0182, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH HOOK..LATIN CAPI + {0x0183, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH TOPBAR + {0x0184, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TONE SIX + {0x0185, 0x0, propertyPVALID}, // LATIN SMALL LETTER TONE SIX + {0x0186, 0x0187, propertyDISALLOWED}, // LATIN CAPITAL LETTER OPEN O..LATIN CAPITAL L + {0x0188, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH HOOK + {0x0189, 0x018B, propertyDISALLOWED}, // LATIN CAPITAL LETTER AFRICAN D..LATIN CAPITA + {0x018C, 0x018D, propertyPVALID}, // LATIN SMALL LETTER D WITH TOPBAR..LATIN SMAL + {0x018E, 0x0191, propertyDISALLOWED}, // LATIN CAPITAL LETTER REVERSED E..LATIN CAPIT + {0x0192, 0x0, propertyPVALID}, // LATIN SMALL LETTER F WITH HOOK + {0x0193, 0x0194, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH HOOK..LATIN CAPI + {0x0195, 0x0, propertyPVALID}, // LATIN SMALL LETTER HV + {0x0196, 0x0198, propertyDISALLOWED}, // LATIN CAPITAL LETTER IOTA..LATIN CAPITAL LET + {0x0199, 0x019B, propertyPVALID}, // LATIN SMALL LETTER K WITH HOOK..LATIN SMALL + {0x019C, 0x019D, propertyDISALLOWED}, // LATIN CAPITAL LETTER TURNED M..LATIN CAPITAL + {0x019E, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH LONG RIGHT LEG + {0x019F, 0x01A0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MIDDLE TILDE..LA + {0x01A1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN + {0x01A2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER OI + {0x01A3, 0x0, propertyPVALID}, // LATIN SMALL LETTER OI + {0x01A4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH HOOK + {0x01A5, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH HOOK + {0x01A6, 0x01A7, propertyDISALLOWED}, // LATIN LETTER YR..LATIN CAPITAL LETTER TONE T + {0x01A8, 0x0, propertyPVALID}, // LATIN SMALL LETTER TONE TWO + {0x01A9, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER ESH + {0x01AA, 0x01AB, propertyPVALID}, // LATIN LETTER REVERSED ESH LOOP..LATIN SMALL + {0x01AC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH HOOK + {0x01AD, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH HOOK + {0x01AE, 0x01AF, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH RETROFLEX HOOK.. 
+ {0x01B0, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN + {0x01B1, 0x01B3, propertyDISALLOWED}, // LATIN CAPITAL LETTER UPSILON..LATIN CAPITAL + {0x01B4, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH HOOK + {0x01B5, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH STROKE + {0x01B6, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH STROKE + {0x01B7, 0x01B8, propertyDISALLOWED}, // LATIN CAPITAL LETTER EZH..LATIN CAPITAL LETT + {0x01B9, 0x01BB, propertyPVALID}, // LATIN SMALL LETTER EZH REVERSED..LATIN LETTE + {0x01BC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TONE FIVE + {0x01BD, 0x01C3, propertyPVALID}, // LATIN SMALL LETTER TONE FIVE..LATIN LETTER R + {0x01C4, 0x01CD, propertyDISALLOWED}, // LATIN CAPITAL LETTER DZ WITH CARON..LATIN CA + {0x01CE, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CARON + {0x01CF, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH CARON + {0x01D0, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH CARON + {0x01D1, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CARON + {0x01D2, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CARON + {0x01D3, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH CARON + {0x01D4, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH CARON + {0x01D5, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND MA + {0x01D6, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND MACR + {0x01D7, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND AC + {0x01D8, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND ACUT + {0x01D9, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND CA + {0x01DA, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND CARO + {0x01DB, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND GR + {0x01DC, 0x01DD, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND GRAV + {0x01DE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DIAERESIS AND MA + {0x01DF, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DIAERESIS AND MACR + {0x01E0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT ABOVE AND MA + {0x01E1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOT ABOVE AND MACR + {0x01E2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AE WITH MACRON + {0x01E3, 0x0, propertyPVALID}, // LATIN SMALL LETTER AE WITH MACRON + {0x01E4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH STROKE + {0x01E5, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH STROKE + {0x01E6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH CARON + {0x01E7, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CARON + {0x01E8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH CARON + {0x01E9, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH CARON + {0x01EA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH OGONEK + {0x01EB, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH OGONEK + {0x01EC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH OGONEK AND MACRO + {0x01ED, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH OGONEK AND MACRON + {0x01EE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER EZH WITH CARON + {0x01EF, 0x01F0, propertyPVALID}, // LATIN SMALL LETTER EZH WITH CARON..LATIN SMA + {0x01F1, 0x01F4, propertyDISALLOWED}, // LATIN CAPITAL LETTER DZ..LATIN CAPITAL LETTE + {0x01F5, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH ACUTE + {0x01F6, 0x01F8, propertyDISALLOWED}, // LATIN CAPITAL LETTER HWAIR..LATIN 
CAPITAL LE + {0x01F9, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH GRAVE + {0x01FA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH RING ABOVE AND A + {0x01FB, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH RING ABOVE AND ACU + {0x01FC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AE WITH ACUTE + {0x01FD, 0x0, propertyPVALID}, // LATIN SMALL LETTER AE WITH ACUTE + {0x01FE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH STROKE AND ACUTE + {0x01FF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH STROKE AND ACUTE + {0x0200, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOUBLE GRAVE + {0x0201, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOUBLE GRAVE + {0x0202, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH INVERTED BREVE + {0x0203, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH INVERTED BREVE + {0x0204, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOUBLE GRAVE + {0x0205, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOUBLE GRAVE + {0x0206, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH INVERTED BREVE + {0x0207, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH INVERTED BREVE + {0x0208, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DOUBLE GRAVE + {0x0209, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH DOUBLE GRAVE + {0x020A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH INVERTED BREVE + {0x020B, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH INVERTED BREVE + {0x020C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOUBLE GRAVE + {0x020D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOUBLE GRAVE + {0x020E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH INVERTED BREVE + {0x020F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH INVERTED BREVE + {0x0210, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOUBLE GRAVE + {0x0211, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOUBLE GRAVE + {0x0212, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH INVERTED BREVE + {0x0213, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH INVERTED BREVE + {0x0214, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOUBLE GRAVE + {0x0215, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOUBLE GRAVE + {0x0216, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH INVERTED BREVE + {0x0217, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH INVERTED BREVE + {0x0218, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH COMMA BELOW + {0x0219, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH COMMA BELOW + {0x021A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH COMMA BELOW + {0x021B, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH COMMA BELOW + {0x021C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER YOGH + {0x021D, 0x0, propertyPVALID}, // LATIN SMALL LETTER YOGH + {0x021E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH CARON + {0x021F, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH CARON + {0x0220, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH LONG RIGHT LEG + {0x0221, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CURL + {0x0222, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER OU + {0x0223, 0x0, propertyPVALID}, // LATIN SMALL LETTER OU + {0x0224, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH HOOK + {0x0225, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH HOOK + {0x0226, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT ABOVE + {0x0227, 0x0, propertyPVALID}, // 
LATIN SMALL LETTER A WITH DOT ABOVE + {0x0228, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CEDILLA + {0x0229, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CEDILLA + {0x022A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DIAERESIS AND MA + {0x022B, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DIAERESIS AND MACR + {0x022C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH TILDE AND MACRON + {0x022D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH TILDE AND MACRON + {0x022E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOT ABOVE + {0x022F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOT ABOVE + {0x0230, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOT ABOVE AND MA + {0x0231, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOT ABOVE AND MACR + {0x0232, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH MACRON + {0x0233, 0x0239, propertyPVALID}, // LATIN SMALL LETTER Y WITH MACRON..LATIN SMAL + {0x023A, 0x023B, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH STROKE..LATIN CA + {0x023C, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH STROKE + {0x023D, 0x023E, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH BAR..LATIN CAPIT + {0x023F, 0x0240, propertyPVALID}, // LATIN SMALL LETTER S WITH SWASH TAIL..LATIN + {0x0241, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER GLOTTAL STOP + {0x0242, 0x0, propertyPVALID}, // LATIN SMALL LETTER GLOTTAL STOP + {0x0243, 0x0246, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH STROKE..LATIN CA + {0x0247, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH STROKE + {0x0248, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER J WITH STROKE + {0x0249, 0x0, propertyPVALID}, // LATIN SMALL LETTER J WITH STROKE + {0x024A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER SMALL Q WITH HOOK TAIL + {0x024B, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH HOOK TAIL + {0x024C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH STROKE + {0x024D, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH STROKE + {0x024E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH STROKE + {0x024F, 0x02AF, propertyPVALID}, // LATIN SMALL LETTER Y WITH STROKE..LATIN SMAL + {0x02B0, 0x02B8, propertyDISALLOWED}, // MODIFIER LETTER SMALL H..MODIFIER LETTER SMA + {0x02B9, 0x02C1, propertyPVALID}, // MODIFIER LETTER PRIME..MODIFIER LETTER REVER + {0x02C2, 0x02C5, propertyDISALLOWED}, // MODIFIER LETTER LEFT ARROWHEAD..MODIFIER LET + {0x02C6, 0x02D1, propertyPVALID}, // MODIFIER LETTER CIRCUMFLEX ACCENT..MODIFIER + {0x02D2, 0x02EB, propertyDISALLOWED}, // MODIFIER LETTER CENTRED RIGHT HALF RING..MOD + {0x02EC, 0x0, propertyPVALID}, // MODIFIER LETTER VOICING + {0x02ED, 0x0, propertyDISALLOWED}, // MODIFIER LETTER UNASPIRATED + {0x02EE, 0x0, propertyPVALID}, // MODIFIER LETTER DOUBLE APOSTROPHE + {0x02EF, 0x02FF, propertyDISALLOWED}, // MODIFIER LETTER LOW DOWN ARROWHEAD..MODIFIER + {0x0300, 0x033F, propertyPVALID}, // COMBINING GRAVE ACCENT..COMBINING DOUBLE OVE + {0x0340, 0x0341, propertyDISALLOWED}, // COMBINING GRAVE TONE MARK..COMBINING ACUTE T + {0x0342, 0x0, propertyPVALID}, // COMBINING GREEK PERISPOMENI + {0x0343, 0x0345, propertyDISALLOWED}, // COMBINING GREEK KORONIS..COMBINING GREEK YPO + {0x0346, 0x034E, propertyPVALID}, // COMBINING BRIDGE ABOVE..COMBINING UPWARDS AR + {0x034F, 0x0, propertyDISALLOWED}, // COMBINING GRAPHEME JOINER + {0x0350, 0x036F, propertyPVALID}, // COMBINING RIGHT ARROWHEAD ABOVE..COMBINING L + {0x0370, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER 
HETA + {0x0371, 0x0, propertyPVALID}, // GREEK SMALL LETTER HETA + {0x0372, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER ARCHAIC SAMPI + {0x0373, 0x0, propertyPVALID}, // GREEK SMALL LETTER ARCHAIC SAMPI + {0x0374, 0x0, propertyDISALLOWED}, // GREEK NUMERAL SIGN + {0x0375, 0x0, propertyCONTEXTO}, // GREEK LOWER NUMERAL SIGN + {0x0376, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER PAMPHYLIAN DIGAMMA + {0x0377, 0x0, propertyPVALID}, // GREEK SMALL LETTER PAMPHYLIAN DIGAMMA + {0x0378, 0x0379, propertyUNASSIGNED}, // .. + {0x037A, 0x0, propertyDISALLOWED}, // GREEK YPOGEGRAMMENI + {0x037B, 0x037D, propertyPVALID}, // GREEK SMALL REVERSED LUNATE SIGMA SYMBOL..GR + {0x037E, 0x0, propertyDISALLOWED}, // GREEK QUESTION MARK + {0x037F, 0x0383, propertyUNASSIGNED}, // .. + {0x0384, 0x038A, propertyDISALLOWED}, // GREEK TONOS..GREEK CAPITAL LETTER IOTA WITH + {0x038B, 0x0, propertyUNASSIGNED}, // + {0x038C, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMICRON WITH TONOS + {0x038D, 0x0, propertyUNASSIGNED}, // + {0x038E, 0x038F, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH TONOS..GRE + {0x0390, 0x0, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH DIALYTIKA AND T + {0x0391, 0x03A1, propertyDISALLOWED}, // GREEK CAPITAL LETTER ALPHA..GREEK CAPITAL LE + {0x03A2, 0x0, propertyUNASSIGNED}, // + {0x03A3, 0x03AB, propertyDISALLOWED}, // GREEK CAPITAL LETTER SIGMA..GREEK CAPITAL LE + {0x03AC, 0x03CE, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH TONOS..GREEK S + {0x03CF, 0x03D6, propertyDISALLOWED}, // GREEK CAPITAL KAI SYMBOL..GREEK PI SYMBOL + {0x03D7, 0x0, propertyPVALID}, // GREEK KAI SYMBOL + {0x03D8, 0x0, propertyDISALLOWED}, // GREEK LETTER ARCHAIC KOPPA + {0x03D9, 0x0, propertyPVALID}, // GREEK SMALL LETTER ARCHAIC KOPPA + {0x03DA, 0x0, propertyDISALLOWED}, // GREEK LETTER STIGMA + {0x03DB, 0x0, propertyPVALID}, // GREEK SMALL LETTER STIGMA + {0x03DC, 0x0, propertyDISALLOWED}, // GREEK LETTER DIGAMMA + {0x03DD, 0x0, propertyPVALID}, // GREEK SMALL LETTER DIGAMMA + {0x03DE, 0x0, propertyDISALLOWED}, // GREEK LETTER KOPPA + {0x03DF, 0x0, propertyPVALID}, // GREEK SMALL LETTER KOPPA + {0x03E0, 0x0, propertyDISALLOWED}, // GREEK LETTER SAMPI + {0x03E1, 0x0, propertyPVALID}, // GREEK SMALL LETTER SAMPI + {0x03E2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SHEI + {0x03E3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SHEI + {0x03E4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER FEI + {0x03E5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER FEI + {0x03E6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KHEI + {0x03E7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KHEI + {0x03E8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER HORI + {0x03E9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER HORI + {0x03EA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER GANGIA + {0x03EB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER GANGIA + {0x03EC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SHIMA + {0x03ED, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SHIMA + {0x03EE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DEI + {0x03EF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DEI + {0x03F0, 0x03F2, propertyDISALLOWED}, // GREEK KAPPA SYMBOL..GREEK LUNATE SIGMA SYMBO + {0x03F3, 0x0, propertyPVALID}, // GREEK LETTER YOT + {0x03F4, 0x03F7, propertyDISALLOWED}, // GREEK CAPITAL THETA SYMBOL..GREEK CAPITAL LE + {0x03F8, 0x0, propertyPVALID}, // GREEK SMALL LETTER SHO + {0x03F9, 0x03FA, propertyDISALLOWED}, // GREEK CAPITAL LUNATE SIGMA SYMBOL..GREEK CAP + {0x03FB, 0x03FC, 
propertyPVALID}, // GREEK SMALL LETTER SAN..GREEK RHO WITH STROK + {0x03FD, 0x042F, propertyDISALLOWED}, // GREEK CAPITAL REVERSED LUNATE SIGMA SYMBOL.. + {0x0430, 0x045F, propertyPVALID}, // CYRILLIC SMALL LETTER A..CYRILLIC SMALL LETT + {0x0460, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER OMEGA + {0x0461, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER OMEGA + {0x0462, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YAT + {0x0463, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YAT + {0x0464, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED E + {0x0465, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED E + {0x0466, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER LITTLE YUS + {0x0467, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER LITTLE YUS + {0x0468, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED LITTLE YUS + {0x0469, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED LITTLE YUS + {0x046A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BIG YUS + {0x046B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BIG YUS + {0x046C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED BIG YUS + {0x046D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED BIG YUS + {0x046E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KSI + {0x046F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KSI + {0x0470, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER PSI + {0x0471, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER PSI + {0x0472, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER FITA + {0x0473, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER FITA + {0x0474, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IZHITSA + {0x0475, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IZHITSA + {0x0476, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IZHITSA WITH DOUBLE + {0x0477, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IZHITSA WITH DOUBLE GR + {0x0478, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER UK + {0x0479, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER UK + {0x047A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ROUND OMEGA + {0x047B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ROUND OMEGA + {0x047C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER OMEGA WITH TITLO + {0x047D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER OMEGA WITH TITLO + {0x047E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER OT + {0x047F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER OT + {0x0480, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOPPA + {0x0481, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOPPA + {0x0482, 0x0, propertyDISALLOWED}, // CYRILLIC THOUSANDS SIGN + {0x0483, 0x0487, propertyPVALID}, // COMBINING CYRILLIC TITLO..COMBINING CYRILLIC + {0x0488, 0x048A, propertyDISALLOWED}, // COMBINING CYRILLIC HUNDRED THOUSANDS SIGN..C + {0x048B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SHORT I WITH TAIL + {0x048C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SEMISOFT SIGN + {0x048D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SEMISOFT SIGN + {0x048E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ER WITH TICK + {0x048F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ER WITH TICK + {0x0490, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH UPTURN + {0x0491, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH UPTURN + {0x0492, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH STROKE + {0x0493, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH STROKE + 
{0x0494, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH MIDDLE HOOK + {0x0495, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH MIDDLE HOOK + {0x0496, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER + {0x0497, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHE WITH DESCENDER + {0x0498, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZE WITH DESCENDER + {0x0499, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZE WITH DESCENDER + {0x049A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH DESCENDER + {0x049B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH DESCENDER + {0x049C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH VERTICAL STR + {0x049D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH VERTICAL STROK + {0x049E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH STROKE + {0x049F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH STROKE + {0x04A0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BASHKIR KA + {0x04A1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BASHKIR KA + {0x04A2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH DESCENDER + {0x04A3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH DESCENDER + {0x04A4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LIGATURE EN GHE + {0x04A5, 0x0, propertyPVALID}, // CYRILLIC SMALL LIGATURE EN GHE + {0x04A6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER PE WITH MIDDLE HOOK + {0x04A7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER PE WITH MIDDLE HOOK + {0x04A8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN HA + {0x04A9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN HA + {0x04AA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ES WITH DESCENDER + {0x04AB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ES WITH DESCENDER + {0x04AC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TE WITH DESCENDER + {0x04AD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TE WITH DESCENDER + {0x04AE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER STRAIGHT U + {0x04AF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER STRAIGHT U + {0x04B0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER STRAIGHT U WITH STRO + {0x04B1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE + {0x04B2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HA WITH DESCENDER + {0x04B3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HA WITH DESCENDER + {0x04B4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LIGATURE TE TSE + {0x04B5, 0x0, propertyPVALID}, // CYRILLIC SMALL LIGATURE TE TSE + {0x04B6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CHE WITH DESCENDER + {0x04B7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CHE WITH DESCENDER + {0x04B8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CHE WITH VERTICAL ST + {0x04B9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CHE WITH VERTICAL STRO + {0x04BA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SHHA + {0x04BB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SHHA + {0x04BC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN CHE + {0x04BD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN CHE + {0x04BE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN CHE WITH D + {0x04BF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN CHE WITH DES + {0x04C0, 0x04C1, propertyDISALLOWED}, // CYRILLIC LETTER PALOCHKA..CYRILLIC CAPITAL L + {0x04C2, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER 
ZHE WITH BREVE + {0x04C3, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH HOOK + {0x04C4, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH HOOK + {0x04C5, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EL WITH TAIL + {0x04C6, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EL WITH TAIL + {0x04C7, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH HOOK + {0x04C8, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH HOOK + {0x04C9, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH TAIL + {0x04CA, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH TAIL + {0x04CB, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KHAKASSIAN CHE + {0x04CC, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KHAKASSIAN CHE + {0x04CD, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EM WITH TAIL + {0x04CE, 0x04CF, propertyPVALID}, // CYRILLIC SMALL LETTER EM WITH TAIL..CYRILLIC + {0x04D0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER A WITH BREVE + {0x04D1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER A WITH BREVE + {0x04D2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER A WITH DIAERESIS + {0x04D3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER A WITH DIAERESIS + {0x04D4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LIGATURE A IE + {0x04D5, 0x0, propertyPVALID}, // CYRILLIC SMALL LIGATURE A IE + {0x04D6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IE WITH BREVE + {0x04D7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IE WITH BREVE + {0x04D8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SCHWA + {0x04D9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SCHWA + {0x04DA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SCHWA WITH DIAERESIS + {0x04DB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SCHWA WITH DIAERESIS + {0x04DC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZHE WITH DIAERESIS + {0x04DD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHE WITH DIAERESIS + {0x04DE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZE WITH DIAERESIS + {0x04DF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZE WITH DIAERESIS + {0x04E0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN DZE + {0x04E1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN DZE + {0x04E2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER I WITH MACRON + {0x04E3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER I WITH MACRON + {0x04E4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER I WITH DIAERESIS + {0x04E5, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER I WITH DIAERESIS + {0x04E6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER O WITH DIAERESIS + {0x04E7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER O WITH DIAERESIS + {0x04E8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BARRED O + {0x04E9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BARRED O + {0x04EA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BARRED O WITH DIAERE + {0x04EB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BARRED O WITH DIAERESI + {0x04EC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER E WITH DIAERESIS + {0x04ED, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER E WITH DIAERESIS + {0x04EE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER U WITH MACRON + {0x04EF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER U WITH MACRON + {0x04F0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER U WITH DIAERESIS + {0x04F1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER U WITH DIAERESIS + {0x04F2, 0x0, propertyDISALLOWED}, // CYRILLIC 
CAPITAL LETTER U WITH DOUBLE ACUTE + {0x04F3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER U WITH DOUBLE ACUTE + {0x04F4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CHE WITH DIAERESIS + {0x04F5, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CHE WITH DIAERESIS + {0x04F6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH DESCENDER + {0x04F7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH DESCENDER + {0x04F8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YERU WITH DIAERESIS + {0x04F9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YERU WITH DIAERESIS + {0x04FA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH STROKE AND + {0x04FB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH STROKE AND HO + {0x04FC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HA WITH HOOK + {0x04FD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HA WITH HOOK + {0x04FE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HA WITH STROKE + {0x04FF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HA WITH STROKE + {0x0500, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI DE + {0x0501, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI DE + {0x0502, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI DJE + {0x0503, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI DJE + {0x0504, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI ZJE + {0x0505, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI ZJE + {0x0506, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI DZJE + {0x0507, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI DZJE + {0x0508, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI LJE + {0x0509, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI LJE + {0x050A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI NJE + {0x050B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI NJE + {0x050C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI SJE + {0x050D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI SJE + {0x050E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI TJE + {0x050F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI TJE + {0x0510, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED ZE + {0x0511, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED ZE + {0x0512, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EL WITH HOOK + {0x0513, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EL WITH HOOK + {0x0514, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER LHA + {0x0515, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER LHA + {0x0516, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER RHA + {0x0517, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER RHA + {0x0518, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YAE + {0x0519, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YAE + {0x051A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER QA + {0x051B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER QA + {0x051C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER WE + {0x051D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER WE + {0x051E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ALEUT KA + {0x051F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ALEUT KA + {0x0520, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EL WITH MIDDLE HOOK + {0x0521, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EL WITH MIDDLE HOOK + {0x0522, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH MIDDLE HOOK + {0x0523, 0x0, propertyPVALID}, // CYRILLIC 
SMALL LETTER EN WITH MIDDLE HOOK + {0x0524, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER PE WITH DESCENDER + {0x0525, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER PE WITH DESCENDER + {0x0526, 0x0530, propertyUNASSIGNED}, // .. + {0x0531, 0x0556, propertyDISALLOWED}, // ARMENIAN CAPITAL LETTER AYB..ARMENIAN CAPITA + {0x0557, 0x0558, propertyUNASSIGNED}, // .. + {0x0559, 0x0, propertyPVALID}, // ARMENIAN MODIFIER LETTER LEFT HALF RING + {0x055A, 0x055F, propertyDISALLOWED}, // ARMENIAN APOSTROPHE..ARMENIAN ABBREVIATION M + {0x0560, 0x0, propertyUNASSIGNED}, // + {0x0561, 0x0586, propertyPVALID}, // ARMENIAN SMALL LETTER AYB..ARMENIAN SMALL LE + {0x0587, 0x0, propertyDISALLOWED}, // ARMENIAN SMALL LIGATURE ECH YIWN + {0x0588, 0x0, propertyUNASSIGNED}, // + {0x0589, 0x058A, propertyDISALLOWED}, // ARMENIAN FULL STOP..ARMENIAN HYPHEN + {0x058B, 0x0590, propertyUNASSIGNED}, // .. + {0x0591, 0x05BD, propertyPVALID}, // HEBREW ACCENT ETNAHTA..HEBREW POINT METEG + {0x05BE, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION MAQAF + {0x05BF, 0x0, propertyPVALID}, // HEBREW POINT RAFE + {0x05C0, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION PASEQ + {0x05C1, 0x05C2, propertyPVALID}, // HEBREW POINT SHIN DOT..HEBREW POINT SIN DOT + {0x05C3, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION SOF PASUQ + {0x05C4, 0x05C5, propertyPVALID}, // HEBREW MARK UPPER DOT..HEBREW MARK LOWER DOT + {0x05C6, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION NUN HAFUKHA + {0x05C7, 0x0, propertyPVALID}, // HEBREW POINT QAMATS QATAN + {0x05C8, 0x05CF, propertyUNASSIGNED}, // .. + {0x05D0, 0x05EA, propertyPVALID}, // HEBREW LETTER ALEF..HEBREW LETTER TAV + {0x05EB, 0x05EF, propertyUNASSIGNED}, // .. + {0x05F0, 0x05F2, propertyPVALID}, // HEBREW LIGATURE YIDDISH DOUBLE VAV..HEBREW L + {0x05F3, 0x05F4, propertyCONTEXTO}, // HEBREW PUNCTUATION GERESH..HEBREW PUNCTUATIO + {0x05F5, 0x05FF, propertyUNASSIGNED}, // .. + {0x0600, 0x0603, propertyDISALLOWED}, // ARABIC NUMBER SIGN..ARABIC SIGN SAFHA + {0x0604, 0x0605, propertyUNASSIGNED}, // .. + {0x0606, 0x060F, propertyDISALLOWED}, // ARABIC-INDIC CUBE ROOT..ARABIC SIGN MISRA + {0x0610, 0x061A, propertyPVALID}, // ARABIC SIGN SALLALLAHOU ALAYHE WASSALLAM..AR + {0x061B, 0x0, propertyDISALLOWED}, // ARABIC SEMICOLON + {0x061C, 0x061D, propertyUNASSIGNED}, // .. 
+ {0x061E, 0x061F, propertyDISALLOWED}, // ARABIC TRIPLE DOT PUNCTUATION MARK..ARABIC Q + {0x0620, 0x0, propertyUNASSIGNED}, // + {0x0621, 0x063F, propertyPVALID}, // ARABIC LETTER HAMZA..ARABIC LETTER FARSI YEH + {0x0640, 0x0, propertyDISALLOWED}, // ARABIC TATWEEL + {0x0641, 0x065E, propertyPVALID}, // ARABIC LETTER FEH..ARABIC FATHA WITH TWO DOT + {0x065F, 0x0, propertyUNASSIGNED}, // + {0x0660, 0x0669, propertyCONTEXTO}, // ARABIC-INDIC DIGIT ZERO..ARABIC-INDIC DIGIT + {0x066A, 0x066D, propertyDISALLOWED}, // ARABIC PERCENT SIGN..ARABIC FIVE POINTED STA + {0x066E, 0x0674, propertyPVALID}, // ARABIC LETTER DOTLESS BEH..ARABIC LETTER HIG + {0x0675, 0x0678, propertyDISALLOWED}, // ARABIC LETTER HIGH HAMZA ALEF..ARABIC LETTER + {0x0679, 0x06D3, propertyPVALID}, // ARABIC LETTER TTEH..ARABIC LETTER YEH BARREE + {0x06D4, 0x0, propertyDISALLOWED}, // ARABIC FULL STOP + {0x06D5, 0x06DC, propertyPVALID}, // ARABIC LETTER AE..ARABIC SMALL HIGH SEEN + {0x06DD, 0x06DE, propertyDISALLOWED}, // ARABIC END OF AYAH..ARABIC START OF RUB EL H + {0x06DF, 0x06E8, propertyPVALID}, // ARABIC SMALL HIGH ROUNDED ZERO..ARABIC SMALL + {0x06E9, 0x0, propertyDISALLOWED}, // ARABIC PLACE OF SAJDAH + {0x06EA, 0x06EF, propertyPVALID}, // ARABIC EMPTY CENTRE LOW STOP..ARABIC LETTER + {0x06F0, 0x06F9, propertyCONTEXTO}, // EXTENDED ARABIC-INDIC DIGIT ZERO..EXTENDED A + {0x06FA, 0x06FF, propertyPVALID}, // ARABIC LETTER SHEEN WITH DOT BELOW..ARABIC L + {0x0700, 0x070D, propertyDISALLOWED}, // SYRIAC END OF PARAGRAPH..SYRIAC HARKLEAN AST + {0x070E, 0x0, propertyUNASSIGNED}, // + {0x070F, 0x0, propertyDISALLOWED}, // SYRIAC ABBREVIATION MARK + {0x0710, 0x074A, propertyPVALID}, // SYRIAC LETTER ALAPH..SYRIAC BARREKH + {0x074B, 0x074C, propertyUNASSIGNED}, // .. + {0x074D, 0x07B1, propertyPVALID}, // SYRIAC LETTER SOGDIAN ZHAIN..THAANA LETTER N + {0x07B2, 0x07BF, propertyUNASSIGNED}, // .. + {0x07C0, 0x07F5, propertyPVALID}, // NKO DIGIT ZERO..NKO LOW TONE APOSTROPHE + {0x07F6, 0x07FA, propertyDISALLOWED}, // NKO SYMBOL OO DENNEN..NKO LAJANYALAN + {0x07FB, 0x07FF, propertyUNASSIGNED}, // .. + {0x0800, 0x082D, propertyPVALID}, // SAMARITAN LETTER ALAF..SAMARITAN MARK NEQUDA + {0x082E, 0x082F, propertyUNASSIGNED}, // .. + {0x0830, 0x083E, propertyDISALLOWED}, // SAMARITAN PUNCTUATION NEQUDAA..SAMARITAN PUN + {0x083F, 0x08FF, propertyUNASSIGNED}, // .. + {0x0900, 0x0939, propertyPVALID}, // DEVANAGARI SIGN INVERTED CANDRABINDU..DEVANA + {0x093A, 0x093B, propertyUNASSIGNED}, // .. + {0x093C, 0x094E, propertyPVALID}, // DEVANAGARI SIGN NUKTA..DEVANAGARI VOWEL SIGN + {0x094F, 0x0, propertyUNASSIGNED}, // + {0x0950, 0x0955, propertyPVALID}, // DEVANAGARI OM..DEVANAGARI VOWEL SIGN CANDRA + {0x0956, 0x0957, propertyUNASSIGNED}, // .. + {0x0958, 0x095F, propertyDISALLOWED}, // DEVANAGARI LETTER QA..DEVANAGARI LETTER YYA + {0x0960, 0x0963, propertyPVALID}, // DEVANAGARI LETTER VOCALIC RR..DEVANAGARI VOW + {0x0964, 0x0965, propertyDISALLOWED}, // DEVANAGARI DANDA..DEVANAGARI DOUBLE DANDA + {0x0966, 0x096F, propertyPVALID}, // DEVANAGARI DIGIT ZERO..DEVANAGARI DIGIT NINE + {0x0970, 0x0, propertyDISALLOWED}, // DEVANAGARI ABBREVIATION SIGN + {0x0971, 0x0972, propertyPVALID}, // DEVANAGARI SIGN HIGH SPACING DOT..DEVANAGARI + {0x0973, 0x0978, propertyUNASSIGNED}, // .. 
+ {0x0979, 0x097F, propertyPVALID}, // DEVANAGARI LETTER ZHA..DEVANAGARI LETTER BBA + {0x0980, 0x0, propertyUNASSIGNED}, // + {0x0981, 0x0983, propertyPVALID}, // BENGALI SIGN CANDRABINDU..BENGALI SIGN VISAR + {0x0984, 0x0, propertyUNASSIGNED}, // + {0x0985, 0x098C, propertyPVALID}, // BENGALI LETTER A..BENGALI LETTER VOCALIC L + {0x098D, 0x098E, propertyUNASSIGNED}, // .. + {0x098F, 0x0990, propertyPVALID}, // BENGALI LETTER E..BENGALI LETTER AI + {0x0991, 0x0992, propertyUNASSIGNED}, // .. + {0x0993, 0x09A8, propertyPVALID}, // BENGALI LETTER O..BENGALI LETTER NA + {0x09A9, 0x0, propertyUNASSIGNED}, // + {0x09AA, 0x09B0, propertyPVALID}, // BENGALI LETTER PA..BENGALI LETTER RA + {0x09B1, 0x0, propertyUNASSIGNED}, // + {0x09B2, 0x0, propertyPVALID}, // BENGALI LETTER LA + {0x09B3, 0x09B5, propertyUNASSIGNED}, // .. + {0x09B6, 0x09B9, propertyPVALID}, // BENGALI LETTER SHA..BENGALI LETTER HA + {0x09BA, 0x09BB, propertyUNASSIGNED}, // .. + {0x09BC, 0x09C4, propertyPVALID}, // BENGALI SIGN NUKTA..BENGALI VOWEL SIGN VOCAL + {0x09C5, 0x09C6, propertyUNASSIGNED}, // .. + {0x09C7, 0x09C8, propertyPVALID}, // BENGALI VOWEL SIGN E..BENGALI VOWEL SIGN AI + {0x09C9, 0x09CA, propertyUNASSIGNED}, // .. + {0x09CB, 0x09CE, propertyPVALID}, // BENGALI VOWEL SIGN O..BENGALI LETTER KHANDA + {0x09CF, 0x09D6, propertyUNASSIGNED}, // .. + {0x09D7, 0x0, propertyPVALID}, // BENGALI AU LENGTH MARK + {0x09D8, 0x09DB, propertyUNASSIGNED}, // .. + {0x09DC, 0x09DD, propertyDISALLOWED}, // BENGALI LETTER RRA..BENGALI LETTER RHA + {0x09DE, 0x0, propertyUNASSIGNED}, // + {0x09DF, 0x0, propertyDISALLOWED}, // BENGALI LETTER YYA + {0x09E0, 0x09E3, propertyPVALID}, // BENGALI LETTER VOCALIC RR..BENGALI VOWEL SIG + {0x09E4, 0x09E5, propertyUNASSIGNED}, // .. + {0x09E6, 0x09F1, propertyPVALID}, // BENGALI DIGIT ZERO..BENGALI LETTER RA WITH L + {0x09F2, 0x09FB, propertyDISALLOWED}, // BENGALI RUPEE MARK..BENGALI GANDA MARK + {0x09FC, 0x0A00, propertyUNASSIGNED}, // .. + {0x0A01, 0x0A03, propertyPVALID}, // GURMUKHI SIGN ADAK BINDI..GURMUKHI SIGN VISA + {0x0A04, 0x0, propertyUNASSIGNED}, // + {0x0A05, 0x0A0A, propertyPVALID}, // GURMUKHI LETTER A..GURMUKHI LETTER UU + {0x0A0B, 0x0A0E, propertyUNASSIGNED}, // .. + {0x0A0F, 0x0A10, propertyPVALID}, // GURMUKHI LETTER EE..GURMUKHI LETTER AI + {0x0A11, 0x0A12, propertyUNASSIGNED}, // .. + {0x0A13, 0x0A28, propertyPVALID}, // GURMUKHI LETTER OO..GURMUKHI LETTER NA + {0x0A29, 0x0, propertyUNASSIGNED}, // + {0x0A2A, 0x0A30, propertyPVALID}, // GURMUKHI LETTER PA..GURMUKHI LETTER RA + {0x0A31, 0x0, propertyUNASSIGNED}, // + {0x0A32, 0x0, propertyPVALID}, // GURMUKHI LETTER LA + {0x0A33, 0x0, propertyDISALLOWED}, // GURMUKHI LETTER LLA + {0x0A34, 0x0, propertyUNASSIGNED}, // + {0x0A35, 0x0, propertyPVALID}, // GURMUKHI LETTER VA + {0x0A36, 0x0, propertyDISALLOWED}, // GURMUKHI LETTER SHA + {0x0A37, 0x0, propertyUNASSIGNED}, // + {0x0A38, 0x0A39, propertyPVALID}, // GURMUKHI LETTER SA..GURMUKHI LETTER HA + {0x0A3A, 0x0A3B, propertyUNASSIGNED}, // .. + {0x0A3C, 0x0, propertyPVALID}, // GURMUKHI SIGN NUKTA + {0x0A3D, 0x0, propertyUNASSIGNED}, // + {0x0A3E, 0x0A42, propertyPVALID}, // GURMUKHI VOWEL SIGN AA..GURMUKHI VOWEL SIGN + {0x0A43, 0x0A46, propertyUNASSIGNED}, // .. + {0x0A47, 0x0A48, propertyPVALID}, // GURMUKHI VOWEL SIGN EE..GURMUKHI VOWEL SIGN + {0x0A49, 0x0A4A, propertyUNASSIGNED}, // .. + {0x0A4B, 0x0A4D, propertyPVALID}, // GURMUKHI VOWEL SIGN OO..GURMUKHI SIGN VIRAMA + {0x0A4E, 0x0A50, propertyUNASSIGNED}, // .. 
+ {0x0A51, 0x0, propertyPVALID}, // GURMUKHI SIGN UDAAT + {0x0A52, 0x0A58, propertyUNASSIGNED}, // .. + {0x0A59, 0x0A5B, propertyDISALLOWED}, // GURMUKHI LETTER KHHA..GURMUKHI LETTER ZA + {0x0A5C, 0x0, propertyPVALID}, // GURMUKHI LETTER RRA + {0x0A5D, 0x0, propertyUNASSIGNED}, // + {0x0A5E, 0x0, propertyDISALLOWED}, // GURMUKHI LETTER FA + {0x0A5F, 0x0A65, propertyUNASSIGNED}, // .. + {0x0A66, 0x0A75, propertyPVALID}, // GURMUKHI DIGIT ZERO..GURMUKHI SIGN YAKASH + {0x0A76, 0x0A80, propertyUNASSIGNED}, // .. + {0x0A81, 0x0A83, propertyPVALID}, // GUJARATI SIGN CANDRABINDU..GUJARATI SIGN VIS + {0x0A84, 0x0, propertyUNASSIGNED}, // + {0x0A85, 0x0A8D, propertyPVALID}, // GUJARATI LETTER A..GUJARATI VOWEL CANDRA E + {0x0A8E, 0x0, propertyUNASSIGNED}, // + {0x0A8F, 0x0A91, propertyPVALID}, // GUJARATI LETTER E..GUJARATI VOWEL CANDRA O + {0x0A92, 0x0, propertyUNASSIGNED}, // + {0x0A93, 0x0AA8, propertyPVALID}, // GUJARATI LETTER O..GUJARATI LETTER NA + {0x0AA9, 0x0, propertyUNASSIGNED}, // + {0x0AAA, 0x0AB0, propertyPVALID}, // GUJARATI LETTER PA..GUJARATI LETTER RA + {0x0AB1, 0x0, propertyUNASSIGNED}, // + {0x0AB2, 0x0AB3, propertyPVALID}, // GUJARATI LETTER LA..GUJARATI LETTER LLA + {0x0AB4, 0x0, propertyUNASSIGNED}, // + {0x0AB5, 0x0AB9, propertyPVALID}, // GUJARATI LETTER VA..GUJARATI LETTER HA + {0x0ABA, 0x0ABB, propertyUNASSIGNED}, // .. + {0x0ABC, 0x0AC5, propertyPVALID}, // GUJARATI SIGN NUKTA..GUJARATI VOWEL SIGN CAN + {0x0AC6, 0x0, propertyUNASSIGNED}, // + {0x0AC7, 0x0AC9, propertyPVALID}, // GUJARATI VOWEL SIGN E..GUJARATI VOWEL SIGN C + {0x0ACA, 0x0, propertyUNASSIGNED}, // + {0x0ACB, 0x0ACD, propertyPVALID}, // GUJARATI VOWEL SIGN O..GUJARATI SIGN VIRAMA + {0x0ACE, 0x0ACF, propertyUNASSIGNED}, // .. + {0x0AD0, 0x0, propertyPVALID}, // GUJARATI OM + {0x0AD1, 0x0ADF, propertyUNASSIGNED}, // .. + {0x0AE0, 0x0AE3, propertyPVALID}, // GUJARATI LETTER VOCALIC RR..GUJARATI VOWEL S + {0x0AE4, 0x0AE5, propertyUNASSIGNED}, // .. + {0x0AE6, 0x0AEF, propertyPVALID}, // GUJARATI DIGIT ZERO..GUJARATI DIGIT NINE + {0x0AF0, 0x0, propertyUNASSIGNED}, // + {0x0AF1, 0x0, propertyDISALLOWED}, // GUJARATI RUPEE SIGN + {0x0AF2, 0x0B00, propertyUNASSIGNED}, // .. + {0x0B01, 0x0B03, propertyPVALID}, // ORIYA SIGN CANDRABINDU..ORIYA SIGN VISARGA + {0x0B04, 0x0, propertyUNASSIGNED}, // + {0x0B05, 0x0B0C, propertyPVALID}, // ORIYA LETTER A..ORIYA LETTER VOCALIC L + {0x0B0D, 0x0B0E, propertyUNASSIGNED}, // .. + {0x0B0F, 0x0B10, propertyPVALID}, // ORIYA LETTER E..ORIYA LETTER AI + {0x0B11, 0x0B12, propertyUNASSIGNED}, // .. + {0x0B13, 0x0B28, propertyPVALID}, // ORIYA LETTER O..ORIYA LETTER NA + {0x0B29, 0x0, propertyUNASSIGNED}, // + {0x0B2A, 0x0B30, propertyPVALID}, // ORIYA LETTER PA..ORIYA LETTER RA + {0x0B31, 0x0, propertyUNASSIGNED}, // + {0x0B32, 0x0B33, propertyPVALID}, // ORIYA LETTER LA..ORIYA LETTER LLA + {0x0B34, 0x0, propertyUNASSIGNED}, // + {0x0B35, 0x0B39, propertyPVALID}, // ORIYA LETTER VA..ORIYA LETTER HA + {0x0B3A, 0x0B3B, propertyUNASSIGNED}, // .. + {0x0B3C, 0x0B44, propertyPVALID}, // ORIYA SIGN NUKTA..ORIYA VOWEL SIGN VOCALIC R + {0x0B45, 0x0B46, propertyUNASSIGNED}, // .. + {0x0B47, 0x0B48, propertyPVALID}, // ORIYA VOWEL SIGN E..ORIYA VOWEL SIGN AI + {0x0B49, 0x0B4A, propertyUNASSIGNED}, // .. + {0x0B4B, 0x0B4D, propertyPVALID}, // ORIYA VOWEL SIGN O..ORIYA SIGN VIRAMA + {0x0B4E, 0x0B55, propertyUNASSIGNED}, // .. + {0x0B56, 0x0B57, propertyPVALID}, // ORIYA AI LENGTH MARK..ORIYA AU LENGTH MARK + {0x0B58, 0x0B5B, propertyUNASSIGNED}, // .. 
+ {0x0B5C, 0x0B5D, propertyDISALLOWED}, // ORIYA LETTER RRA..ORIYA LETTER RHA + {0x0B5E, 0x0, propertyUNASSIGNED}, // + {0x0B5F, 0x0B63, propertyPVALID}, // ORIYA LETTER YYA..ORIYA VOWEL SIGN VOCALIC L + {0x0B64, 0x0B65, propertyUNASSIGNED}, // .. + {0x0B66, 0x0B6F, propertyPVALID}, // ORIYA DIGIT ZERO..ORIYA DIGIT NINE + {0x0B70, 0x0, propertyDISALLOWED}, // ORIYA ISSHAR + {0x0B71, 0x0, propertyPVALID}, // ORIYA LETTER WA + {0x0B72, 0x0B81, propertyUNASSIGNED}, // .. + {0x0B82, 0x0B83, propertyPVALID}, // TAMIL SIGN ANUSVARA..TAMIL SIGN VISARGA + {0x0B84, 0x0, propertyUNASSIGNED}, // + {0x0B85, 0x0B8A, propertyPVALID}, // TAMIL LETTER A..TAMIL LETTER UU + {0x0B8B, 0x0B8D, propertyUNASSIGNED}, // .. + {0x0B8E, 0x0B90, propertyPVALID}, // TAMIL LETTER E..TAMIL LETTER AI + {0x0B91, 0x0, propertyUNASSIGNED}, // + {0x0B92, 0x0B95, propertyPVALID}, // TAMIL LETTER O..TAMIL LETTER KA + {0x0B96, 0x0B98, propertyUNASSIGNED}, // .. + {0x0B99, 0x0B9A, propertyPVALID}, // TAMIL LETTER NGA..TAMIL LETTER CA + {0x0B9B, 0x0, propertyUNASSIGNED}, // + {0x0B9C, 0x0, propertyPVALID}, // TAMIL LETTER JA + {0x0B9D, 0x0, propertyUNASSIGNED}, // + {0x0B9E, 0x0B9F, propertyPVALID}, // TAMIL LETTER NYA..TAMIL LETTER TTA + {0x0BA0, 0x0BA2, propertyUNASSIGNED}, // .. + {0x0BA3, 0x0BA4, propertyPVALID}, // TAMIL LETTER NNA..TAMIL LETTER TA + {0x0BA5, 0x0BA7, propertyUNASSIGNED}, // .. + {0x0BA8, 0x0BAA, propertyPVALID}, // TAMIL LETTER NA..TAMIL LETTER PA + {0x0BAB, 0x0BAD, propertyUNASSIGNED}, // .. + {0x0BAE, 0x0BB9, propertyPVALID}, // TAMIL LETTER MA..TAMIL LETTER HA + {0x0BBA, 0x0BBD, propertyUNASSIGNED}, // .. + {0x0BBE, 0x0BC2, propertyPVALID}, // TAMIL VOWEL SIGN AA..TAMIL VOWEL SIGN UU + {0x0BC3, 0x0BC5, propertyUNASSIGNED}, // .. + {0x0BC6, 0x0BC8, propertyPVALID}, // TAMIL VOWEL SIGN E..TAMIL VOWEL SIGN AI + {0x0BC9, 0x0, propertyUNASSIGNED}, // + {0x0BCA, 0x0BCD, propertyPVALID}, // TAMIL VOWEL SIGN O..TAMIL SIGN VIRAMA + {0x0BCE, 0x0BCF, propertyUNASSIGNED}, // .. + {0x0BD0, 0x0, propertyPVALID}, // TAMIL OM + {0x0BD1, 0x0BD6, propertyUNASSIGNED}, // .. + {0x0BD7, 0x0, propertyPVALID}, // TAMIL AU LENGTH MARK + {0x0BD8, 0x0BE5, propertyUNASSIGNED}, // .. + {0x0BE6, 0x0BEF, propertyPVALID}, // TAMIL DIGIT ZERO..TAMIL DIGIT NINE + {0x0BF0, 0x0BFA, propertyDISALLOWED}, // TAMIL NUMBER TEN..TAMIL NUMBER SIGN + {0x0BFB, 0x0C00, propertyUNASSIGNED}, // .. + {0x0C01, 0x0C03, propertyPVALID}, // TELUGU SIGN CANDRABINDU..TELUGU SIGN VISARGA + {0x0C04, 0x0, propertyUNASSIGNED}, // + {0x0C05, 0x0C0C, propertyPVALID}, // TELUGU LETTER A..TELUGU LETTER VOCALIC L + {0x0C0D, 0x0, propertyUNASSIGNED}, // + {0x0C0E, 0x0C10, propertyPVALID}, // TELUGU LETTER E..TELUGU LETTER AI + {0x0C11, 0x0, propertyUNASSIGNED}, // + {0x0C12, 0x0C28, propertyPVALID}, // TELUGU LETTER O..TELUGU LETTER NA + {0x0C29, 0x0, propertyUNASSIGNED}, // + {0x0C2A, 0x0C33, propertyPVALID}, // TELUGU LETTER PA..TELUGU LETTER LLA + {0x0C34, 0x0, propertyUNASSIGNED}, // + {0x0C35, 0x0C39, propertyPVALID}, // TELUGU LETTER VA..TELUGU LETTER HA + {0x0C3A, 0x0C3C, propertyUNASSIGNED}, // .. + {0x0C3D, 0x0C44, propertyPVALID}, // TELUGU SIGN AVAGRAHA..TELUGU VOWEL SIGN VOCA + {0x0C45, 0x0, propertyUNASSIGNED}, // + {0x0C46, 0x0C48, propertyPVALID}, // TELUGU VOWEL SIGN E..TELUGU VOWEL SIGN AI + {0x0C49, 0x0, propertyUNASSIGNED}, // + {0x0C4A, 0x0C4D, propertyPVALID}, // TELUGU VOWEL SIGN O..TELUGU SIGN VIRAMA + {0x0C4E, 0x0C54, propertyUNASSIGNED}, // .. 
+ {0x0C55, 0x0C56, propertyPVALID}, // TELUGU LENGTH MARK..TELUGU AI LENGTH MARK + {0x0C57, 0x0, propertyUNASSIGNED}, // + {0x0C58, 0x0C59, propertyPVALID}, // TELUGU LETTER TSA..TELUGU LETTER DZA + {0x0C5A, 0x0C5F, propertyUNASSIGNED}, // .. + {0x0C60, 0x0C63, propertyPVALID}, // TELUGU LETTER VOCALIC RR..TELUGU VOWEL SIGN + {0x0C64, 0x0C65, propertyUNASSIGNED}, // .. + {0x0C66, 0x0C6F, propertyPVALID}, // TELUGU DIGIT ZERO..TELUGU DIGIT NINE + {0x0C70, 0x0C77, propertyUNASSIGNED}, // .. + {0x0C78, 0x0C7F, propertyDISALLOWED}, // TELUGU FRACTION DIGIT ZERO FOR ODD POWERS OF + {0x0C80, 0x0C81, propertyUNASSIGNED}, // .. + {0x0C82, 0x0C83, propertyPVALID}, // KANNADA SIGN ANUSVARA..KANNADA SIGN VISARGA + {0x0C84, 0x0, propertyUNASSIGNED}, // + {0x0C85, 0x0C8C, propertyPVALID}, // KANNADA LETTER A..KANNADA LETTER VOCALIC L + {0x0C8D, 0x0, propertyUNASSIGNED}, // + {0x0C8E, 0x0C90, propertyPVALID}, // KANNADA LETTER E..KANNADA LETTER AI + {0x0C91, 0x0, propertyUNASSIGNED}, // + {0x0C92, 0x0CA8, propertyPVALID}, // KANNADA LETTER O..KANNADA LETTER NA + {0x0CA9, 0x0, propertyUNASSIGNED}, // + {0x0CAA, 0x0CB3, propertyPVALID}, // KANNADA LETTER PA..KANNADA LETTER LLA + {0x0CB4, 0x0, propertyUNASSIGNED}, // + {0x0CB5, 0x0CB9, propertyPVALID}, // KANNADA LETTER VA..KANNADA LETTER HA + {0x0CBA, 0x0CBB, propertyUNASSIGNED}, // .. + {0x0CBC, 0x0CC4, propertyPVALID}, // KANNADA SIGN NUKTA..KANNADA VOWEL SIGN VOCAL + {0x0CC5, 0x0, propertyUNASSIGNED}, // + {0x0CC6, 0x0CC8, propertyPVALID}, // KANNADA VOWEL SIGN E..KANNADA VOWEL SIGN AI + {0x0CC9, 0x0, propertyUNASSIGNED}, // + {0x0CCA, 0x0CCD, propertyPVALID}, // KANNADA VOWEL SIGN O..KANNADA SIGN VIRAMA + {0x0CCE, 0x0CD4, propertyUNASSIGNED}, // .. + {0x0CD5, 0x0CD6, propertyPVALID}, // KANNADA LENGTH MARK..KANNADA AI LENGTH MARK + {0x0CD7, 0x0CDD, propertyUNASSIGNED}, // .. + {0x0CDE, 0x0, propertyPVALID}, // KANNADA LETTER FA + {0x0CDF, 0x0, propertyUNASSIGNED}, // + {0x0CE0, 0x0CE3, propertyPVALID}, // KANNADA LETTER VOCALIC RR..KANNADA VOWEL SIG + {0x0CE4, 0x0CE5, propertyUNASSIGNED}, // .. + {0x0CE6, 0x0CEF, propertyPVALID}, // KANNADA DIGIT ZERO..KANNADA DIGIT NINE + {0x0CF0, 0x0, propertyUNASSIGNED}, // + {0x0CF1, 0x0CF2, propertyDISALLOWED}, // KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADH + {0x0CF3, 0x0D01, propertyUNASSIGNED}, // .. + {0x0D02, 0x0D03, propertyPVALID}, // MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISA + {0x0D04, 0x0, propertyUNASSIGNED}, // + {0x0D05, 0x0D0C, propertyPVALID}, // MALAYALAM LETTER A..MALAYALAM LETTER VOCALIC + {0x0D0D, 0x0, propertyUNASSIGNED}, // + {0x0D0E, 0x0D10, propertyPVALID}, // MALAYALAM LETTER E..MALAYALAM LETTER AI + {0x0D11, 0x0, propertyUNASSIGNED}, // + {0x0D12, 0x0D28, propertyPVALID}, // MALAYALAM LETTER O..MALAYALAM LETTER NA + {0x0D29, 0x0, propertyUNASSIGNED}, // + {0x0D2A, 0x0D39, propertyPVALID}, // MALAYALAM LETTER PA..MALAYALAM LETTER HA + {0x0D3A, 0x0D3C, propertyUNASSIGNED}, // .. + {0x0D3D, 0x0D44, propertyPVALID}, // MALAYALAM SIGN AVAGRAHA..MALAYALAM VOWEL SIG + {0x0D45, 0x0, propertyUNASSIGNED}, // + {0x0D46, 0x0D48, propertyPVALID}, // MALAYALAM VOWEL SIGN E..MALAYALAM VOWEL SIGN + {0x0D49, 0x0, propertyUNASSIGNED}, // + {0x0D4A, 0x0D4D, propertyPVALID}, // MALAYALAM VOWEL SIGN O..MALAYALAM SIGN VIRAM + {0x0D4E, 0x0D56, propertyUNASSIGNED}, // .. + {0x0D57, 0x0, propertyPVALID}, // MALAYALAM AU LENGTH MARK + {0x0D58, 0x0D5F, propertyUNASSIGNED}, // .. + {0x0D60, 0x0D63, propertyPVALID}, // MALAYALAM LETTER VOCALIC RR..MALAYALAM VOWEL + {0x0D64, 0x0D65, propertyUNASSIGNED}, // .. 
+ {0x0D66, 0x0D6F, propertyPVALID}, // MALAYALAM DIGIT ZERO..MALAYALAM DIGIT NINE + {0x0D70, 0x0D75, propertyDISALLOWED}, // MALAYALAM NUMBER TEN..MALAYALAM FRACTION THR + {0x0D76, 0x0D78, propertyUNASSIGNED}, // .. + {0x0D79, 0x0, propertyDISALLOWED}, // MALAYALAM DATE MARK + {0x0D7A, 0x0D7F, propertyPVALID}, // MALAYALAM LETTER CHILLU NN..MALAYALAM LETTER + {0x0D80, 0x0D81, propertyUNASSIGNED}, // .. + {0x0D82, 0x0D83, propertyPVALID}, // SINHALA SIGN ANUSVARAYA..SINHALA SIGN VISARG + {0x0D84, 0x0, propertyUNASSIGNED}, // + {0x0D85, 0x0D96, propertyPVALID}, // SINHALA LETTER AYANNA..SINHALA LETTER AUYANN + {0x0D97, 0x0D99, propertyUNASSIGNED}, // .. + {0x0D9A, 0x0DB1, propertyPVALID}, // SINHALA LETTER ALPAPRAANA KAYANNA..SINHALA L + {0x0DB2, 0x0, propertyUNASSIGNED}, // + {0x0DB3, 0x0DBB, propertyPVALID}, // SINHALA LETTER SANYAKA DAYANNA..SINHALA LETT + {0x0DBC, 0x0, propertyUNASSIGNED}, // + {0x0DBD, 0x0, propertyPVALID}, // SINHALA LETTER DANTAJA LAYANNA + {0x0DBE, 0x0DBF, propertyUNASSIGNED}, // .. + {0x0DC0, 0x0DC6, propertyPVALID}, // SINHALA LETTER VAYANNA..SINHALA LETTER FAYAN + {0x0DC7, 0x0DC9, propertyUNASSIGNED}, // .. + {0x0DCA, 0x0, propertyPVALID}, // SINHALA SIGN AL-LAKUNA + {0x0DCB, 0x0DCE, propertyUNASSIGNED}, // .. + {0x0DCF, 0x0DD4, propertyPVALID}, // SINHALA VOWEL SIGN AELA-PILLA..SINHALA VOWEL + {0x0DD5, 0x0, propertyUNASSIGNED}, // + {0x0DD6, 0x0, propertyPVALID}, // SINHALA VOWEL SIGN DIGA PAA-PILLA + {0x0DD7, 0x0, propertyUNASSIGNED}, // + {0x0DD8, 0x0DDF, propertyPVALID}, // SINHALA VOWEL SIGN GAETTA-PILLA..SINHALA VOW + {0x0DE0, 0x0DF1, propertyUNASSIGNED}, // .. + {0x0DF2, 0x0DF3, propertyPVALID}, // SINHALA VOWEL SIGN DIGA GAETTA-PILLA..SINHAL + {0x0DF4, 0x0, propertyDISALLOWED}, // SINHALA PUNCTUATION KUNDDALIYA + {0x0DF5, 0x0E00, propertyUNASSIGNED}, // .. + {0x0E01, 0x0E32, propertyPVALID}, // THAI CHARACTER KO KAI..THAI CHARACTER SARA A + {0x0E33, 0x0, propertyDISALLOWED}, // THAI CHARACTER SARA AM + {0x0E34, 0x0E3A, propertyPVALID}, // THAI CHARACTER SARA I..THAI CHARACTER PHINTH + {0x0E3B, 0x0E3E, propertyUNASSIGNED}, // .. + {0x0E3F, 0x0, propertyDISALLOWED}, // THAI CURRENCY SYMBOL BAHT + {0x0E40, 0x0E4E, propertyPVALID}, // THAI CHARACTER SARA E..THAI CHARACTER YAMAKK + {0x0E4F, 0x0, propertyDISALLOWED}, // THAI CHARACTER FONGMAN + {0x0E50, 0x0E59, propertyPVALID}, // THAI DIGIT ZERO..THAI DIGIT NINE + {0x0E5A, 0x0E5B, propertyDISALLOWED}, // THAI CHARACTER ANGKHANKHU..THAI CHARACTER KH + {0x0E5C, 0x0E80, propertyUNASSIGNED}, // .. + {0x0E81, 0x0E82, propertyPVALID}, // LAO LETTER KO..LAO LETTER KHO SUNG + {0x0E83, 0x0, propertyUNASSIGNED}, // + {0x0E84, 0x0, propertyPVALID}, // LAO LETTER KHO TAM + {0x0E85, 0x0E86, propertyUNASSIGNED}, // .. + {0x0E87, 0x0E88, propertyPVALID}, // LAO LETTER NGO..LAO LETTER CO + {0x0E89, 0x0, propertyUNASSIGNED}, // + {0x0E8A, 0x0, propertyPVALID}, // LAO LETTER SO TAM + {0x0E8B, 0x0E8C, propertyUNASSIGNED}, // .. + {0x0E8D, 0x0, propertyPVALID}, // LAO LETTER NYO + {0x0E8E, 0x0E93, propertyUNASSIGNED}, // .. 
+ {0x0E94, 0x0E97, propertyPVALID}, // LAO LETTER DO..LAO LETTER THO TAM + {0x0E98, 0x0, propertyUNASSIGNED}, // + {0x0E99, 0x0E9F, propertyPVALID}, // LAO LETTER NO..LAO LETTER FO SUNG + {0x0EA0, 0x0, propertyUNASSIGNED}, // + {0x0EA1, 0x0EA3, propertyPVALID}, // LAO LETTER MO..LAO LETTER LO LING + {0x0EA4, 0x0, propertyUNASSIGNED}, // + {0x0EA5, 0x0, propertyPVALID}, // LAO LETTER LO LOOT + {0x0EA6, 0x0, propertyUNASSIGNED}, // + {0x0EA7, 0x0, propertyPVALID}, // LAO LETTER WO + {0x0EA8, 0x0EA9, propertyUNASSIGNED}, // .. + {0x0EAA, 0x0EAB, propertyPVALID}, // LAO LETTER SO SUNG..LAO LETTER HO SUNG + {0x0EAC, 0x0, propertyUNASSIGNED}, // + {0x0EAD, 0x0EB2, propertyPVALID}, // LAO LETTER O..LAO VOWEL SIGN AA + {0x0EB3, 0x0, propertyDISALLOWED}, // LAO VOWEL SIGN AM + {0x0EB4, 0x0EB9, propertyPVALID}, // LAO VOWEL SIGN I..LAO VOWEL SIGN UU + {0x0EBA, 0x0, propertyUNASSIGNED}, // + {0x0EBB, 0x0EBD, propertyPVALID}, // LAO VOWEL SIGN MAI KON..LAO SEMIVOWEL SIGN N + {0x0EBE, 0x0EBF, propertyUNASSIGNED}, // .. + {0x0EC0, 0x0EC4, propertyPVALID}, // LAO VOWEL SIGN E..LAO VOWEL SIGN AI + {0x0EC5, 0x0, propertyUNASSIGNED}, // + {0x0EC6, 0x0, propertyPVALID}, // LAO KO LA + {0x0EC7, 0x0, propertyUNASSIGNED}, // + {0x0EC8, 0x0ECD, propertyPVALID}, // LAO TONE MAI EK..LAO NIGGAHITA + {0x0ECE, 0x0ECF, propertyUNASSIGNED}, // .. + {0x0ED0, 0x0ED9, propertyPVALID}, // LAO DIGIT ZERO..LAO DIGIT NINE + {0x0EDA, 0x0EDB, propertyUNASSIGNED}, // .. + {0x0EDC, 0x0EDD, propertyDISALLOWED}, // LAO HO NO..LAO HO MO + {0x0EDE, 0x0EFF, propertyUNASSIGNED}, // .. + {0x0F00, 0x0, propertyPVALID}, // TIBETAN SYLLABLE OM + {0x0F01, 0x0F0A, propertyDISALLOWED}, // TIBETAN MARK GTER YIG MGO TRUNCATED A..TIBET + {0x0F0B, 0x0, propertyPVALID}, // TIBETAN MARK INTERSYLLABIC TSHEG + {0x0F0C, 0x0F17, propertyDISALLOWED}, // TIBETAN MARK DELIMITER TSHEG BSTAR..TIBETAN + {0x0F18, 0x0F19, propertyPVALID}, // TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN + {0x0F1A, 0x0F1F, propertyDISALLOWED}, // TIBETAN SIGN RDEL DKAR GCIG..TIBETAN SIGN RD + {0x0F20, 0x0F29, propertyPVALID}, // TIBETAN DIGIT ZERO..TIBETAN DIGIT NINE + {0x0F2A, 0x0F34, propertyDISALLOWED}, // TIBETAN DIGIT HALF ONE..TIBETAN MARK BSDUS R + {0x0F35, 0x0, propertyPVALID}, // TIBETAN MARK NGAS BZUNG NYI ZLA + {0x0F36, 0x0, propertyDISALLOWED}, // TIBETAN MARK CARET -DZUD RTAGS BZHI MIG CAN + {0x0F37, 0x0, propertyPVALID}, // TIBETAN MARK NGAS BZUNG SGOR RTAGS + {0x0F38, 0x0, propertyDISALLOWED}, // TIBETAN MARK CHE MGO + {0x0F39, 0x0, propertyPVALID}, // TIBETAN MARK TSA -PHRU + {0x0F3A, 0x0F3D, propertyDISALLOWED}, // TIBETAN MARK GUG RTAGS GYON..TIBETAN MARK AN + {0x0F3E, 0x0F42, propertyPVALID}, // TIBETAN SIGN YAR TSHES..TIBETAN LETTER GA + {0x0F43, 0x0, propertyDISALLOWED}, // TIBETAN LETTER GHA + {0x0F44, 0x0F47, propertyPVALID}, // TIBETAN LETTER NGA..TIBETAN LETTER JA + {0x0F48, 0x0, propertyUNASSIGNED}, // + {0x0F49, 0x0F4C, propertyPVALID}, // TIBETAN LETTER NYA..TIBETAN LETTER DDA + {0x0F4D, 0x0, propertyDISALLOWED}, // TIBETAN LETTER DDHA + {0x0F4E, 0x0F51, propertyPVALID}, // TIBETAN LETTER NNA..TIBETAN LETTER DA + {0x0F52, 0x0, propertyDISALLOWED}, // TIBETAN LETTER DHA + {0x0F53, 0x0F56, propertyPVALID}, // TIBETAN LETTER NA..TIBETAN LETTER BA + {0x0F57, 0x0, propertyDISALLOWED}, // TIBETAN LETTER BHA + {0x0F58, 0x0F5B, propertyPVALID}, // TIBETAN LETTER MA..TIBETAN LETTER DZA + {0x0F5C, 0x0, propertyDISALLOWED}, // TIBETAN LETTER DZHA + {0x0F5D, 0x0F68, propertyPVALID}, // TIBETAN LETTER WA..TIBETAN LETTER A + {0x0F69, 0x0, propertyDISALLOWED}, // 
TIBETAN LETTER KSSA + {0x0F6A, 0x0F6C, propertyPVALID}, // TIBETAN LETTER FIXED-FORM RA..TIBETAN LETTER + {0x0F6D, 0x0F70, propertyUNASSIGNED}, // .. + {0x0F71, 0x0F72, propertyPVALID}, // TIBETAN VOWEL SIGN AA..TIBETAN VOWEL SIGN I + {0x0F73, 0x0, propertyDISALLOWED}, // TIBETAN VOWEL SIGN II + {0x0F74, 0x0, propertyPVALID}, // TIBETAN VOWEL SIGN U + {0x0F75, 0x0F79, propertyDISALLOWED}, // TIBETAN VOWEL SIGN UU..TIBETAN VOWEL SIGN VO + {0x0F7A, 0x0F80, propertyPVALID}, // TIBETAN VOWEL SIGN E..TIBETAN VOWEL SIGN REV + {0x0F81, 0x0, propertyDISALLOWED}, // TIBETAN VOWEL SIGN REVERSED II + {0x0F82, 0x0F84, propertyPVALID}, // TIBETAN SIGN NYI ZLA NAA DA..TIBETAN MARK HA + {0x0F85, 0x0, propertyDISALLOWED}, // TIBETAN MARK PALUTA + {0x0F86, 0x0F8B, propertyPVALID}, // TIBETAN SIGN LCI RTAGS..TIBETAN SIGN GRU MED + {0x0F8C, 0x0F8F, propertyUNASSIGNED}, // .. + {0x0F90, 0x0F92, propertyPVALID}, // TIBETAN SUBJOINED LETTER KA..TIBETAN SUBJOIN + {0x0F93, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER GHA + {0x0F94, 0x0F97, propertyPVALID}, // TIBETAN SUBJOINED LETTER NGA..TIBETAN SUBJOI + {0x0F98, 0x0, propertyUNASSIGNED}, // + {0x0F99, 0x0F9C, propertyPVALID}, // TIBETAN SUBJOINED LETTER NYA..TIBETAN SUBJOI + {0x0F9D, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER DDHA + {0x0F9E, 0x0FA1, propertyPVALID}, // TIBETAN SUBJOINED LETTER NNA..TIBETAN SUBJOI + {0x0FA2, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER DHA + {0x0FA3, 0x0FA6, propertyPVALID}, // TIBETAN SUBJOINED LETTER NA..TIBETAN SUBJOIN + {0x0FA7, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER BHA + {0x0FA8, 0x0FAB, propertyPVALID}, // TIBETAN SUBJOINED LETTER MA..TIBETAN SUBJOIN + {0x0FAC, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER DZHA + {0x0FAD, 0x0FB8, propertyPVALID}, // TIBETAN SUBJOINED LETTER WA..TIBETAN SUBJOIN + {0x0FB9, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER KSSA + {0x0FBA, 0x0FBC, propertyPVALID}, // TIBETAN SUBJOINED LETTER FIXED-FORM WA..TIBE + {0x0FBD, 0x0, propertyUNASSIGNED}, // + {0x0FBE, 0x0FC5, propertyDISALLOWED}, // TIBETAN KU RU KHA..TIBETAN SYMBOL RDO RJE + {0x0FC6, 0x0, propertyPVALID}, // TIBETAN SYMBOL PADMA GDAN + {0x0FC7, 0x0FCC, propertyDISALLOWED}, // TIBETAN SYMBOL RDO RJE RGYA GRAM..TIBETAN SY + {0x0FCD, 0x0, propertyUNASSIGNED}, // + {0x0FCE, 0x0FD8, propertyDISALLOWED}, // TIBETAN SIGN RDEL NAG RDEL DKAR..LEFT-FACING + {0x0FD9, 0x0FFF, propertyUNASSIGNED}, // .. + {0x1000, 0x1049, propertyPVALID}, // MYANMAR LETTER KA..MYANMAR DIGIT NINE + {0x104A, 0x104F, propertyDISALLOWED}, // MYANMAR SIGN LITTLE SECTION..MYANMAR SYMBOL + {0x1050, 0x109D, propertyPVALID}, // MYANMAR LETTER SHA..MYANMAR VOWEL SIGN AITON + {0x109E, 0x10C5, propertyDISALLOWED}, // MYANMAR SYMBOL SHAN ONE..GEORGIAN CAPITAL LE + {0x10C6, 0x10CF, propertyUNASSIGNED}, // .. + {0x10D0, 0x10FA, propertyPVALID}, // GEORGIAN LETTER AN..GEORGIAN LETTER AIN + {0x10FB, 0x10FC, propertyDISALLOWED}, // GEORGIAN PARAGRAPH SEPARATOR..MODIFIER LETTE + {0x10FD, 0x10FF, propertyUNASSIGNED}, // .. + {0x1100, 0x11FF, propertyDISALLOWED}, // HANGUL CHOSEONG KIYEOK..HANGUL JONGSEONG SSA + {0x1200, 0x1248, propertyPVALID}, // ETHIOPIC SYLLABLE HA..ETHIOPIC SYLLABLE QWA + {0x1249, 0x0, propertyUNASSIGNED}, // + {0x124A, 0x124D, propertyPVALID}, // ETHIOPIC SYLLABLE QWI..ETHIOPIC SYLLABLE QWE + {0x124E, 0x124F, propertyUNASSIGNED}, // .. 
+ {0x1250, 0x1256, propertyPVALID}, // ETHIOPIC SYLLABLE QHA..ETHIOPIC SYLLABLE QHO + {0x1257, 0x0, propertyUNASSIGNED}, // + {0x1258, 0x0, propertyPVALID}, // ETHIOPIC SYLLABLE QHWA + {0x1259, 0x0, propertyUNASSIGNED}, // + {0x125A, 0x125D, propertyPVALID}, // ETHIOPIC SYLLABLE QHWI..ETHIOPIC SYLLABLE QH + {0x125E, 0x125F, propertyUNASSIGNED}, // .. + {0x1260, 0x1288, propertyPVALID}, // ETHIOPIC SYLLABLE BA..ETHIOPIC SYLLABLE XWA + {0x1289, 0x0, propertyUNASSIGNED}, // + {0x128A, 0x128D, propertyPVALID}, // ETHIOPIC SYLLABLE XWI..ETHIOPIC SYLLABLE XWE + {0x128E, 0x128F, propertyUNASSIGNED}, // .. + {0x1290, 0x12B0, propertyPVALID}, // ETHIOPIC SYLLABLE NA..ETHIOPIC SYLLABLE KWA + {0x12B1, 0x0, propertyUNASSIGNED}, // + {0x12B2, 0x12B5, propertyPVALID}, // ETHIOPIC SYLLABLE KWI..ETHIOPIC SYLLABLE KWE + {0x12B6, 0x12B7, propertyUNASSIGNED}, // .. + {0x12B8, 0x12BE, propertyPVALID}, // ETHIOPIC SYLLABLE KXA..ETHIOPIC SYLLABLE KXO + {0x12BF, 0x0, propertyUNASSIGNED}, // + {0x12C0, 0x0, propertyPVALID}, // ETHIOPIC SYLLABLE KXWA + {0x12C1, 0x0, propertyUNASSIGNED}, // + {0x12C2, 0x12C5, propertyPVALID}, // ETHIOPIC SYLLABLE KXWI..ETHIOPIC SYLLABLE KX + {0x12C6, 0x12C7, propertyUNASSIGNED}, // .. + {0x12C8, 0x12D6, propertyPVALID}, // ETHIOPIC SYLLABLE WA..ETHIOPIC SYLLABLE PHAR + {0x12D7, 0x0, propertyUNASSIGNED}, // + {0x12D8, 0x1310, propertyPVALID}, // ETHIOPIC SYLLABLE ZA..ETHIOPIC SYLLABLE GWA + {0x1311, 0x0, propertyUNASSIGNED}, // + {0x1312, 0x1315, propertyPVALID}, // ETHIOPIC SYLLABLE GWI..ETHIOPIC SYLLABLE GWE + {0x1316, 0x1317, propertyUNASSIGNED}, // .. + {0x1318, 0x135A, propertyPVALID}, // ETHIOPIC SYLLABLE GGA..ETHIOPIC SYLLABLE FYA + {0x135B, 0x135E, propertyUNASSIGNED}, // .. + {0x135F, 0x0, propertyPVALID}, // ETHIOPIC COMBINING GEMINATION MARK + {0x1360, 0x137C, propertyDISALLOWED}, // ETHIOPIC SECTION MARK..ETHIOPIC NUMBER TEN T + {0x137D, 0x137F, propertyUNASSIGNED}, // .. + {0x1380, 0x138F, propertyPVALID}, // ETHIOPIC SYLLABLE SEBATBEIT MWA..ETHIOPIC SY + {0x1390, 0x1399, propertyDISALLOWED}, // ETHIOPIC TONAL MARK YIZET..ETHIOPIC TONAL MA + {0x139A, 0x139F, propertyUNASSIGNED}, // .. + {0x13A0, 0x13F4, propertyPVALID}, // CHEROKEE LETTER A..CHEROKEE LETTER YV + {0x13F5, 0x13FF, propertyUNASSIGNED}, // .. + {0x1400, 0x0, propertyDISALLOWED}, // CANADIAN SYLLABICS HYPHEN + {0x1401, 0x166C, propertyPVALID}, // CANADIAN SYLLABICS E..CANADIAN SYLLABICS CAR + {0x166D, 0x166E, propertyDISALLOWED}, // CANADIAN SYLLABICS CHI SIGN..CANADIAN SYLLAB + {0x166F, 0x167F, propertyPVALID}, // CANADIAN SYLLABICS QAI..CANADIAN SYLLABICS B + {0x1680, 0x0, propertyDISALLOWED}, // OGHAM SPACE MARK + {0x1681, 0x169A, propertyPVALID}, // OGHAM LETTER BEITH..OGHAM LETTER PEITH + {0x169B, 0x169C, propertyDISALLOWED}, // OGHAM FEATHER MARK..OGHAM REVERSED FEATHER M + {0x169D, 0x169F, propertyUNASSIGNED}, // .. + {0x16A0, 0x16EA, propertyPVALID}, // RUNIC LETTER FEHU FEOH FE F..RUNIC LETTER X + {0x16EB, 0x16F0, propertyDISALLOWED}, // RUNIC SINGLE PUNCTUATION..RUNIC BELGTHOR SYM + {0x16F1, 0x16FF, propertyUNASSIGNED}, // .. + {0x1700, 0x170C, propertyPVALID}, // TAGALOG LETTER A..TAGALOG LETTER YA + {0x170D, 0x0, propertyUNASSIGNED}, // + {0x170E, 0x1714, propertyPVALID}, // TAGALOG LETTER LA..TAGALOG SIGN VIRAMA + {0x1715, 0x171F, propertyUNASSIGNED}, // .. + {0x1720, 0x1734, propertyPVALID}, // HANUNOO LETTER A..HANUNOO SIGN PAMUDPOD + {0x1735, 0x1736, propertyDISALLOWED}, // PHILIPPINE SINGLE PUNCTUATION..PHILIPPINE DO + {0x1737, 0x173F, propertyUNASSIGNED}, // .. 
+ {0x1740, 0x1753, propertyPVALID}, // BUHID LETTER A..BUHID VOWEL SIGN U + {0x1754, 0x175F, propertyUNASSIGNED}, // .. + {0x1760, 0x176C, propertyPVALID}, // TAGBANWA LETTER A..TAGBANWA LETTER YA + {0x176D, 0x0, propertyUNASSIGNED}, // + {0x176E, 0x1770, propertyPVALID}, // TAGBANWA LETTER LA..TAGBANWA LETTER SA + {0x1771, 0x0, propertyUNASSIGNED}, // + {0x1772, 0x1773, propertyPVALID}, // TAGBANWA VOWEL SIGN I..TAGBANWA VOWEL SIGN U + {0x1774, 0x177F, propertyUNASSIGNED}, // .. + {0x1780, 0x17B3, propertyPVALID}, // KHMER LETTER KA..KHMER INDEPENDENT VOWEL QAU + {0x17B4, 0x17B5, propertyDISALLOWED}, // KHMER VOWEL INHERENT AQ..KHMER VOWEL INHEREN + {0x17B6, 0x17D3, propertyPVALID}, // KHMER VOWEL SIGN AA..KHMER SIGN BATHAMASAT + {0x17D4, 0x17D6, propertyDISALLOWED}, // KHMER SIGN KHAN..KHMER SIGN CAMNUC PII KUUH + {0x17D7, 0x0, propertyPVALID}, // KHMER SIGN LEK TOO + {0x17D8, 0x17DB, propertyDISALLOWED}, // KHMER SIGN BEYYAL..KHMER CURRENCY SYMBOL RIE + {0x17DC, 0x17DD, propertyPVALID}, // KHMER SIGN AVAKRAHASANYA..KHMER SIGN ATTHACA + {0x17DE, 0x17DF, propertyUNASSIGNED}, // .. + {0x17E0, 0x17E9, propertyPVALID}, // KHMER DIGIT ZERO..KHMER DIGIT NINE + {0x17EA, 0x17EF, propertyUNASSIGNED}, // .. + {0x17F0, 0x17F9, propertyDISALLOWED}, // KHMER SYMBOL LEK ATTAK SON..KHMER SYMBOL LEK + {0x17FA, 0x17FF, propertyUNASSIGNED}, // .. + {0x1800, 0x180E, propertyDISALLOWED}, // MONGOLIAN BIRGA..MONGOLIAN VOWEL SEPARATOR + {0x180F, 0x0, propertyUNASSIGNED}, // + {0x1810, 0x1819, propertyPVALID}, // MONGOLIAN DIGIT ZERO..MONGOLIAN DIGIT NINE + {0x181A, 0x181F, propertyUNASSIGNED}, // .. + {0x1820, 0x1877, propertyPVALID}, // MONGOLIAN LETTER A..MONGOLIAN LETTER MANCHU + {0x1878, 0x187F, propertyUNASSIGNED}, // .. + {0x1880, 0x18AA, propertyPVALID}, // MONGOLIAN LETTER ALI GALI ANUSVARA ONE..MONG + {0x18AB, 0x18AF, propertyUNASSIGNED}, // .. + {0x18B0, 0x18F5, propertyPVALID}, // CANADIAN SYLLABICS OY..CANADIAN SYLLABICS CA + {0x18F6, 0x18FF, propertyUNASSIGNED}, // .. + {0x1900, 0x191C, propertyPVALID}, // LIMBU VOWEL-CARRIER LETTER..LIMBU LETTER HA + {0x191D, 0x191F, propertyUNASSIGNED}, // .. + {0x1920, 0x192B, propertyPVALID}, // LIMBU VOWEL SIGN A..LIMBU SUBJOINED LETTER W + {0x192C, 0x192F, propertyUNASSIGNED}, // .. + {0x1930, 0x193B, propertyPVALID}, // LIMBU SMALL LETTER KA..LIMBU SIGN SA-I + {0x193C, 0x193F, propertyUNASSIGNED}, // .. + {0x1940, 0x0, propertyDISALLOWED}, // LIMBU SIGN LOO + {0x1941, 0x1943, propertyUNASSIGNED}, // .. + {0x1944, 0x1945, propertyDISALLOWED}, // LIMBU EXCLAMATION MARK..LIMBU QUESTION MARK + {0x1946, 0x196D, propertyPVALID}, // LIMBU DIGIT ZERO..TAI LE LETTER AI + {0x196E, 0x196F, propertyUNASSIGNED}, // .. + {0x1970, 0x1974, propertyPVALID}, // TAI LE LETTER TONE-2..TAI LE LETTER TONE-6 + {0x1975, 0x197F, propertyUNASSIGNED}, // .. + {0x1980, 0x19AB, propertyPVALID}, // NEW TAI LUE LETTER HIGH QA..NEW TAI LUE LETT + {0x19AC, 0x19AF, propertyUNASSIGNED}, // .. + {0x19B0, 0x19C9, propertyPVALID}, // NEW TAI LUE VOWEL SIGN VOWEL SHORTENER..NEW + {0x19CA, 0x19CF, propertyUNASSIGNED}, // .. + {0x19D0, 0x19DA, propertyPVALID}, // NEW TAI LUE DIGIT ZERO..NEW TAI LUE THAM DIG + {0x19DB, 0x19DD, propertyUNASSIGNED}, // .. + {0x19DE, 0x19FF, propertyDISALLOWED}, // NEW TAI LUE SIGN LAE..KHMER SYMBOL DAP-PRAM + {0x1A00, 0x1A1B, propertyPVALID}, // BUGINESE LETTER KA..BUGINESE VOWEL SIGN AE + {0x1A1C, 0x1A1D, propertyUNASSIGNED}, // .. 
+ {0x1A1E, 0x1A1F, propertyDISALLOWED}, // BUGINESE PALLAWA..BUGINESE END OF SECTION
+ {0x1A20, 0x1A5E, propertyPVALID}, // TAI THAM LETTER HIGH KA..TAI THAM CONSONANT
+ {0x1A5F, 0x0, propertyUNASSIGNED}, //
+ {0x1A60, 0x1A7C, propertyPVALID}, // TAI THAM SIGN SAKOT..TAI THAM SIGN KHUEN-LUE
+ {0x1A7D, 0x1A7E, propertyUNASSIGNED}, // ..
+ {0x1A7F, 0x1A89, propertyPVALID}, // TAI THAM COMBINING CRYPTOGRAMMIC DOT..TAI TH
+ {0x1A8A, 0x1A8F, propertyUNASSIGNED}, // ..
+ {0x1A90, 0x1A99, propertyPVALID}, // TAI THAM THAM DIGIT ZERO..TAI THAM THAM DIGI
+ {0x1A9A, 0x1A9F, propertyUNASSIGNED}, // ..
+ {0x1AA0, 0x1AA6, propertyDISALLOWED}, // TAI THAM SIGN WIANG..TAI THAM SIGN REVERSED
+ {0x1AA7, 0x0, propertyPVALID}, // TAI THAM SIGN MAI YAMOK
+ {0x1AA8, 0x1AAD, propertyDISALLOWED}, // TAI THAM SIGN KAAN..TAI THAM SIGN CAANG
+ {0x1AAE, 0x1AFF, propertyUNASSIGNED}, // ..
+ {0x1B00, 0x1B4B, propertyPVALID}, // BALINESE SIGN ULU RICEM..BALINESE LETTER ASY
+ {0x1B4C, 0x1B4F, propertyUNASSIGNED}, // ..
+ {0x1B50, 0x1B59, propertyPVALID}, // BALINESE DIGIT ZERO..BALINESE DIGIT NINE
+ {0x1B5A, 0x1B6A, propertyDISALLOWED}, // BALINESE PANTI..BALINESE MUSICAL SYMBOL DANG
+ {0x1B6B, 0x1B73, propertyPVALID}, // BALINESE MUSICAL SYMBOL COMBINING TEGEH..BAL
+ {0x1B74, 0x1B7C, propertyDISALLOWED}, // BALINESE MUSICAL SYMBOL RIGHT-HAND OPEN DUG.
+ {0x1B7D, 0x1B7F, propertyUNASSIGNED}, // ..
+ {0x1B80, 0x1BAA, propertyPVALID}, // SUNDANESE SIGN PANYECEK..SUNDANESE SIGN PAMA
+ {0x1BAB, 0x1BAD, propertyUNASSIGNED}, // ..
+ {0x1BAE, 0x1BB9, propertyPVALID}, // SUNDANESE LETTER KHA..SUNDANESE DIGIT NINE
+ {0x1BBA, 0x1BFF, propertyUNASSIGNED}, // ..
+ {0x1C00, 0x1C37, propertyPVALID}, // LEPCHA LETTER KA..LEPCHA SIGN NUKTA
+ {0x1C38, 0x1C3A, propertyUNASSIGNED}, // ..
+ {0x1C3B, 0x1C3F, propertyDISALLOWED}, // LEPCHA PUNCTUATION TA-ROL..LEPCHA PUNCTUATIO
+ {0x1C40, 0x1C49, propertyPVALID}, // LEPCHA DIGIT ZERO..LEPCHA DIGIT NINE
+ {0x1C4A, 0x1C4C, propertyUNASSIGNED}, // ..
+ {0x1C4D, 0x1C7D, propertyPVALID}, // LEPCHA LETTER TTA..OL CHIKI AHAD
+ {0x1C7E, 0x1C7F, propertyDISALLOWED}, // OL CHIKI PUNCTUATION MUCAAD..OL CHIKI PUNCTU
+ {0x1C80, 0x1CCF, propertyUNASSIGNED}, // ..
+ {0x1CD0, 0x1CD2, propertyPVALID}, // VEDIC TONE KARSHANA..VEDIC TONE PRENKHA
+ {0x1CD3, 0x0, propertyDISALLOWED}, // VEDIC SIGN NIHSHVASA
+ {0x1CD4, 0x1CF2, propertyPVALID}, // VEDIC SIGN YAJURVEDIC MIDLINE SVARITA..VEDIC
+ {0x1CF3, 0x1CFF, propertyUNASSIGNED}, // ..
+ {0x1D00, 0x1D2B, propertyPVALID}, // LATIN LETTER SMALL CAPITAL A..CYRILLIC LETTE
+ {0x1D2C, 0x1D2E, propertyDISALLOWED}, // MODIFIER LETTER CAPITAL A..MODIFIER LETTER C
+ {0x1D2F, 0x0, propertyPVALID}, // MODIFIER LETTER CAPITAL BARRED B
+ {0x1D30, 0x1D3A, propertyDISALLOWED}, // MODIFIER LETTER CAPITAL D..MODIFIER LETTER C
+ {0x1D3B, 0x0, propertyPVALID}, // MODIFIER LETTER CAPITAL REVERSED N
+ {0x1D3C, 0x1D4D, propertyDISALLOWED}, // MODIFIER LETTER CAPITAL O..MODIFIER LETTER S
+ {0x1D4E, 0x0, propertyPVALID}, // MODIFIER LETTER SMALL TURNED I
+ {0x1D4F, 0x1D6A, propertyDISALLOWED}, // MODIFIER LETTER SMALL K..GREEK SUBSCRIPT SMA
+ {0x1D6B, 0x1D77, propertyPVALID}, // LATIN SMALL LETTER UE..LATIN SMALL LETTER TU
+ {0x1D78, 0x0, propertyDISALLOWED}, // MODIFIER LETTER CYRILLIC EN
+ {0x1D79, 0x1D9A, propertyPVALID}, // LATIN SMALL LETTER INSULAR G..LATIN SMALL LE
+ {0x1D9B, 0x1DBF, propertyDISALLOWED}, // MODIFIER LETTER SMALL TURNED ALPHA..MODIFIER
+ {0x1DC0, 0x1DE6, propertyPVALID}, // COMBINING DOTTED GRAVE ACCENT..COMBINING LAT
+ {0x1DE7, 0x1DFC, propertyUNASSIGNED}, // ..
+ {0x1DFD, 0x1DFF, propertyPVALID}, // COMBINING ALMOST EQUAL TO BELOW..COMBINING R
+ {0x1E00, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH RING BELOW
+ {0x1E01, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH RING BELOW
+ {0x1E02, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH DOT ABOVE
+ {0x1E03, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH DOT ABOVE
+ {0x1E04, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH DOT BELOW
+ {0x1E05, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH DOT BELOW
+ {0x1E06, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH LINE BELOW
+ {0x1E07, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH LINE BELOW
+ {0x1E08, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CEDILLA AND ACUT
+ {0x1E09, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CEDILLA AND ACUTE
+ {0x1E0A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH DOT ABOVE
+ {0x1E0B, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH DOT ABOVE
+ {0x1E0C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH DOT BELOW
+ {0x1E0D, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH DOT BELOW
+ {0x1E0E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH LINE BELOW
+ {0x1E0F, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH LINE BELOW
+ {0x1E10, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH CEDILLA
+ {0x1E11, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CEDILLA
+ {0x1E12, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW
+ {0x1E13, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CIRCUMFLEX BELOW
+ {0x1E14, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH MACRON AND GRAVE
+ {0x1E15, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH MACRON AND GRAVE
+ {0x1E16, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH MACRON AND ACUTE
+ {0x1E17, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH MACRON AND ACUTE
+ {0x1E18, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW
+ {0x1E19, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX BELOW
+ {0x1E1A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH TILDE BELOW
+ {0x1E1B, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH TILDE BELOW
+ {0x1E1C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CEDILLA AND BREV
+ {0x1E1D, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CEDILLA AND BREVE
+ {0x1E1E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER F WITH DOT ABOVE
+ {0x1E1F, 0x0, propertyPVALID}, // LATIN SMALL LETTER F WITH DOT ABOVE
+ {0x1E20, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH MACRON
+ {0x1E21, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH MACRON
+ {0x1E22, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DOT ABOVE
+ {0x1E23, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DOT ABOVE
+ {0x1E24, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DOT BELOW
+ {0x1E25, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DOT BELOW
+ {0x1E26, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DIAERESIS
+ {0x1E27, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DIAERESIS
+ {0x1E28, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH CEDILLA
+ {0x1E29, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH CEDILLA
+ {0x1E2A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH BREVE BELOW
+ {0x1E2B, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH BREVE BELOW
+ {0x1E2C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH TILDE BELOW
+ {0x1E2D, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH TILDE BELOW
+ {0x1E2E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DIAERESIS AND AC
+ {0x1E2F, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH DIAERESIS AND ACUT
+ {0x1E30, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH ACUTE
+ {0x1E31, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH ACUTE
+ {0x1E32, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DOT BELOW
+ {0x1E33, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DOT BELOW
+ {0x1E34, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH LINE BELOW
+ {0x1E35, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH LINE BELOW
+ {0x1E36, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH DOT BELOW
+ {0x1E37, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH DOT BELOW
+ {0x1E38, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH DOT BELOW AND MA
+ {0x1E39, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH DOT BELOW AND MACR
+ {0x1E3A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH LINE BELOW
+ {0x1E3B, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH LINE BELOW
+ {0x1E3C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW
+ {0x1E3D, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH CIRCUMFLEX BELOW
+ {0x1E3E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER M WITH ACUTE
+ {0x1E3F, 0x0, propertyPVALID}, // LATIN SMALL LETTER M WITH ACUTE
+ {0x1E40, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER M WITH DOT ABOVE
+ {0x1E41, 0x0, propertyPVALID}, // LATIN SMALL LETTER M WITH DOT ABOVE
+ {0x1E42, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER M WITH DOT BELOW
+ {0x1E43, 0x0, propertyPVALID}, // LATIN SMALL LETTER M WITH DOT BELOW
+ {0x1E44, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH DOT ABOVE
+ {0x1E45, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH DOT ABOVE
+ {0x1E46, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH DOT BELOW
+ {0x1E47, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH DOT BELOW
+ {0x1E48, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH LINE BELOW
+ {0x1E49, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH LINE BELOW
+ {0x1E4A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW
+ {0x1E4B, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CIRCUMFLEX BELOW
+ {0x1E4C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH TILDE AND ACUTE
+ {0x1E4D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH TILDE AND ACUTE
+ {0x1E4E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH TILDE AND DIAERE
+ {0x1E4F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH TILDE AND DIAERESI
+ {0x1E50, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MACRON AND GRAVE
+ {0x1E51, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH MACRON AND GRAVE
+ {0x1E52, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MACRON AND ACUTE
+ {0x1E53, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH MACRON AND ACUTE
+ {0x1E54, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH ACUTE
+ {0x1E55, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH ACUTE
+ {0x1E56, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH DOT ABOVE
+ {0x1E57, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH DOT ABOVE
+ {0x1E58, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOT ABOVE
+ {0x1E59, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT ABOVE
+ {0x1E5A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOT BELOW
+ {0x1E5B, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT BELOW
+ {0x1E5C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOT BELOW AND MA
+ {0x1E5D, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT BELOW AND MACR
+ {0x1E5E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH LINE BELOW
+ {0x1E5F, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH LINE BELOW
+ {0x1E60, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT ABOVE
+ {0x1E61, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT ABOVE
+ {0x1E62, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT BELOW
+ {0x1E63, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT BELOW
+ {0x1E64, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH ACUTE AND DOT AB
+ {0x1E65, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH ACUTE AND DOT ABOV
+ {0x1E66, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CARON AND DOT AB
+ {0x1E67, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CARON AND DOT ABOV
+ {0x1E68, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT BELOW AND DO
+ {0x1E69, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT BELOW AND DOT
+ {0x1E6A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH DOT ABOVE
+ {0x1E6B, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH DOT ABOVE
+ {0x1E6C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH DOT BELOW
+ {0x1E6D, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH DOT BELOW
+ {0x1E6E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH LINE BELOW
+ {0x1E6F, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH LINE BELOW
+ {0x1E70, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW
+ {0x1E71, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CIRCUMFLEX BELOW
+ {0x1E72, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS BELOW
+ {0x1E73, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS BELOW
+ {0x1E74, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE BELOW
+ {0x1E75, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE BELOW
+ {0x1E76, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW
+ {0x1E77, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH CIRCUMFLEX BELOW
+ {0x1E78, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE AND ACUTE
+ {0x1E79, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE AND ACUTE
+ {0x1E7A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH MACRON AND DIAER
+ {0x1E7B, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH MACRON AND DIAERES
+ {0x1E7C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH TILDE
+ {0x1E7D, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH TILDE
+ {0x1E7E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH DOT BELOW
+ {0x1E7F, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH DOT BELOW
+ {0x1E80, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH GRAVE
+ {0x1E81, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH GRAVE
+ {0x1E82, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH ACUTE
+ {0x1E83, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH ACUTE
+ {0x1E84, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH DIAERESIS
+ {0x1E85, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DIAERESIS
+ {0x1E86, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH DOT ABOVE
+ {0x1E87, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DOT ABOVE
+ {0x1E88, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH DOT BELOW
+ {0x1E89, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DOT BELOW
+ {0x1E8A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER X WITH DOT ABOVE
+ {0x1E8B, 0x0, propertyPVALID}, // LATIN SMALL LETTER X WITH DOT ABOVE
+ {0x1E8C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER X WITH DIAERESIS
+ {0x1E8D, 0x0, propertyPVALID}, // LATIN SMALL LETTER X WITH DIAERESIS
+ {0x1E8E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DOT ABOVE
+ {0x1E8F, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH DOT ABOVE
+ {0x1E90, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH CIRCUMFLEX
+ {0x1E91, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH CIRCUMFLEX
+ {0x1E92, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DOT BELOW
+ {0x1E93, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DOT BELOW
+ {0x1E94, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH LINE BELOW
+ {0x1E95, 0x1E99, propertyPVALID}, // LATIN SMALL LETTER Z WITH LINE BELOW..LATIN
+ {0x1E9A, 0x1E9B, propertyDISALLOWED}, // LATIN SMALL LETTER A WITH RIGHT HALF RING..L
+ {0x1E9C, 0x1E9D, propertyPVALID}, // LATIN SMALL LETTER LONG S WITH DIAGONAL STRO
+ {0x1E9E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER SHARP S
+ {0x1E9F, 0x0, propertyPVALID}, // LATIN SMALL LETTER DELTA
+ {0x1EA0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT BELOW
+ {0x1EA1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOT BELOW
+ {0x1EA2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH HOOK ABOVE
+ {0x1EA3, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH HOOK ABOVE
+ {0x1EA4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND A
+ {0x1EA5, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACU
+ {0x1EA6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND G
+ {0x1EA7, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRA
+ {0x1EA8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND H
+ {0x1EA9, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOO
+ {0x1EAA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND T
+ {0x1EAB, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND TIL
+ {0x1EAC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND D
+ {0x1EAD, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT
+ {0x1EAE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND ACUTE
+ {0x1EAF, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND ACUTE
+ {0x1EB0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND GRAVE
+ {0x1EB1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND GRAVE
+ {0x1EB2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND HOOK A
+ {0x1EB3, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND HOOK ABO
+ {0x1EB4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND TILDE
+ {0x1EB5, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND TILDE
+ {0x1EB6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND DOT BE
+ {0x1EB7, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND DOT BELO
+ {0x1EB8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOT BELOW
+ {0x1EB9, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOT BELOW
+ {0x1EBA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH HOOK ABOVE
+ {0x1EBB, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH HOOK ABOVE
+ {0x1EBC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH TILDE
+ {0x1EBD, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH TILDE
+ {0x1EBE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND A
+ {0x1EBF, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACU
+ {0x1EC0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND G
+ {0x1EC1, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRA
+ {0x1EC2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND H
+ {0x1EC3, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOO
+ {0x1EC4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND T
+ {0x1EC5, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND TIL
+ {0x1EC6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND D
+ {0x1EC7, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT
+ {0x1EC8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH HOOK ABOVE
+ {0x1EC9, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH HOOK ABOVE
+ {0x1ECA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DOT BELOW
+ {0x1ECB, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH DOT BELOW
+ {0x1ECC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOT BELOW
+ {0x1ECD, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOT BELOW
+ {0x1ECE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HOOK ABOVE
+ {0x1ECF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HOOK ABOVE
+ {0x1ED0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND A
+ {0x1ED1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACU
+ {0x1ED2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND G
+ {0x1ED3, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRA
+ {0x1ED4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND H
+ {0x1ED5, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOO
+ {0x1ED6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND T
+ {0x1ED7, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND TIL
+ {0x1ED8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND D
+ {0x1ED9, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT
+ {0x1EDA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND ACUTE
+ {0x1EDB, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND ACUTE
+ {0x1EDC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND GRAVE
+ {0x1EDD, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND GRAVE
+ {0x1EDE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND HOOK AB
+ {0x1EDF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND HOOK ABOV
+ {0x1EE0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND TILDE
+ {0x1EE1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND TILDE
+ {0x1EE2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND DOT BEL
+ {0x1EE3, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND DOT BELOW
+ {0x1EE4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOT BELOW
+ {0x1EE5, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOT BELOW
+ {0x1EE6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HOOK ABOVE
+ {0x1EE7, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HOOK ABOVE
+ {0x1EE8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND ACUTE
+ {0x1EE9, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND ACUTE
+ {0x1EEA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND GRAVE
+ {0x1EEB, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND GRAVE
+ {0x1EEC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND HOOK AB
+ {0x1EED, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND HOOK ABOV
+ {0x1EEE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND TILDE
+ {0x1EEF, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND TILDE
+ {0x1EF0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND DOT BEL
+ {0x1EF1, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND DOT BELOW
+ {0x1EF2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH GRAVE
+ {0x1EF3, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH GRAVE
+ {0x1EF4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DOT BELOW
+ {0x1EF5, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH DOT BELOW
+ {0x1EF6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH HOOK ABOVE
+ {0x1EF7, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH HOOK ABOVE
+ {0x1EF8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH TILDE
+ {0x1EF9, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH TILDE
+ {0x1EFA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER MIDDLE-WELSH LL
+ {0x1EFB, 0x0, propertyPVALID}, // LATIN SMALL LETTER MIDDLE-WELSH LL
+ {0x1EFC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER MIDDLE-WELSH V
+ {0x1EFD, 0x0, propertyPVALID}, // LATIN SMALL LETTER MIDDLE-WELSH V
+ {0x1EFE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH LOOP
+ {0x1EFF, 0x1F07, propertyPVALID}, // LATIN SMALL LETTER Y WITH LOOP..GREEK SMALL
+ {0x1F08, 0x1F0F, propertyDISALLOWED}, // GREEK CAPITAL LETTER ALPHA WITH PSILI..GREEK
+ {0x1F10, 0x1F15, propertyPVALID}, // GREEK SMALL LETTER EPSILON WITH PSILI..GREEK
+ {0x1F16, 0x1F17, propertyUNASSIGNED}, // ..
+ {0x1F18, 0x1F1D, propertyDISALLOWED}, // GREEK CAPITAL LETTER EPSILON WITH PSILI..GRE
+ {0x1F1E, 0x1F1F, propertyUNASSIGNED}, // ..
+ {0x1F20, 0x1F27, propertyPVALID}, // GREEK SMALL LETTER ETA WITH PSILI..GREEK SMA
+ {0x1F28, 0x1F2F, propertyDISALLOWED}, // GREEK CAPITAL LETTER ETA WITH PSILI..GREEK C
+ {0x1F30, 0x1F37, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH PSILI..GREEK SM
+ {0x1F38, 0x1F3F, propertyDISALLOWED}, // GREEK CAPITAL LETTER IOTA WITH PSILI..GREEK
+ {0x1F40, 0x1F45, propertyPVALID}, // GREEK SMALL LETTER OMICRON WITH PSILI..GREEK
+ {0x1F46, 0x1F47, propertyUNASSIGNED}, // ..
+ {0x1F48, 0x1F4D, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMICRON WITH PSILI..GRE
+ {0x1F4E, 0x1F4F, propertyUNASSIGNED}, // ..
+ {0x1F50, 0x1F57, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH PSILI..GREEK
+ {0x1F58, 0x0, propertyUNASSIGNED}, //
+ {0x1F59, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA
+ {0x1F5A, 0x0, propertyUNASSIGNED}, //
+ {0x1F5B, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND
+ {0x1F5C, 0x0, propertyUNASSIGNED}, //
+ {0x1F5D, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND
+ {0x1F5E, 0x0, propertyUNASSIGNED}, //
+ {0x1F5F, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND
+ {0x1F60, 0x1F67, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH PSILI..GREEK S
+ {0x1F68, 0x1F6F, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMEGA WITH PSILI..GREEK
+ {0x1F70, 0x0, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH VARIA
+ {0x1F71, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH OXIA
+ {0x1F72, 0x0, propertyPVALID}, // GREEK SMALL LETTER EPSILON WITH VARIA
+ {0x1F73, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER EPSILON WITH OXIA
+ {0x1F74, 0x0, propertyPVALID}, // GREEK SMALL LETTER ETA WITH VARIA
+ {0x1F75, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER ETA WITH OXIA
+ {0x1F76, 0x0, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH VARIA
+ {0x1F77, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER IOTA WITH OXIA
+ {0x1F78, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMICRON WITH VARIA
+ {0x1F79, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER OMICRON WITH OXIA
+ {0x1F7A, 0x0, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH VARIA
+ {0x1F7B, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER UPSILON WITH OXIA
+ {0x1F7C, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH VARIA
+ {0x1F7D, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH OXIA
+ {0x1F7E, 0x1F7F, propertyUNASSIGNED}, // ..
+ {0x1F80, 0x1FAF, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH PSILI AND YPOG
+ {0x1FB0, 0x1FB1, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH VRACHY..GREEK
+ {0x1FB2, 0x1FB4, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH VARIA AND YPOG
+ {0x1FB5, 0x0, propertyUNASSIGNED}, //
+ {0x1FB6, 0x0, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH PERISPOMENI
+ {0x1FB7, 0x1FC4, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH PERISPOMENI AN
+ {0x1FC5, 0x0, propertyUNASSIGNED}, //
+ {0x1FC6, 0x0, propertyPVALID}, // GREEK SMALL LETTER ETA WITH PERISPOMENI
+ {0x1FC7, 0x1FCF, propertyDISALLOWED}, // GREEK SMALL LETTER ETA WITH PERISPOMENI AND
+ {0x1FD0, 0x1FD2, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH VRACHY..GREEK S
+ {0x1FD3, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER IOTA WITH DIALYTIKA AND O
+ {0x1FD4, 0x1FD5, propertyUNASSIGNED}, // ..
+ {0x1FD6, 0x1FD7, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH PERISPOMENI..GR
+ {0x1FD8, 0x1FDB, propertyDISALLOWED}, // GREEK CAPITAL LETTER IOTA WITH VRACHY..GREEK
+ {0x1FDC, 0x0, propertyUNASSIGNED}, //
+ {0x1FDD, 0x1FDF, propertyDISALLOWED}, // GREEK DASIA AND VARIA..GREEK DASIA AND PERIS
+ {0x1FE0, 0x1FE2, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH VRACHY..GREE
+ {0x1FE3, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER UPSILON WITH DIALYTIKA AN
+ {0x1FE4, 0x1FE7, propertyPVALID}, // GREEK SMALL LETTER RHO WITH PSILI..GREEK SMA
+ {0x1FE8, 0x1FEF, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH VRACHY..GR
+ {0x1FF0, 0x1FF1, propertyUNASSIGNED}, // ..
+ {0x1FF2, 0x1FF4, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH VARIA AND YPOG
+ {0x1FF5, 0x0, propertyUNASSIGNED}, //
+ {0x1FF6, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH PERISPOMENI
+ {0x1FF7, 0x1FFE, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH PERISPOMENI AN
+ {0x1FFF, 0x0, propertyUNASSIGNED}, //
+ {0x2000, 0x200B, propertyDISALLOWED}, // EN QUAD..ZERO WIDTH SPACE
+ {0x200C, 0x200D, propertyCONTEXTJ}, // ZERO WIDTH NON-JOINER..ZERO WIDTH JOINER
+ {0x200E, 0x2064, propertyDISALLOWED}, // LEFT-TO-RIGHT MARK..INVISIBLE PLUS
+ {0x2065, 0x2069, propertyUNASSIGNED}, // ..
+ {0x206A, 0x2071, propertyDISALLOWED}, // INHIBIT SYMMETRIC SWAPPING..SUPERSCRIPT LATI
+ {0x2072, 0x2073, propertyUNASSIGNED}, // ..
+ {0x2074, 0x208E, propertyDISALLOWED}, // SUPERSCRIPT FOUR..SUBSCRIPT RIGHT PARENTHESI
+ {0x208F, 0x0, propertyUNASSIGNED}, //
+ {0x2090, 0x2094, propertyDISALLOWED}, // LATIN SUBSCRIPT SMALL LETTER A..LATIN SUBSCR
+ {0x2095, 0x209F, propertyUNASSIGNED}, // ..
+ {0x20A0, 0x20B8, propertyDISALLOWED}, // EURO-CURRENCY SIGN..TENGE SIGN
+ {0x20B9, 0x20CF, propertyUNASSIGNED}, // ..
+ {0x20D0, 0x20F0, propertyDISALLOWED}, // COMBINING LEFT HARPOON ABOVE..COMBINING ASTE
+ {0x20F1, 0x20FF, propertyUNASSIGNED}, // ..
+ {0x2100, 0x214D, propertyDISALLOWED}, // ACCOUNT OF..AKTIESELSKAB
+ {0x214E, 0x0, propertyPVALID}, // TURNED SMALL F
+ {0x214F, 0x2183, propertyDISALLOWED}, // SYMBOL FOR SAMARITAN SOURCE..ROMAN NUMERAL R
+ {0x2184, 0x0, propertyPVALID}, // LATIN SMALL LETTER REVERSED C
+ {0x2185, 0x2189, propertyDISALLOWED}, // ROMAN NUMERAL SIX LATE FORM..VULGAR FRACTION
+ {0x218A, 0x218F, propertyUNASSIGNED}, // ..
+ {0x2190, 0x23E8, propertyDISALLOWED}, // LEFTWARDS ARROW..DECIMAL EXPONENT SYMBOL
+ {0x23E9, 0x23FF, propertyUNASSIGNED}, // ..
+ {0x2400, 0x2426, propertyDISALLOWED}, // SYMBOL FOR NULL..SYMBOL FOR SUBSTITUTE FORM
+ {0x2427, 0x243F, propertyUNASSIGNED}, // ..
+ {0x2440, 0x244A, propertyDISALLOWED}, // OCR HOOK..OCR DOUBLE BACKSLASH
+ {0x244B, 0x245F, propertyUNASSIGNED}, // ..
+ {0x2460, 0x26CD, propertyDISALLOWED}, // CIRCLED DIGIT ONE..DISABLED CAR
+ {0x26CE, 0x0, propertyUNASSIGNED}, //
+ {0x26CF, 0x26E1, propertyDISALLOWED}, // PICK..RESTRICTED LEFT ENTRY-2
+ {0x26E2, 0x0, propertyUNASSIGNED}, //
+ {0x26E3, 0x0, propertyDISALLOWED}, // HEAVY CIRCLE WITH STROKE AND TWO DOTS ABOVE
+ {0x26E4, 0x26E7, propertyUNASSIGNED}, // ..
+ {0x26E8, 0x26FF, propertyDISALLOWED}, // BLACK CROSS ON SHIELD..WHITE FLAG WITH HORIZ
+ {0x2700, 0x0, propertyUNASSIGNED}, //
+ {0x2701, 0x2704, propertyDISALLOWED}, // UPPER BLADE SCISSORS..WHITE SCISSORS
+ {0x2705, 0x0, propertyUNASSIGNED}, //
+ {0x2706, 0x2709, propertyDISALLOWED}, // TELEPHONE LOCATION SIGN..ENVELOPE
+ {0x270A, 0x270B, propertyUNASSIGNED}, // ..
+ {0x270C, 0x2727, propertyDISALLOWED}, // VICTORY HAND..WHITE FOUR POINTED STAR
+ {0x2728, 0x0, propertyUNASSIGNED}, //
+ {0x2729, 0x274B, propertyDISALLOWED}, // STRESS OUTLINED WHITE STAR..HEAVY EIGHT TEAR
+ {0x274C, 0x0, propertyUNASSIGNED}, //
+ {0x274D, 0x0, propertyDISALLOWED}, // SHADOWED WHITE CIRCLE
+ {0x274E, 0x0, propertyUNASSIGNED}, //
+ {0x274F, 0x2752, propertyDISALLOWED}, // LOWER RIGHT DROP-SHADOWED WHITE SQUARE..UPPE
+ {0x2753, 0x2755, propertyUNASSIGNED}, // ..
+ {0x2756, 0x275E, propertyDISALLOWED}, // BLACK DIAMOND MINUS WHITE X..HEAVY DOUBLE CO
+ {0x275F, 0x2760, propertyUNASSIGNED}, // ..
+ {0x2761, 0x2794, propertyDISALLOWED}, // CURVED STEM PARAGRAPH SIGN ORNAMENT..HEAVY W
+ {0x2795, 0x2797, propertyUNASSIGNED}, // ..
+ {0x2798, 0x27AF, propertyDISALLOWED}, // HEAVY SOUTH EAST ARROW..NOTCHED LOWER RIGHT-
+ {0x27B0, 0x0, propertyUNASSIGNED}, //
+ {0x27B1, 0x27BE, propertyDISALLOWED}, // NOTCHED UPPER RIGHT-SHADOWED WHITE RIGHTWARD
+ {0x27BF, 0x0, propertyUNASSIGNED}, //
+ {0x27C0, 0x27CA, propertyDISALLOWED}, // THREE DIMENSIONAL ANGLE..VERTICAL BAR WITH H
+ {0x27CB, 0x0, propertyUNASSIGNED}, //
+ {0x27CC, 0x0, propertyDISALLOWED}, // LONG DIVISION
+ {0x27CD, 0x27CF, propertyUNASSIGNED}, // ..
+ {0x27D0, 0x2B4C, propertyDISALLOWED}, // WHITE DIAMOND WITH CENTRED DOT..RIGHTWARDS A
+ {0x2B4D, 0x2B4F, propertyUNASSIGNED}, // ..
+ {0x2B50, 0x2B59, propertyDISALLOWED}, // WHITE MEDIUM STAR..HEAVY CIRCLED SALTIRE
+ {0x2B5A, 0x2BFF, propertyUNASSIGNED}, // ..
+ {0x2C00, 0x2C2E, propertyDISALLOWED}, // GLAGOLITIC CAPITAL LETTER AZU..GLAGOLITIC CA
+ {0x2C2F, 0x0, propertyUNASSIGNED}, //
+ {0x2C30, 0x2C5E, propertyPVALID}, // GLAGOLITIC SMALL LETTER AZU..GLAGOLITIC SMAL
+ {0x2C5F, 0x0, propertyUNASSIGNED}, //
+ {0x2C60, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH DOUBLE BAR
+ {0x2C61, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH DOUBLE BAR
+ {0x2C62, 0x2C64, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH MIDDLE TILDE..LA
+ {0x2C65, 0x2C66, propertyPVALID}, // LATIN SMALL LETTER A WITH STROKE..LATIN SMAL
+ {0x2C67, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DESCENDER
+ {0x2C68, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DESCENDER
+ {0x2C69, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DESCENDER
+ {0x2C6A, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DESCENDER
+ {0x2C6B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DESCENDER
+ {0x2C6C, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DESCENDER
+ {0x2C6D, 0x2C70, propertyDISALLOWED}, // LATIN CAPITAL LETTER ALPHA..LATIN CAPITAL LE
+ {0x2C71, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH RIGHT HOOK
+ {0x2C72, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH HOOK
+ {0x2C73, 0x2C74, propertyPVALID}, // LATIN SMALL LETTER W WITH HOOK..LATIN SMALL
+ {0x2C75, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER HALF H
+ {0x2C76, 0x2C7B, propertyPVALID}, // LATIN SMALL LETTER HALF H..LATIN LETTER SMAL
+ {0x2C7C, 0x2C80, propertyDISALLOWED}, // LATIN SUBSCRIPT SMALL LETTER J..COPTIC CAPIT
+ {0x2C81, 0x0, propertyPVALID}, // COPTIC SMALL LETTER ALFA
+ {0x2C82, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER VIDA
+ {0x2C83, 0x0, propertyPVALID}, // COPTIC SMALL LETTER VIDA
+ {0x2C84, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER GAMMA
+ {0x2C85, 0x0, propertyPVALID}, // COPTIC SMALL LETTER GAMMA
+ {0x2C86, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DALDA
+ {0x2C87, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DALDA
+ {0x2C88, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER EIE
+ {0x2C89, 0x0, propertyPVALID}, // COPTIC SMALL LETTER EIE
+ {0x2C8A, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SOU
+ {0x2C8B, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SOU
+ {0x2C8C, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER ZATA
+ {0x2C8D, 0x0, propertyPVALID}, // COPTIC SMALL LETTER ZATA
+ {0x2C8E, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER HATE
+ {0x2C8F, 0x0, propertyPVALID}, // COPTIC SMALL LETTER HATE
+ {0x2C90, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER THETHE
+ {0x2C91, 0x0, propertyPVALID}, // COPTIC SMALL LETTER THETHE
+ {0x2C92, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER IAUDA
+ {0x2C93, 0x0, propertyPVALID}, // COPTIC SMALL LETTER IAUDA
+ {0x2C94, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KAPA
+ {0x2C95, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KAPA
+ {0x2C96, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER LAULA
+ {0x2C97, 0x0, propertyPVALID}, // COPTIC SMALL LETTER LAULA
+ {0x2C98, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER MI
+ {0x2C99, 0x0, propertyPVALID}, // COPTIC SMALL LETTER MI
+ {0x2C9A, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER NI
+ {0x2C9B, 0x0, propertyPVALID}, // COPTIC SMALL LETTER NI
+ {0x2C9C, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KSI
+ {0x2C9D, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KSI
+ {0x2C9E, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER O
+ {0x2C9F, 0x0, propertyPVALID}, // COPTIC SMALL LETTER O
+ {0x2CA0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER PI
+ {0x2CA1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER PI
+ {0x2CA2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER RO
+ {0x2CA3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER RO
+ {0x2CA4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SIMA
+ {0x2CA5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SIMA
+ {0x2CA6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER TAU
+ {0x2CA7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER TAU
+ {0x2CA8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER UA
+ {0x2CA9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER UA
+ {0x2CAA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER FI
+ {0x2CAB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER FI
+ {0x2CAC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KHI
+ {0x2CAD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KHI
+ {0x2CAE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER PSI
+ {0x2CAF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER PSI
+ {0x2CB0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OOU
+ {0x2CB1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OOU
+ {0x2CB2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P ALEF
+ {0x2CB3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P ALEF
+ {0x2CB4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC AIN
+ {0x2CB5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC AIN
+ {0x2CB6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC EIE
+ {0x2CB7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC EIE
+ {0x2CB8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P KAPA
+ {0x2CB9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P KAPA
+ {0x2CBA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P NI
+ {0x2CBB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P NI
+ {0x2CBC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC NI
+ {0x2CBD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC NI
+ {0x2CBE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC OOU
+ {0x2CBF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC OOU
+ {0x2CC0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SAMPI
+ {0x2CC1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SAMPI
+ {0x2CC2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CROSSED SHEI
+ {0x2CC3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CROSSED SHEI
+ {0x2CC4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC SHEI
+ {0x2CC5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC SHEI
+ {0x2CC6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC ESH
+ {0x2CC7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC ESH
+ {0x2CC8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER AKHMIMIC KHEI
+ {0x2CC9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER AKHMIMIC KHEI
+ {0x2CCA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P HORI
+ {0x2CCB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P HORI
+ {0x2CCC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HORI
+ {0x2CCD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HORI
+ {0x2CCE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HA
+ {0x2CCF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HA
+ {0x2CD0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER L-SHAPED HA
+ {0x2CD1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER L-SHAPED HA
+ {0x2CD2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HEI
+ {0x2CD3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HEI
+ {0x2CD4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HAT
+ {0x2CD5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HAT
+ {0x2CD6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC GANGIA
+ {0x2CD7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC GANGIA
+ {0x2CD8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC DJA
+ {0x2CD9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC DJA
+ {0x2CDA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC SHIMA
+ {0x2CDB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC SHIMA
+ {0x2CDC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN SHIMA
+ {0x2CDD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN SHIMA
+ {0x2CDE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN NGI
+ {0x2CDF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN NGI
+ {0x2CE0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN NYI
+ {0x2CE1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN NYI
+ {0x2CE2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN WAU
+ {0x2CE3, 0x2CE4, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN WAU..COPTIC S
+ {0x2CE5, 0x2CEB, propertyDISALLOWED}, // COPTIC SYMBOL MI RO..COPTIC CAPITAL LETTER C
+ {0x2CEC, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC SHEI
+ {0x2CED, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC GANGIA
+ {0x2CEE, 0x2CF1, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC GANGIA..CO
+ {0x2CF2, 0x2CF8, propertyUNASSIGNED}, // ..
+ {0x2CF9, 0x2CFF, propertyDISALLOWED}, // COPTIC OLD NUBIAN FULL STOP..COPTIC MORPHOLO
+ {0x2D00, 0x2D25, propertyPVALID}, // GEORGIAN SMALL LETTER AN..GEORGIAN SMALL LET
+ {0x2D26, 0x2D2F, propertyUNASSIGNED}, // ..
+ {0x2D30, 0x2D65, propertyPVALID}, // TIFINAGH LETTER YA..TIFINAGH LETTER YAZZ
+ {0x2D66, 0x2D6E, propertyUNASSIGNED}, // ..
+ {0x2D6F, 0x0, propertyDISALLOWED}, // TIFINAGH MODIFIER LETTER LABIALIZATION MARK
+ {0x2D70, 0x2D7F, propertyUNASSIGNED}, // ..
+ {0x2D80, 0x2D96, propertyPVALID}, // ETHIOPIC SYLLABLE LOA..ETHIOPIC SYLLABLE GGW
+ {0x2D97, 0x2D9F, propertyUNASSIGNED}, // ..
+ {0x2DA0, 0x2DA6, propertyPVALID}, // ETHIOPIC SYLLABLE SSA..ETHIOPIC SYLLABLE SSO
+ {0x2DA7, 0x0, propertyUNASSIGNED}, //
+ {0x2DA8, 0x2DAE, propertyPVALID}, // ETHIOPIC SYLLABLE CCA..ETHIOPIC SYLLABLE CCO
+ {0x2DAF, 0x0, propertyUNASSIGNED}, //
+ {0x2DB0, 0x2DB6, propertyPVALID}, // ETHIOPIC SYLLABLE ZZA..ETHIOPIC SYLLABLE ZZO
+ {0x2DB7, 0x0, propertyUNASSIGNED}, //
+ {0x2DB8, 0x2DBE, propertyPVALID}, // ETHIOPIC SYLLABLE CCHA..ETHIOPIC SYLLABLE CC
+ {0x2DBF, 0x0, propertyUNASSIGNED}, //
+ {0x2DC0, 0x2DC6, propertyPVALID}, // ETHIOPIC SYLLABLE QYA..ETHIOPIC SYLLABLE QYO
+ {0x2DC7, 0x0, propertyUNASSIGNED}, //
+ {0x2DC8, 0x2DCE, propertyPVALID}, // ETHIOPIC SYLLABLE KYA..ETHIOPIC SYLLABLE KYO
+ {0x2DCF, 0x0, propertyUNASSIGNED}, //
+ {0x2DD0, 0x2DD6, propertyPVALID}, // ETHIOPIC SYLLABLE XYA..ETHIOPIC SYLLABLE XYO
+ {0x2DD7, 0x0, propertyUNASSIGNED}, //
+ {0x2DD8, 0x2DDE, propertyPVALID}, // ETHIOPIC SYLLABLE GYA..ETHIOPIC SYLLABLE GYO
+ {0x2DDF, 0x0, propertyUNASSIGNED}, //
+ {0x2DE0, 0x2DFF, propertyPVALID}, // COMBINING CYRILLIC LETTER BE..COMBINING CYRI
+ {0x2E00, 0x2E2E, propertyDISALLOWED}, // RIGHT ANGLE SUBSTITUTION MARKER..REVERSED QU
+ {0x2E2F, 0x0, propertyPVALID}, // VERTICAL TILDE
+ {0x2E30, 0x2E31, propertyDISALLOWED}, // RING POINT..WORD SEPARATOR MIDDLE DOT
+ {0x2E32, 0x2E7F, propertyUNASSIGNED}, // ..
+ {0x2E80, 0x2E99, propertyDISALLOWED}, // CJK RADICAL REPEAT..CJK RADICAL RAP
+ {0x2E9A, 0x0, propertyUNASSIGNED}, //
+ {0x2E9B, 0x2EF3, propertyDISALLOWED}, // CJK RADICAL CHOKE..CJK RADICAL C-SIMPLIFIED
+ {0x2EF4, 0x2EFF, propertyUNASSIGNED}, // ..
+ {0x2F00, 0x2FD5, propertyDISALLOWED}, // KANGXI RADICAL ONE..KANGXI RADICAL FLUTE
+ {0x2FD6, 0x2FEF, propertyUNASSIGNED}, // ..
+ {0x2FF0, 0x2FFB, propertyDISALLOWED}, // IDEOGRAPHIC DESCRIPTION CHARACTER LEFT TO RI
+ {0x2FFC, 0x2FFF, propertyUNASSIGNED}, // ..
+ {0x3000, 0x3004, propertyDISALLOWED}, // IDEOGRAPHIC SPACE..JAPANESE INDUSTRIAL STAND
+ {0x3005, 0x3007, propertyPVALID}, // IDEOGRAPHIC ITERATION MARK..IDEOGRAPHIC NUMB
+ {0x3008, 0x3029, propertyDISALLOWED}, // LEFT ANGLE BRACKET..HANGZHOU NUMERAL NINE
+ {0x302A, 0x302D, propertyPVALID}, // IDEOGRAPHIC LEVEL TONE MARK..IDEOGRAPHIC ENT
+ {0x302E, 0x303B, propertyDISALLOWED}, // HANGUL SINGLE DOT TONE MARK..VERTICAL IDEOGR
+ {0x303C, 0x0, propertyPVALID}, // MASU MARK
+ {0x303D, 0x303F, propertyDISALLOWED}, // PART ALTERNATION MARK..IDEOGRAPHIC HALF FILL
+ {0x3040, 0x0, propertyUNASSIGNED}, //
+ {0x3041, 0x3096, propertyPVALID}, // HIRAGANA LETTER SMALL A..HIRAGANA LETTER SMA
+ {0x3097, 0x3098, propertyUNASSIGNED}, // ..
+ {0x3099, 0x309A, propertyPVALID}, // COMBINING KATAKANA-HIRAGANA VOICED SOUND MAR
+ {0x309B, 0x309C, propertyDISALLOWED}, // KATAKANA-HIRAGANA VOICED SOUND MARK..KATAKAN
+ {0x309D, 0x309E, propertyPVALID}, // HIRAGANA ITERATION MARK..HIRAGANA VOICED ITE
+ {0x309F, 0x30A0, propertyDISALLOWED}, // HIRAGANA DIGRAPH YORI..KATAKANA-HIRAGANA DOU
+ {0x30A1, 0x30FA, propertyPVALID}, // KATAKANA LETTER SMALL A..KATAKANA LETTER VO
+ {0x30FB, 0x0, propertyCONTEXTO}, // KATAKANA MIDDLE DOT
+ {0x30FC, 0x30FE, propertyPVALID}, // KATAKANA-HIRAGANA PROLONGED SOUND MARK..KATA
+ {0x30FF, 0x0, propertyDISALLOWED}, // KATAKANA DIGRAPH KOTO
+ {0x3100, 0x3104, propertyUNASSIGNED}, // ..
+ {0x3105, 0x312D, propertyPVALID}, // BOPOMOFO LETTER B..BOPOMOFO LETTER IH
+ {0x312E, 0x3130, propertyUNASSIGNED}, // ..
+ {0x3131, 0x318E, propertyDISALLOWED}, // HANGUL LETTER KIYEOK..HANGUL LETTER ARAEAE
+ {0x318F, 0x0, propertyUNASSIGNED}, //
+ {0x3190, 0x319F, propertyDISALLOWED}, // IDEOGRAPHIC ANNOTATION LINKING MARK..IDEOGRA
+ {0x31A0, 0x31B7, propertyPVALID}, // BOPOMOFO LETTER BU..BOPOMOFO FINAL LETTER H
+ {0x31B8, 0x31BF, propertyUNASSIGNED}, // ..
+ {0x31C0, 0x31E3, propertyDISALLOWED}, // CJK STROKE T..CJK STROKE Q
+ {0x31E4, 0x31EF, propertyUNASSIGNED}, // ..
+ {0x31F0, 0x31FF, propertyPVALID}, // KATAKANA LETTER SMALL KU..KATAKANA LETTER SM
+ {0x3200, 0x321E, propertyDISALLOWED}, // PARENTHESIZED HANGUL KIYEOK..PARENTHESIZED K
+ {0x321F, 0x0, propertyUNASSIGNED}, //
+ {0x3220, 0x32FE, propertyDISALLOWED}, // PARENTHESIZED IDEOGRAPH ONE..CIRCLED KATAKAN
+ {0x32FF, 0x0, propertyUNASSIGNED}, //
+ {0x3300, 0x33FF, propertyDISALLOWED}, // SQUARE APAATO..SQUARE GAL
+ {0x3400, 0x4DB5, propertyPVALID}, // ....
+ {0x4DC0, 0x4DFF, propertyDISALLOWED}, // HEXAGRAM FOR THE CREATIVE HEAVEN..HEXAGRAM F
+ {0x4E00, 0x9FCB, propertyPVALID}, // ..
+ {0x9FCC, 0x9FFF, propertyUNASSIGNED}, // ..
+ {0xA000, 0xA48C, propertyPVALID}, // YI SYLLABLE IT..YI SYLLABLE YYR
+ {0xA48D, 0xA48F, propertyUNASSIGNED}, // ..
+ {0xA490, 0xA4C6, propertyDISALLOWED}, // YI RADICAL QOT..YI RADICAL KE
+ {0xA4C7, 0xA4CF, propertyUNASSIGNED}, // ..
+ {0xA4D0, 0xA4FD, propertyPVALID}, // LISU LETTER BA..LISU LETTER TONE MYA JEU
+ {0xA4FE, 0xA4FF, propertyDISALLOWED}, // LISU PUNCTUATION COMMA..LISU PUNCTUATION FUL
+ {0xA500, 0xA60C, propertyPVALID}, // VAI SYLLABLE EE..VAI SYLLABLE LENGTHENER
+ {0xA60D, 0xA60F, propertyDISALLOWED}, // VAI COMMA..VAI QUESTION MARK
+ {0xA610, 0xA62B, propertyPVALID}, // VAI SYLLABLE NDOLE FA..VAI SYLLABLE NDOLE DO
+ {0xA62C, 0xA63F, propertyUNASSIGNED}, // ..
+ {0xA640, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZEMLYA
+ {0xA641, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZEMLYA
+ {0xA642, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZELO
+ {0xA643, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZELO
+ {0xA644, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED DZE
+ {0xA645, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED DZE
+ {0xA646, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTA
+ {0xA647, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTA
+ {0xA648, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DJERV
+ {0xA649, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DJERV
+ {0xA64A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER MONOGRAPH UK
+ {0xA64B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER MONOGRAPH UK
+ {0xA64C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BROAD OMEGA
+ {0xA64D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BROAD OMEGA
+ {0xA64E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER NEUTRAL YER
+ {0xA64F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER NEUTRAL YER
+ {0xA650, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YERU WITH BACK YER
+ {0xA651, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YERU WITH BACK YER
+ {0xA652, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED YAT
+ {0xA653, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED YAT
+ {0xA654, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED YU
+ {0xA655, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED YU
+ {0xA656, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED A
+ {0xA657, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED A
+ {0xA658, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CLOSED LITTLE YUS
+ {0xA659, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CLOSED LITTLE YUS
+ {0xA65A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BLENDED YUS
+ {0xA65B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BLENDED YUS
+ {0xA65C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED CLOSED LITT
+ {0xA65D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED CLOSED LITTLE
+ {0xA65E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YN
+ {0xA65F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YN
+ {0xA660, 0xA661, propertyUNASSIGNED}, // ..
+ {0xA662, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT DE
+ {0xA663, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT DE
+ {0xA664, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT EL
+ {0xA665, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT EL
+ {0xA666, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT EM
+ {0xA667, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT EM
+ {0xA668, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER MONOCULAR O
+ {0xA669, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER MONOCULAR O
+ {0xA66A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BINOCULAR O
+ {0xA66B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BINOCULAR O
+ {0xA66C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DOUBLE MONOCULAR O
+ {0xA66D, 0xA66F, propertyPVALID}, // CYRILLIC SMALL LETTER DOUBLE MONOCULAR O..CO
+ {0xA670, 0xA673, propertyDISALLOWED}, // COMBINING CYRILLIC TEN MILLIONS SIGN..SLAVON
+ {0xA674, 0xA67B, propertyUNASSIGNED}, // ..
+ {0xA67C, 0xA67D, propertyPVALID}, // COMBINING CYRILLIC KAVYKA..COMBINING CYRILLI
+ {0xA67E, 0x0, propertyDISALLOWED}, // CYRILLIC KAVYKA
+ {0xA67F, 0x0, propertyPVALID}, // CYRILLIC PAYEROK
+ {0xA680, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DWE
+ {0xA681, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DWE
+ {0xA682, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZWE
+ {0xA683, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZWE
+ {0xA684, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZHWE
+ {0xA685, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHWE
+ {0xA686, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CCHE
+ {0xA687, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CCHE
+ {0xA688, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZZE
+ {0xA689, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZZE
+ {0xA68A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TE WITH MIDDLE HOOK
+ {0xA68B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TE WITH MIDDLE HOOK
+ {0xA68C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TWE
+ {0xA68D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TWE
+ {0xA68E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TSWE
+ {0xA68F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TSWE
+ {0xA690, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TSSE
+ {0xA691, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TSSE
+ {0xA692, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TCHE
+ {0xA693, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TCHE
+ {0xA694, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HWE
+ {0xA695, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HWE
+ {0xA696, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SHWE
+ {0xA697, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SHWE
+ {0xA698, 0xA69F, propertyUNASSIGNED}, // ..
+ {0xA6A0, 0xA6E5, propertyPVALID}, // BAMUM LETTER A..BAMUM LETTER KI
+ {0xA6E6, 0xA6EF, propertyDISALLOWED}, // BAMUM LETTER MO..BAMUM LETTER KOGHOM
+ {0xA6F0, 0xA6F1, propertyPVALID}, // BAMUM COMBINING MARK KOQNDON..BAMUM COMBININ
+ {0xA6F2, 0xA6F7, propertyDISALLOWED}, // BAMUM NJAEMLI..BAMUM QUESTION MARK
+ {0xA6F8, 0xA6FF, propertyUNASSIGNED}, // ..
+ {0xA700, 0xA716, propertyDISALLOWED}, // MODIFIER LETTER CHINESE TONE YIN PING..MODIF
+ {0xA717, 0xA71F, propertyPVALID}, // MODIFIER LETTER DOT VERTICAL BAR..MODIFIER L
+ {0xA720, 0xA722, propertyDISALLOWED}, // MODIFIER LETTER STRESS AND HIGH TONE..LATIN
+ {0xA723, 0x0, propertyPVALID}, // LATIN SMALL LETTER EGYPTOLOGICAL ALEF
+ {0xA724, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER EGYPTOLOGICAL AIN
+ {0xA725, 0x0, propertyPVALID}, // LATIN SMALL LETTER EGYPTOLOGICAL AIN
+ {0xA726, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER HENG
+ {0xA727, 0x0, propertyPVALID}, // LATIN SMALL LETTER HENG
+ {0xA728, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TZ
+ {0xA729, 0x0, propertyPVALID}, // LATIN SMALL LETTER TZ
+ {0xA72A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TRESILLO
+ {0xA72B, 0x0, propertyPVALID}, // LATIN SMALL LETTER TRESILLO
+ {0xA72C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CUATRILLO
+ {0xA72D, 0x0, propertyPVALID}, // LATIN SMALL LETTER CUATRILLO
+ {0xA72E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CUATRILLO WITH COMMA
+ {0xA72F, 0xA731, propertyPVALID}, // LATIN SMALL LETTER CUATRILLO WITH COMMA..LAT
+ {0xA732, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AA
+ {0xA733, 0x0, propertyPVALID}, // LATIN SMALL LETTER AA
+ {0xA734, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AO
+ {0xA735, 0x0, propertyPVALID}, // LATIN SMALL LETTER AO
+ {0xA736, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AU
+ {0xA737, 0x0, propertyPVALID}, // LATIN SMALL LETTER AU
+ {0xA738, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AV
+ {0xA739, 0x0, propertyPVALID}, // LATIN SMALL LETTER AV
+ {0xA73A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AV WITH HORIZONTAL BAR
+ {0xA73B, 0x0, propertyPVALID}, // LATIN SMALL LETTER AV WITH HORIZONTAL BAR
+ {0xA73C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AY
+ {0xA73D, 0x0, propertyPVALID}, // LATIN SMALL LETTER AY
+ {0xA73E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER REVERSED C WITH DOT
+ {0xA73F, 0x0, propertyPVALID}, // LATIN SMALL LETTER REVERSED C WITH DOT
+ {0xA740, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH STROKE
+ {0xA741, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH STROKE
+ {0xA742, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DIAGONAL STROKE
+ {0xA743, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DIAGONAL STROKE
+ {0xA744, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH STROKE AND DIAGO
+ {0xA745, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH STROKE AND DIAGONA
+ {0xA746, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER BROKEN L
+ {0xA747, 0x0, propertyPVALID}, // LATIN SMALL LETTER BROKEN L
+ {0xA748, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH HIGH STROKE
+ {0xA749, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH HIGH STROKE
+ {0xA74A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH LONG STROKE OVER
+ {0xA74B, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH LONG STROKE OVERLA
+ {0xA74C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH LOOP
+ {0xA74D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH LOOP
+ {0xA74E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER OO
+ {0xA74F, 0x0, propertyPVALID}, // LATIN SMALL LETTER OO
+ {0xA750, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH STROKE THROUGH D
+ {0xA751, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH STROKE THROUGH DES
+ {0xA752, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH FLOURISH
+ {0xA753, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH FLOURISH
+ {0xA754, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH SQUIRREL TAIL
+ {0xA755, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH SQUIRREL TAIL
+ {0xA756, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Q WITH STROKE THROUGH D
+ {0xA757, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH STROKE THROUGH DES
+ {0xA758, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Q WITH DIAGONAL STROKE
+ {0xA759, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH DIAGONAL STROKE
+ {0xA75A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R ROTUNDA
+ {0xA75B, 0x0, propertyPVALID}, // LATIN SMALL LETTER R ROTUNDA
+ {0xA75C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER RUM ROTUNDA
+ {0xA75D, 0x0, propertyPVALID}, // LATIN SMALL LETTER RUM ROTUNDA
+ {0xA75E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH DIAGONAL STROKE
+ {0xA75F, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH DIAGONAL STROKE
+ {0xA760, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VY
+ {0xA761, 0x0, propertyPVALID}, // LATIN SMALL LETTER VY
+ {0xA762, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VISIGOTHIC Z
+ {0xA763, 0x0, propertyPVALID}, // LATIN SMALL LETTER VISIGOTHIC Z
+ {0xA764, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER THORN WITH STROKE
+ {0xA765, 0x0, propertyPVALID}, // LATIN SMALL LETTER THORN WITH STROKE
+ {0xA766, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER THORN WITH STROKE THROU
+ {0xA767, 0x0, propertyPVALID}, // LATIN SMALL LETTER THORN WITH STROKE THROUGH
+ {0xA768, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VEND
+ {0xA769, 0x0, propertyPVALID}, // LATIN SMALL LETTER VEND
+ {0xA76A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER ET
+ {0xA76B, 0x0, propertyPVALID}, // LATIN SMALL LETTER ET
+ {0xA76C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER IS
+ {0xA76D, 0x0, propertyPVALID}, // LATIN SMALL LETTER IS
+ {0xA76E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CON
+ {0xA76F, 0x0, propertyPVALID}, // LATIN SMALL LETTER CON
+ {0xA770, 0x0, propertyDISALLOWED}, // MODIFIER LETTER US
+ {0xA771, 0xA778, propertyPVALID}, // LATIN SMALL LETTER DUM..LATIN SMALL LETTER U
+ {0xA779, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR D
+ {0xA77A, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR D
+ {0xA77B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR F
+ {0xA77C, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR F
+ {0xA77D, 0xA77E, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR G..LATIN CAPITA
+ {0xA77F, 0x0, propertyPVALID}, // LATIN SMALL LETTER TURNED INSULAR G
+ {0xA780, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TURNED L
+ {0xA781, 0x0, propertyPVALID}, // LATIN SMALL LETTER TURNED L
+ {0xA782, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR R
+ {0xA783, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR R
+ {0xA784, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR S
+ {0xA785, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR S
+ {0xA786, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR T
+ {0xA787, 0xA788, propertyPVALID}, // LATIN SMALL LETTER INSULAR T..MODIFIER LETTE
+ {0xA789, 0xA78B, propertyDISALLOWED}, // MODIFIER LETTER COLON..LATIN CAPITAL LETTER
+ {0xA78C, 0x0, propertyPVALID}, // LATIN SMALL LETTER SALTILLO
+ {0xA78D, 0xA7FA, propertyUNASSIGNED}, // ..
+ {0xA7FB, 0xA827, propertyPVALID}, // LATIN EPIGRAPHIC LETTER REVERSED F..SYLOTI N
+ {0xA828, 0xA82B, propertyDISALLOWED}, // SYLOTI NAGRI POETRY MARK-1..SYLOTI NAGRI POE
+ {0xA82C, 0xA82F, propertyUNASSIGNED}, // ..
+ {0xA830, 0xA839, propertyDISALLOWED}, // NORTH INDIC FRACTION ONE QUARTER..NORTH INDI
+ {0xA83A, 0xA83F, propertyUNASSIGNED}, // ..
+ {0xA840, 0xA873, propertyPVALID}, // PHAGS-PA LETTER KA..PHAGS-PA LETTER CANDRABI
+ {0xA874, 0xA877, propertyDISALLOWED}, // PHAGS-PA SINGLE HEAD MARK..PHAGS-PA MARK DOU
+ {0xA878, 0xA87F, propertyUNASSIGNED}, // ..
+ {0xA880, 0xA8C4, propertyPVALID}, // SAURASHTRA SIGN ANUSVARA..SAURASHTRA SIGN VI
+ {0xA8C5, 0xA8CD, propertyUNASSIGNED}, // ..
+ {0xA8CE, 0xA8CF, propertyDISALLOWED}, // SAURASHTRA DANDA..SAURASHTRA DOUBLE DANDA
+ {0xA8D0, 0xA8D9, propertyPVALID}, // SAURASHTRA DIGIT ZERO..SAURASHTRA DIGIT NINE
+ {0xA8DA, 0xA8DF, propertyUNASSIGNED}, // ..
+ {0xA8E0, 0xA8F7, propertyPVALID}, // COMBINING DEVANAGARI DIGIT ZERO..DEVANAGARI
+ {0xA8F8, 0xA8FA, propertyDISALLOWED}, // DEVANAGARI SIGN PUSHPIKA..DEVANAGARI CARET
+ {0xA8FB, 0x0, propertyPVALID}, // DEVANAGARI HEADSTROKE
+ {0xA8FC, 0xA8FF, propertyUNASSIGNED}, // ..
+ {0xA900, 0xA92D, propertyPVALID}, // KAYAH LI DIGIT ZERO..KAYAH LI TONE CALYA PLO
+ {0xA92E, 0xA92F, propertyDISALLOWED}, // KAYAH LI SIGN CWI..KAYAH LI SIGN SHYA
+ {0xA930, 0xA953, propertyPVALID}, // REJANG LETTER KA..REJANG VIRAMA
+ {0xA954, 0xA95E, propertyUNASSIGNED}, // ..
+ {0xA95F, 0xA97C, propertyDISALLOWED}, // REJANG SECTION MARK..HANGUL CHOSEONG SSANGYE
+ {0xA97D, 0xA97F, propertyUNASSIGNED}, // ..
+ {0xA980, 0xA9C0, propertyPVALID}, // JAVANESE SIGN PANYANGGA..JAVANESE PANGKON
+ {0xA9C1, 0xA9CD, propertyDISALLOWED}, // JAVANESE LEFT RERENGGAN..JAVANESE TURNED PAD
+ {0xA9CE, 0x0, propertyUNASSIGNED}, //
+ {0xA9CF, 0xA9D9, propertyPVALID}, // JAVANESE PANGRANGKEP..JAVANESE DIGIT NINE
+ {0xA9DA, 0xA9DD, propertyUNASSIGNED}, // ..
+ {0xA9DE, 0xA9DF, propertyDISALLOWED}, // JAVANESE PADA TIRTA TUMETES..JAVANESE PADA I
+ {0xA9E0, 0xA9FF, propertyUNASSIGNED}, // ..
+ {0xAA00, 0xAA36, propertyPVALID}, // CHAM LETTER A..CHAM CONSONANT SIGN WA
+ {0xAA37, 0xAA3F, propertyUNASSIGNED}, // ..
+ {0xAA40, 0xAA4D, propertyPVALID}, // CHAM LETTER FINAL K..CHAM CONSONANT SIGN FIN
+ {0xAA4E, 0xAA4F, propertyUNASSIGNED}, // ..
+ {0xAA50, 0xAA59, propertyPVALID}, // CHAM DIGIT ZERO..CHAM DIGIT NINE
+ {0xAA5A, 0xAA5B, propertyUNASSIGNED}, // ..
+ {0xAA5C, 0xAA5F, propertyDISALLOWED}, // CHAM PUNCTUATION SPIRAL..CHAM PUNCTUATION TR
+ {0xAA60, 0xAA76, propertyPVALID}, // MYANMAR LETTER KHAMTI GA..MYANMAR LOGOGRAM K
+ {0xAA77, 0xAA79, propertyDISALLOWED}, // MYANMAR SYMBOL AITON EXCLAMATION..MYANMAR SY
+ {0xAA7A, 0xAA7B, propertyPVALID}, // MYANMAR LETTER AITON RA..MYANMAR SIGN PAO KA
+ {0xAA7C, 0xAA7F, propertyUNASSIGNED}, // ..
+ {0xAA80, 0xAAC2, propertyPVALID}, // TAI VIET LETTER LOW KO..TAI VIET TONE MAI SO
+ {0xAAC3, 0xAADA, propertyUNASSIGNED}, // ..
+ {0xAADB, 0xAADD, propertyPVALID}, // TAI VIET SYMBOL KON..TAI VIET SYMBOL SAM
+ {0xAADE, 0xAADF, propertyDISALLOWED}, // TAI VIET SYMBOL HO HOI..TAI VIET SYMBOL KOI
+ {0xAAE0, 0xABBF, propertyUNASSIGNED}, // ..
+ {0xABC0, 0xABEA, propertyPVALID}, // MEETEI MAYEK LETTER KOK..MEETEI MAYEK VOWEL + {0xABEB, 0x0, propertyDISALLOWED}, // MEETEI MAYEK CHEIKHEI + {0xABEC, 0xABED, propertyPVALID}, // MEETEI MAYEK LUM IYEK..MEETEI MAYEK APUN IYE + {0xABEE, 0xABEF, propertyUNASSIGNED}, // .. + {0xABF0, 0xABF9, propertyPVALID}, // MEETEI MAYEK DIGIT ZERO..MEETEI MAYEK DIGIT + {0xABFA, 0xABFF, propertyUNASSIGNED}, // .. + {0xAC00, 0xD7A3, propertyPVALID}, // .. + {0xD7A4, 0xD7AF, propertyUNASSIGNED}, // .. + {0xD7B0, 0xD7C6, propertyDISALLOWED}, // HANGUL JUNGSEONG O-YEO..HANGUL JUNGSEONG ARA + {0xD7C7, 0xD7CA, propertyUNASSIGNED}, // .. + {0xD7CB, 0xD7FB, propertyDISALLOWED}, // HANGUL JONGSEONG NIEUN-RIEUL..HANGUL JONGSEO + {0xD7FC, 0xD7FF, propertyUNASSIGNED}, // .. + {0xD800, 0xFA0D, propertyDISALLOWED}, // ..CJK COMPAT + {0xFA0E, 0xFA0F, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA0E..CJK COMPAT + {0xFA10, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA10 + {0xFA11, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA11 + {0xFA12, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA12 + {0xFA13, 0xFA14, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA13..CJK COMPAT + {0xFA15, 0xFA1E, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA15..CJK COMPAT + {0xFA1F, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA1F + {0xFA20, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA20 + {0xFA21, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA21 + {0xFA22, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA22 + {0xFA23, 0xFA24, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA23..CJK COMPAT + {0xFA25, 0xFA26, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA25..CJK COMPAT + {0xFA27, 0xFA29, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA27..CJK COMPAT + {0xFA2A, 0xFA2D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA2A..CJK COMPAT + {0xFA2E, 0xFA2F, propertyUNASSIGNED}, // .. + {0xFA30, 0xFA6D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA30..CJK COMPAT + {0xFA6E, 0xFA6F, propertyUNASSIGNED}, // .. + {0xFA70, 0xFAD9, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA70..CJK COMPAT + {0xFADA, 0xFAFF, propertyUNASSIGNED}, // .. + {0xFB00, 0xFB06, propertyDISALLOWED}, // LATIN SMALL LIGATURE FF..LATIN SMALL LIGATUR + {0xFB07, 0xFB12, propertyUNASSIGNED}, // .. + {0xFB13, 0xFB17, propertyDISALLOWED}, // ARMENIAN SMALL LIGATURE MEN NOW..ARMENIAN SM + {0xFB18, 0xFB1C, propertyUNASSIGNED}, // .. + {0xFB1D, 0x0, propertyDISALLOWED}, // HEBREW LETTER YOD WITH HIRIQ + {0xFB1E, 0x0, propertyPVALID}, // HEBREW POINT JUDEO-SPANISH VARIKA + {0xFB1F, 0xFB36, propertyDISALLOWED}, // HEBREW LIGATURE YIDDISH YOD YOD PATAH..HEBRE + {0xFB37, 0x0, propertyUNASSIGNED}, // + {0xFB38, 0xFB3C, propertyDISALLOWED}, // HEBREW LETTER TET WITH DAGESH..HEBREW LETTER + {0xFB3D, 0x0, propertyUNASSIGNED}, // + {0xFB3E, 0x0, propertyDISALLOWED}, // HEBREW LETTER MEM WITH DAGESH + {0xFB3F, 0x0, propertyUNASSIGNED}, // + {0xFB40, 0xFB41, propertyDISALLOWED}, // HEBREW LETTER NUN WITH DAGESH..HEBREW LETTER + {0xFB42, 0x0, propertyUNASSIGNED}, // + {0xFB43, 0xFB44, propertyDISALLOWED}, // HEBREW LETTER FINAL PE WITH DAGESH..HEBREW L + {0xFB45, 0x0, propertyUNASSIGNED}, // + {0xFB46, 0xFBB1, propertyDISALLOWED}, // HEBREW LETTER TSADI WITH DAGESH..ARABIC LETT + {0xFBB2, 0xFBD2, propertyUNASSIGNED}, // .. + {0xFBD3, 0xFD3F, propertyDISALLOWED}, // ARABIC LETTER NG ISOLATED FORM..ORNATE RIGHT + {0xFD40, 0xFD4F, propertyUNASSIGNED}, // .. 
+ {0xFD50, 0xFD8F, propertyDISALLOWED}, // ARABIC LIGATURE TEH WITH JEEM WITH MEEM INIT + {0xFD90, 0xFD91, propertyUNASSIGNED}, // .. + {0xFD92, 0xFDC7, propertyDISALLOWED}, // ARABIC LIGATURE MEEM WITH JEEM WITH KHAH INI + {0xFDC8, 0xFDCF, propertyUNASSIGNED}, // .. + {0xFDD0, 0xFDFD, propertyDISALLOWED}, // ..ARABIC LIGATURE BISMILLAH AR + {0xFDFE, 0xFDFF, propertyUNASSIGNED}, // .. + {0xFE00, 0xFE19, propertyDISALLOWED}, // VARIATION SELECTOR-1..PRESENTATION FORM FOR + {0xFE1A, 0xFE1F, propertyUNASSIGNED}, // .. + {0xFE20, 0xFE26, propertyPVALID}, // COMBINING LIGATURE LEFT HALF..COMBINING CONJ + {0xFE27, 0xFE2F, propertyUNASSIGNED}, // .. + {0xFE30, 0xFE52, propertyDISALLOWED}, // PRESENTATION FORM FOR VERTICAL TWO DOT LEADE + {0xFE53, 0x0, propertyUNASSIGNED}, // + {0xFE54, 0xFE66, propertyDISALLOWED}, // SMALL SEMICOLON..SMALL EQUALS SIGN + {0xFE67, 0x0, propertyUNASSIGNED}, // + {0xFE68, 0xFE6B, propertyDISALLOWED}, // SMALL REVERSE SOLIDUS..SMALL COMMERCIAL AT + {0xFE6C, 0xFE6F, propertyUNASSIGNED}, // .. + {0xFE70, 0xFE72, propertyDISALLOWED}, // ARABIC FATHATAN ISOLATED FORM..ARABIC DAMMAT + {0xFE73, 0x0, propertyPVALID}, // ARABIC TAIL FRAGMENT + {0xFE74, 0x0, propertyDISALLOWED}, // ARABIC KASRATAN ISOLATED FORM + {0xFE75, 0x0, propertyUNASSIGNED}, // + {0xFE76, 0xFEFC, propertyDISALLOWED}, // ARABIC FATHA ISOLATED FORM..ARABIC LIGATURE + {0xFEFD, 0xFEFE, propertyUNASSIGNED}, // .. + {0xFEFF, 0x0, propertyDISALLOWED}, // ZERO WIDTH NO-BREAK SPACE + {0xFF00, 0x0, propertyUNASSIGNED}, // + {0xFF01, 0xFFBE, propertyDISALLOWED}, // FULLWIDTH EXCLAMATION MARK..HALFWIDTH HANGUL + {0xFFBF, 0xFFC1, propertyUNASSIGNED}, // .. + {0xFFC2, 0xFFC7, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER A..HALFWIDTH HANGUL + {0xFFC8, 0xFFC9, propertyUNASSIGNED}, // .. + {0xFFCA, 0xFFCF, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER YEO..HALFWIDTH HANGU + {0xFFD0, 0xFFD1, propertyUNASSIGNED}, // .. + {0xFFD2, 0xFFD7, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER YO..HALFWIDTH HANGUL + {0xFFD8, 0xFFD9, propertyUNASSIGNED}, // .. + {0xFFDA, 0xFFDC, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER EU..HALFWIDTH HANGUL + {0xFFDD, 0xFFDF, propertyUNASSIGNED}, // .. + {0xFFE0, 0xFFE6, propertyDISALLOWED}, // FULLWIDTH CENT SIGN..FULLWIDTH WON SIGN + {0xFFE7, 0x0, propertyUNASSIGNED}, // + {0xFFE8, 0xFFEE, propertyDISALLOWED}, // HALFWIDTH FORMS LIGHT VERTICAL..HALFWIDTH WH + {0xFFEF, 0xFFF8, propertyUNASSIGNED}, // .. + {0xFFF9, 0xFFFF, propertyDISALLOWED}, // INTERLINEAR ANNOTATION ANCHOR.. + {0x1000D, 0x10026, propertyPVALID}, // LINEAR B SYLLABLE B036 JO..LINEAR B SYLLABLE + {0x10027, 0x0, propertyUNASSIGNED}, // + {0x10028, 0x1003A, propertyPVALID}, // LINEAR B SYLLABLE B060 RA..LINEAR B SYLLABLE + {0x1003B, 0x0, propertyUNASSIGNED}, // + {0x1003C, 0x1003D, propertyPVALID}, // LINEAR B SYLLABLE B017 ZA..LINEAR B SYLLABLE + {0x1003E, 0x0, propertyUNASSIGNED}, // + {0x1003F, 0x1004D, propertyPVALID}, // LINEAR B SYLLABLE B020 ZO..LINEAR B SYLLABLE + {0x1004E, 0x1004F, propertyUNASSIGNED}, // .. + {0x10050, 0x1005D, propertyPVALID}, // LINEAR B SYMBOL B018..LINEAR B SYMBOL B089 + {0x1005E, 0x1007F, propertyUNASSIGNED}, // .. + {0x10080, 0x100FA, propertyPVALID}, // LINEAR B IDEOGRAM B100 MAN..LINEAR B IDEOGRA + {0x100FB, 0x100FF, propertyUNASSIGNED}, // .. + {0x10100, 0x10102, propertyDISALLOWED}, // AEGEAN WORD SEPARATOR LINE..AEGEAN CHECK MAR + {0x10103, 0x10106, propertyUNASSIGNED}, // .. 
+ {0x10107, 0x10133, propertyDISALLOWED}, // AEGEAN NUMBER ONE..AEGEAN NUMBER NINETY THOU + {0x10134, 0x10136, propertyUNASSIGNED}, // .. + {0x10137, 0x1018A, propertyDISALLOWED}, // AEGEAN WEIGHT BASE UNIT..GREEK ZERO SIGN + {0x1018B, 0x1018F, propertyUNASSIGNED}, // .. + {0x10190, 0x1019B, propertyDISALLOWED}, // ROMAN SEXTANS SIGN..ROMAN CENTURIAL SIGN + {0x1019C, 0x101CF, propertyUNASSIGNED}, // .. + {0x101D0, 0x101FC, propertyDISALLOWED}, // PHAISTOS DISC SIGN PEDESTRIAN..PHAISTOS DISC + {0x101FD, 0x0, propertyPVALID}, // PHAISTOS DISC SIGN COMBINING OBLIQUE STROKE + {0x101FE, 0x1027F, propertyUNASSIGNED}, // .. + {0x10280, 0x1029C, propertyPVALID}, // LYCIAN LETTER A..LYCIAN LETTER X + {0x1029D, 0x1029F, propertyUNASSIGNED}, // .. + {0x102A0, 0x102D0, propertyPVALID}, // CARIAN LETTER A..CARIAN LETTER UUU3 + {0x102D1, 0x102FF, propertyUNASSIGNED}, // .. + {0x10300, 0x1031E, propertyPVALID}, // OLD ITALIC LETTER A..OLD ITALIC LETTER UU + {0x1031F, 0x0, propertyUNASSIGNED}, // + {0x10320, 0x10323, propertyDISALLOWED}, // OLD ITALIC NUMERAL ONE..OLD ITALIC NUMERAL F + {0x10324, 0x1032F, propertyUNASSIGNED}, // .. + {0x10330, 0x10340, propertyPVALID}, // GOTHIC LETTER AHSA..GOTHIC LETTER PAIRTHRA + {0x10341, 0x0, propertyDISALLOWED}, // GOTHIC LETTER NINETY + {0x10342, 0x10349, propertyPVALID}, // GOTHIC LETTER RAIDA..GOTHIC LETTER OTHAL + {0x1034A, 0x0, propertyDISALLOWED}, // GOTHIC LETTER NINE HUNDRED + {0x1034B, 0x1037F, propertyUNASSIGNED}, // .. + {0x10380, 0x1039D, propertyPVALID}, // UGARITIC LETTER ALPA..UGARITIC LETTER SSU + {0x1039E, 0x0, propertyUNASSIGNED}, // + {0x1039F, 0x0, propertyDISALLOWED}, // UGARITIC WORD DIVIDER + {0x103A0, 0x103C3, propertyPVALID}, // OLD PERSIAN SIGN A..OLD PERSIAN SIGN HA + {0x103C4, 0x103C7, propertyUNASSIGNED}, // .. + {0x103C8, 0x103CF, propertyPVALID}, // OLD PERSIAN SIGN AURAMAZDAA..OLD PERSIAN SIG + {0x103D0, 0x103D5, propertyDISALLOWED}, // OLD PERSIAN WORD DIVIDER..OLD PERSIAN NUMBER + {0x103D6, 0x103FF, propertyUNASSIGNED}, // .. + {0x10400, 0x10427, propertyDISALLOWED}, // DESERET CAPITAL LETTER LONG I..DESERET CAPIT + {0x10428, 0x1049D, propertyPVALID}, // DESERET SMALL LETTER LONG I..OSMANYA LETTER + {0x1049E, 0x1049F, propertyUNASSIGNED}, // .. + {0x104A0, 0x104A9, propertyPVALID}, // OSMANYA DIGIT ZERO..OSMANYA DIGIT NINE + {0x104AA, 0x107FF, propertyUNASSIGNED}, // .. + {0x10800, 0x10805, propertyPVALID}, // CYPRIOT SYLLABLE A..CYPRIOT SYLLABLE JA + {0x10806, 0x10807, propertyUNASSIGNED}, // .. + {0x10808, 0x0, propertyPVALID}, // CYPRIOT SYLLABLE JO + {0x10809, 0x0, propertyUNASSIGNED}, // + {0x1080A, 0x10835, propertyPVALID}, // CYPRIOT SYLLABLE KA..CYPRIOT SYLLABLE WO + {0x10836, 0x0, propertyUNASSIGNED}, // + {0x10837, 0x10838, propertyPVALID}, // CYPRIOT SYLLABLE XA..CYPRIOT SYLLABLE XE + {0x10839, 0x1083B, propertyUNASSIGNED}, // .. + {0x1083C, 0x0, propertyPVALID}, // CYPRIOT SYLLABLE ZA + {0x1083D, 0x1083E, propertyUNASSIGNED}, // .. + {0x1083F, 0x10855, propertyPVALID}, // CYPRIOT SYLLABLE ZO..IMPERIAL ARAMAIC LETTER + {0x10856, 0x0, propertyUNASSIGNED}, // + {0x10857, 0x1085F, propertyDISALLOWED}, // IMPERIAL ARAMAIC SECTION SIGN..IMPERIAL ARAM + {0x10860, 0x108FF, propertyUNASSIGNED}, // .. + {0x10900, 0x10915, propertyPVALID}, // PHOENICIAN LETTER ALF..PHOENICIAN LETTER TAU + {0x10916, 0x1091B, propertyDISALLOWED}, // PHOENICIAN NUMBER ONE..PHOENICIAN NUMBER THR + {0x1091C, 0x1091E, propertyUNASSIGNED}, // .. 
+ {0x1091F, 0x0, propertyDISALLOWED}, // PHOENICIAN WORD SEPARATOR + {0x10920, 0x10939, propertyPVALID}, // LYDIAN LETTER A..LYDIAN LETTER C + {0x1093A, 0x1093E, propertyUNASSIGNED}, // .. + {0x1093F, 0x0, propertyDISALLOWED}, // LYDIAN TRIANGULAR MARK + {0x10940, 0x109FF, propertyUNASSIGNED}, // .. + {0x10A00, 0x10A03, propertyPVALID}, // KHAROSHTHI LETTER A..KHAROSHTHI VOWEL SIGN V + {0x10A04, 0x0, propertyUNASSIGNED}, // + {0x10A05, 0x10A06, propertyPVALID}, // KHAROSHTHI VOWEL SIGN E..KHAROSHTHI VOWEL SI + {0x10A07, 0x10A0B, propertyUNASSIGNED}, // .. + {0x10A0C, 0x10A13, propertyPVALID}, // KHAROSHTHI VOWEL LENGTH MARK..KHAROSHTHI LET + {0x10A14, 0x0, propertyUNASSIGNED}, // + {0x10A15, 0x10A17, propertyPVALID}, // KHAROSHTHI LETTER CA..KHAROSHTHI LETTER JA + {0x10A18, 0x0, propertyUNASSIGNED}, // + {0x10A19, 0x10A33, propertyPVALID}, // KHAROSHTHI LETTER NYA..KHAROSHTHI LETTER TTT + {0x10A34, 0x10A37, propertyUNASSIGNED}, // .. + {0x10A38, 0x10A3A, propertyPVALID}, // KHAROSHTHI SIGN BAR ABOVE..KHAROSHTHI SIGN D + {0x10A3B, 0x10A3E, propertyUNASSIGNED}, // .. + {0x10A3F, 0x0, propertyPVALID}, // KHAROSHTHI VIRAMA + {0x10A40, 0x10A47, propertyDISALLOWED}, // KHAROSHTHI DIGIT ONE..KHAROSHTHI NUMBER ONE + {0x10A48, 0x10A4F, propertyUNASSIGNED}, // .. + {0x10A50, 0x10A58, propertyDISALLOWED}, // KHAROSHTHI PUNCTUATION DOT..KHAROSHTHI PUNCT + {0x10A59, 0x10A5F, propertyUNASSIGNED}, // .. + {0x10A60, 0x10A7C, propertyPVALID}, // OLD SOUTH ARABIAN LETTER HE..OLD SOUTH ARABI + {0x10A7D, 0x10A7F, propertyDISALLOWED}, // OLD SOUTH ARABIAN NUMBER ONE..OLD SOUTH ARAB + {0x10A80, 0x10AFF, propertyUNASSIGNED}, // .. + {0x10B00, 0x10B35, propertyPVALID}, // AVESTAN LETTER A..AVESTAN LETTER HE + {0x10B36, 0x10B38, propertyUNASSIGNED}, // .. + {0x10B39, 0x10B3F, propertyDISALLOWED}, // AVESTAN ABBREVIATION MARK..LARGE ONE RING OV + {0x10B40, 0x10B55, propertyPVALID}, // INSCRIPTIONAL PARTHIAN LETTER ALEPH..INSCRIP + {0x10B56, 0x10B57, propertyUNASSIGNED}, // .. + {0x10B58, 0x10B5F, propertyDISALLOWED}, // INSCRIPTIONAL PARTHIAN NUMBER ONE..INSCRIPTI + {0x10B60, 0x10B72, propertyPVALID}, // INSCRIPTIONAL PAHLAVI LETTER ALEPH..INSCRIPT + {0x10B73, 0x10B77, propertyUNASSIGNED}, // .. + {0x10B78, 0x10B7F, propertyDISALLOWED}, // INSCRIPTIONAL PAHLAVI NUMBER ONE..INSCRIPTIO + {0x10B80, 0x10BFF, propertyUNASSIGNED}, // .. + {0x10C00, 0x10C48, propertyPVALID}, // OLD TURKIC LETTER ORKHON A..OLD TURKIC LETTE + {0x10C49, 0x10E5F, propertyUNASSIGNED}, // .. + {0x10E60, 0x10E7E, propertyDISALLOWED}, // RUMI DIGIT ONE..RUMI FRACTION TWO THIRDS + {0x10E7F, 0x1107F, propertyUNASSIGNED}, // .. + {0x11080, 0x110BA, propertyPVALID}, // KAITHI SIGN CANDRABINDU..KAITHI SIGN NUKTA + {0x110BB, 0x110C1, propertyDISALLOWED}, // KAITHI ABBREVIATION SIGN..KAITHI DOUBLE DAND + {0x110C2, 0x11FFF, propertyUNASSIGNED}, // .. + {0x12000, 0x1236E, propertyPVALID}, // CUNEIFORM SIGN A..CUNEIFORM SIGN ZUM + {0x1236F, 0x123FF, propertyUNASSIGNED}, // .. + {0x12400, 0x12462, propertyDISALLOWED}, // CUNEIFORM NUMERIC SIGN TWO ASH..CUNEIFORM NU + {0x12463, 0x1246F, propertyUNASSIGNED}, // .. + {0x12470, 0x12473, propertyDISALLOWED}, // CUNEIFORM PUNCTUATION SIGN OLD ASSYRIAN WORD + {0x12474, 0x12FFF, propertyUNASSIGNED}, // .. + {0x13000, 0x1342E, propertyPVALID}, // EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYP + {0x1342F, 0x1CFFF, propertyUNASSIGNED}, // .. + {0x1D000, 0x1D0F5, propertyDISALLOWED}, // BYZANTINE MUSICAL SYMBOL PSILI..BYZANTINE MU + {0x1D0F6, 0x1D0FF, propertyUNASSIGNED}, // .. 
+ {0x1D100, 0x1D126, propertyDISALLOWED}, // MUSICAL SYMBOL SINGLE BARLINE..MUSICAL SYMBO + {0x1D127, 0x1D128, propertyUNASSIGNED}, // .. + {0x1D129, 0x1D1DD, propertyDISALLOWED}, // MUSICAL SYMBOL MULTIPLE MEASURE REST..MUSICA + {0x1D1DE, 0x1D1FF, propertyUNASSIGNED}, // .. + {0x1D200, 0x1D245, propertyDISALLOWED}, // GREEK VOCAL NOTATION SYMBOL-1..GREEK MUSICAL + {0x1D246, 0x1D2FF, propertyUNASSIGNED}, // .. + {0x1D300, 0x1D356, propertyDISALLOWED}, // MONOGRAM FOR EARTH..TETRAGRAM FOR FOSTERING + {0x1D357, 0x1D35F, propertyUNASSIGNED}, // .. + {0x1D360, 0x1D371, propertyDISALLOWED}, // COUNTING ROD UNIT DIGIT ONE..COUNTING ROD TE + {0x1D372, 0x1D3FF, propertyUNASSIGNED}, // .. + {0x1D400, 0x1D454, propertyDISALLOWED}, // MATHEMATICAL BOLD CAPITAL A..MATHEMATICAL IT + {0x1D455, 0x0, propertyUNASSIGNED}, // + {0x1D456, 0x1D49C, propertyDISALLOWED}, // MATHEMATICAL ITALIC SMALL I..MATHEMATICAL SC + {0x1D49D, 0x0, propertyUNASSIGNED}, // + {0x1D49E, 0x1D49F, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL C..MATHEMATICAL + {0x1D4A0, 0x1D4A1, propertyUNASSIGNED}, // .. + {0x1D4A2, 0x0, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL G + {0x1D4A3, 0x1D4A4, propertyUNASSIGNED}, // .. + {0x1D4A5, 0x1D4A6, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL J..MATHEMATICAL + {0x1D4A7, 0x1D4A8, propertyUNASSIGNED}, // .. + {0x1D4A9, 0x1D4AC, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL N..MATHEMATICAL + {0x1D4AD, 0x0, propertyUNASSIGNED}, // + {0x1D4AE, 0x1D4B9, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL S..MATHEMATICAL + {0x1D4BA, 0x0, propertyUNASSIGNED}, // + {0x1D4BB, 0x0, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL F + {0x1D4BC, 0x0, propertyUNASSIGNED}, // + {0x1D4BD, 0x1D4C3, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL H..MATHEMATICAL SC + {0x1D4C4, 0x0, propertyUNASSIGNED}, // + {0x1D4C5, 0x1D505, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL P..MATHEMATICAL FR + {0x1D506, 0x0, propertyUNASSIGNED}, // + {0x1D507, 0x1D50A, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL D..MATHEMATICAL + {0x1D50B, 0x1D50C, propertyUNASSIGNED}, // .. + {0x1D50D, 0x1D514, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL J..MATHEMATICAL + {0x1D515, 0x0, propertyUNASSIGNED}, // + {0x1D516, 0x1D51C, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL S..MATHEMATICAL + {0x1D51D, 0x0, propertyUNASSIGNED}, // + {0x1D51E, 0x1D539, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR SMALL A..MATHEMATICAL D + {0x1D53A, 0x0, propertyUNASSIGNED}, // + {0x1D53B, 0x1D53E, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL D..MATHEM + {0x1D53F, 0x0, propertyUNASSIGNED}, // + {0x1D540, 0x1D544, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL I..MATHEM + {0x1D545, 0x0, propertyUNASSIGNED}, // + {0x1D546, 0x0, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL O + {0x1D547, 0x1D549, propertyUNASSIGNED}, // .. + {0x1D54A, 0x1D550, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL S..MATHEM + {0x1D551, 0x0, propertyUNASSIGNED}, // + {0x1D552, 0x1D6A5, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK SMALL A..MATHEMAT + {0x1D6A6, 0x1D6A7, propertyUNASSIGNED}, // .. + {0x1D6A8, 0x1D7CB, propertyDISALLOWED}, // MATHEMATICAL BOLD CAPITAL ALPHA..MATHEMATICA + {0x1D7CC, 0x1D7CD, propertyUNASSIGNED}, // .. + {0x1D7CE, 0x1D7FF, propertyDISALLOWED}, // MATHEMATICAL BOLD DIGIT ZERO..MATHEMATICAL M + {0x1D800, 0x1EFFF, propertyUNASSIGNED}, // .. 
+ {0x1F000, 0x1F02B, propertyDISALLOWED}, // MAHJONG TILE EAST WIND..MAHJONG TILE BACK + {0x1F02C, 0x1F02F, propertyUNASSIGNED}, // .. + {0x1F030, 0x1F093, propertyDISALLOWED}, // DOMINO TILE HORIZONTAL BACK..DOMINO TILE VER + {0x1F094, 0x1F0FF, propertyUNASSIGNED}, // .. + {0x1F100, 0x1F10A, propertyDISALLOWED}, // DIGIT ZERO FULL STOP..DIGIT NINE COMMA + {0x1F10B, 0x1F10F, propertyUNASSIGNED}, // .. + {0x1F110, 0x1F12E, propertyDISALLOWED}, // PARENTHESIZED LATIN CAPITAL LETTER A..CIRCLE + {0x1F12F, 0x1F130, propertyUNASSIGNED}, // .. + {0x1F131, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER B + {0x1F132, 0x1F13C, propertyUNASSIGNED}, // .. + {0x1F13D, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER N + {0x1F13E, 0x0, propertyUNASSIGNED}, // + {0x1F13F, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER P + {0x1F140, 0x1F141, propertyUNASSIGNED}, // .. + {0x1F142, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER S + {0x1F143, 0x1F145, propertyUNASSIGNED}, // .. + {0x1F146, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER W + {0x1F147, 0x1F149, propertyUNASSIGNED}, // .. + {0x1F14A, 0x1F14E, propertyDISALLOWED}, // SQUARED HV..SQUARED PPV + {0x1F14F, 0x1F156, propertyUNASSIGNED}, // .. + {0x1F157, 0x0, propertyDISALLOWED}, // NEGATIVE CIRCLED LATIN CAPITAL LETTER H + {0x1F158, 0x1F15E, propertyUNASSIGNED}, // .. + {0x1F15F, 0x0, propertyDISALLOWED}, // NEGATIVE CIRCLED LATIN CAPITAL LETTER P + {0x1F160, 0x1F178, propertyUNASSIGNED}, // .. + {0x1F179, 0x0, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER J + {0x1F17A, 0x0, propertyUNASSIGNED}, // + {0x1F17B, 0x1F17C, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER L..NEG + {0x1F17D, 0x1F17E, propertyUNASSIGNED}, // .. + {0x1F17F, 0x0, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER P + {0x1F180, 0x1F189, propertyUNASSIGNED}, // .. + {0x1F18A, 0x1F18D, propertyDISALLOWED}, // CROSSED NEGATIVE SQUARED LATIN CAPITAL LETTE + {0x1F18E, 0x1F18F, propertyUNASSIGNED}, // .. + {0x1F190, 0x0, propertyDISALLOWED}, // SQUARE DJ + {0x1F191, 0x1F1FF, propertyUNASSIGNED}, // .. + {0x1F200, 0x0, propertyDISALLOWED}, // SQUARE HIRAGANA HOKA + {0x1F201, 0x1F20F, propertyUNASSIGNED}, // .. + {0x1F210, 0x1F231, propertyDISALLOWED}, // SQUARED CJK UNIFIED IDEOGRAPH-624B..SQUARED + {0x1F232, 0x1F23F, propertyUNASSIGNED}, // .. + {0x1F240, 0x1F248, propertyDISALLOWED}, // TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRA + {0x1F249, 0x1FFFD, propertyUNASSIGNED}, // .. + {0x1FFFE, 0x1FFFF, propertyDISALLOWED}, // .. + {0x20000, 0x2A6D6, propertyPVALID}, // .... + {0x2A700, 0x2B734, propertyPVALID}, // .... + {0x2F800, 0x2FA1D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-2F800..CJK COMPA + {0x2FA1E, 0x2FFFD, propertyUNASSIGNED}, // .. + {0x2FFFE, 0x2FFFF, propertyDISALLOWED}, // .. + {0x30000, 0x3FFFD, propertyUNASSIGNED}, // .. + {0x3FFFE, 0x3FFFF, propertyDISALLOWED}, // .. + {0x40000, 0x4FFFD, propertyUNASSIGNED}, // .. + {0x4FFFE, 0x4FFFF, propertyDISALLOWED}, // .. + {0x50000, 0x5FFFD, propertyUNASSIGNED}, // .. + {0x5FFFE, 0x5FFFF, propertyDISALLOWED}, // .. + {0x60000, 0x6FFFD, propertyUNASSIGNED}, // .. + {0x6FFFE, 0x6FFFF, propertyDISALLOWED}, // .. + {0x70000, 0x7FFFD, propertyUNASSIGNED}, // .. + {0x7FFFE, 0x7FFFF, propertyDISALLOWED}, // .. + {0x80000, 0x8FFFD, propertyUNASSIGNED}, // .. + {0x8FFFE, 0x8FFFF, propertyDISALLOWED}, // .. + {0x90000, 0x9FFFD, propertyUNASSIGNED}, // .. + {0x9FFFE, 0x9FFFF, propertyDISALLOWED}, // .. 
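// The generated table continues below; to read it, note that each entry is
// {start, end, property}: end == 0 means the entry covers only the single
// code point start, otherwise it covers the inclusive range [start, end].
// A minimal, self-contained sketch of that lookup rule (the type and
// variable names here are local stand-ins, redeclared so the example runs
// on its own; the vendored findProperty further down applies the same rule
// to the full table with binary search):
package main

import "fmt"

type property int

const (
	propertyUnknown property = iota
	propertyPVALID
	propertyDISALLOWED
	propertyUNASSIGNED
)

type codePoint struct {
	start, end rune
	state      property
}

// Three sample rows copied from the table.
var sample = []codePoint{
	{0xA723, 0x0, propertyPVALID},     // single code point
	{0xA724, 0x0, propertyDISALLOWED}, // single code point
	{0xAC00, 0xD7A3, propertyPVALID},  // inclusive range (Hangul syllables)
}

func lookup(r rune) property {
	for _, c := range sample {
		if (c.end == 0 && c.start == r) || (c.start <= r && r <= c.end) {
			return c.state
		}
	}
	return propertyUnknown
}

func main() {
	fmt.Println(lookup(0xA723) == propertyPVALID) // true
	fmt.Println(lookup(0xD7A3) == propertyPVALID) // true
}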
+ {0xA0000, 0xAFFFD, propertyUNASSIGNED}, // .. + {0xAFFFE, 0xAFFFF, propertyDISALLOWED}, // .. + {0xB0000, 0xBFFFD, propertyUNASSIGNED}, // .. + {0xBFFFE, 0xBFFFF, propertyDISALLOWED}, // .. + {0xC0000, 0xCFFFD, propertyUNASSIGNED}, // .. + {0xCFFFE, 0xCFFFF, propertyDISALLOWED}, // .. + {0xD0000, 0xDFFFD, propertyUNASSIGNED}, // .. + {0xDFFFE, 0xDFFFF, propertyDISALLOWED}, // .. + {0xE0000, 0x0, propertyUNASSIGNED}, // + {0xE0001, 0x0, propertyDISALLOWED}, // LANGUAGE TAG + {0xE0002, 0xE001F, propertyUNASSIGNED}, // .. + {0xE0020, 0xE007F, propertyDISALLOWED}, // TAG SPACE..CANCEL TAG + {0xE0080, 0xE00FF, propertyUNASSIGNED}, // .. + {0xE0100, 0xE01EF, propertyDISALLOWED}, // VARIATION SELECTOR-17..VARIATION SELECTOR-25 + {0xE01F0, 0xEFFFD, propertyUNASSIGNED}, // .. + {0xEFFFE, 0x10FFFF, propertyDISALLOWED}, // .. +} diff --git a/vendor/github.com/miekg/dns/idn/example_test.go b/vendor/github.com/miekg/dns/idn/example_test.go new file mode 100644 index 000000000000..8833cd91de13 --- /dev/null +++ b/vendor/github.com/miekg/dns/idn/example_test.go @@ -0,0 +1,18 @@ +package idn_test + +import ( + "fmt" + "github.com/miekg/dns/idn" +) + +func ExampleToPunycode() { + name := "インターネット.テスト" + fmt.Printf("%s -> %s", name, idn.ToPunycode(name)) + // Output: インターネット.テスト -> xn--eckucmux0ukc.xn--zckzah +} + +func ExampleFromPunycode() { + name := "xn--mgbaja8a1hpac.xn--mgbachtv" + fmt.Printf("%s -> %s", name, idn.FromPunycode(name)) + // Output: xn--mgbaja8a1hpac.xn--mgbachtv -> الانترنت.اختبار +} diff --git a/vendor/github.com/miekg/dns/idn/punycode.go b/vendor/github.com/miekg/dns/idn/punycode.go new file mode 100644 index 000000000000..7e5c263fc858 --- /dev/null +++ b/vendor/github.com/miekg/dns/idn/punycode.go @@ -0,0 +1,373 @@ +// Package idn implements encoding from and to punycode as specified by RFC 3492. +package idn + +import ( + "bytes" + "strings" + "unicode" + "unicode/utf8" + + "github.com/miekg/dns" +) + +// Implementation idea from the RFC itself and from IDNA::Punycode created by +// Tatsuhiko Miyagawa and released under Perl Artistic +// License in 2002. + +const ( + _MIN rune = 1 + _MAX rune = 26 + _SKEW rune = 38 + _BASE rune = 36 + _BIAS rune = 72 + _N rune = 128 + _DAMP rune = 700 + + _DELIMITER = '-' + _PREFIX = "xn--" +) + +// ToPunycode converts unicode domain names to DNS-appropriate punycode names. +// This function will return an empty string result for domain names with +// invalid unicode strings. This function expects domain names in lowercase. +func ToPunycode(s string) string { + // Early check to see if encoding is needed. + // This will prevent making heap allocations when not needed. + if !needToPunycode(s) { + return s + } + + tokens := dns.SplitDomainName(s) + switch { + case s == "": + return "" + case tokens == nil: // s == . + return "." + case s[len(s)-1] == '.': + tokens = append(tokens, "") + } + + for i := range tokens { + t := encode([]byte(tokens[i])) + if t == nil { + return "" + } + tokens[i] = string(t) + } + return strings.Join(tokens, ".") +} + +// FromPunycode returns the unicode domain name from the provided punycode string. +// This function expects punycode strings in lowercase. +func FromPunycode(s string) string { + // Early check to see if decoding is needed. + // This will prevent making heap allocations when not needed. + if !needFromPunycode(s) { + return s + } + + tokens := dns.SplitDomainName(s) + switch { + case s == "": + return "" + case tokens == nil: // s == . + return "."
+ case s[len(s)-1] == '.': + tokens = append(tokens, "") + } + for i := range tokens { + tokens[i] = string(decode([]byte(tokens[i]))) + } + return strings.Join(tokens, ".") +} + +// digitval converts a single byte into the value used to calculate the decoded unicode character. +const errdigit = 0xffff + +func digitval(code rune) rune { + switch { + case code >= 'A' && code <= 'Z': + return code - 'A' + case code >= 'a' && code <= 'z': + return code - 'a' + case code >= '0' && code <= '9': + return code - '0' + 26 + } + return errdigit +} + +// lettercode finds the BASE36 byte (a-z0-9) for the calculated number. +func lettercode(digit rune) rune { + switch { + case digit >= 0 && digit <= 25: + return digit + 'a' + case digit >= 26 && digit <= 36: + return digit - 26 + '0' + } + panic("dns: not reached") +} + +// adapt calculates the next bias to be used for the next iteration's delta. +func adapt(delta rune, numpoints int, firsttime bool) rune { + if firsttime { + delta /= _DAMP + } else { + delta /= 2 + } + + var k rune + for delta = delta + delta/rune(numpoints); delta > (_BASE-_MIN)*_MAX/2; k += _BASE { + delta /= _BASE - _MIN + } + + return k + ((_BASE-_MIN+1)*delta)/(delta+_SKEW) +} + +// next finds the minimal rune (the one with the lowest codepoint value) that is equal to or above the boundary. +func next(b []rune, boundary rune) rune { + if len(b) == 0 { + panic("dns: invalid set of runes to determine next one") + } + m := b[0] + for _, x := range b[1:] { + if x >= boundary && (m < boundary || x < m) { + m = x + } + } + return m +} + +// preprune converts a unicode rune to lower case. It does not yet support +// everything described in the RFCs. +func preprune(r rune) rune { + if unicode.IsUpper(r) { + r = unicode.ToLower(r) + } + return r +} + +// tfunc is a function that helps calculate each character's weight. +func tfunc(k, bias rune) rune { + switch { + case k <= bias: + return _MIN + case k >= bias+_MAX: + return _MAX + } + return k - bias +} + +// needToPunycode returns true for strings that require punycode encoding +// (contain unicode characters). +func needToPunycode(s string) bool { + // This function is very similar to bytes.Runes. We don't use bytes.Runes + // because it makes a heap allocation that's not needed here. + for i := 0; len(s) > 0; i++ { + r, l := utf8.DecodeRuneInString(s) + if r > 0x7f { + return true + } + s = s[l:] + } + return false +} + +// needFromPunycode returns true for strings that require punycode decoding. +func needFromPunycode(s string) bool { + if s == "." { + return false + } + + off := 0 + end := false + pl := len(_PREFIX) + sl := len(s) + + // If s starts with _PREFIX. + if sl > pl && s[off:off+pl] == _PREFIX { + return true + } + + for { + // Find the part after the next ".". + off, end = dns.NextLabel(s, off) + if end { + return false + } + // If this part starts with _PREFIX. + if sl-off > pl && s[off:off+pl] == _PREFIX { + return true + } + } +} + +// encode transforms Unicode input bytes (that represent a DNS label) into +// a punycode bytestream. It returns nil if there's an invalid +// character in the label.
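// Before the full encoder below, a standalone sketch of its inner digit
// loop: each delta q is written out as a variable-length base-36 integer
// whose per-position thresholds come from tfunc and the current bias.
// The helper name emit and the constants base/tmin/tmax are local to this
// sketch; they mirror _BASE, _MIN and _MAX above.
package main

import "fmt"

const (
	base rune = 36
	tmin rune = 1
	tmax rune = 26
)

func tfunc(k, bias rune) rune {
	switch {
	case k <= bias:
		return tmin
	case k >= bias+tmax:
		return tmax
	}
	return k - bias
}

func lettercode(digit rune) rune {
	if digit <= 25 {
		return digit + 'a'
	}
	return digit - 26 + '0'
}

// emit renders one delta q at the given bias, exactly as the inner loop of
// encode does: low-order digits first, closing with the digit that falls
// below the threshold.
func emit(q, bias rune) string {
	var out []rune
	for k := base; ; k += base {
		t := tfunc(k, bias)
		if q < t {
			break
		}
		out = append(out, lettercode(t+(q-t)%(base-t)))
		q = (q - t) / (base - t)
	}
	return string(append(out, lettercode(q)))
}

func main() {
	fmt.Println(emit(749, 72)) // "ova": delta 749 at the initial bias of 72
}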
+func encode(input []byte) []byte { + n, bias := _N, _BIAS + + b := bytes.Runes(input) + for i := range b { + if !isValidRune(b[i]) { + return nil + } + + b[i] = preprune(b[i]) + } + + basic := make([]byte, 0, len(b)) + for _, ltr := range b { + if ltr <= 0x7f { + basic = append(basic, byte(ltr)) + } + } + basiclen := len(basic) + fulllen := len(b) + if basiclen == fulllen { + return basic + } + + var out bytes.Buffer + + out.WriteString(_PREFIX) + if basiclen > 0 { + out.Write(basic) + out.WriteByte(_DELIMITER) + } + + var ( + ltr, nextltr rune + delta, q rune // delta calculation (see rfc) + t, k, cp rune // weight and codepoint calculation + ) + + s := &bytes.Buffer{} + for h := basiclen; h < fulllen; n, delta = n+1, delta+1 { + nextltr = next(b, n) + s.Truncate(0) + s.WriteRune(nextltr) + delta, n = delta+(nextltr-n)*rune(h+1), nextltr + + for _, ltr = range b { + if ltr < n { + delta++ + } + if ltr == n { + q = delta + for k = _BASE; ; k += _BASE { + t = tfunc(k, bias) + if q < t { + break + } + cp = t + ((q - t) % (_BASE - t)) + out.WriteRune(lettercode(cp)) + q = (q - t) / (_BASE - t) + } + + out.WriteRune(lettercode(q)) + + bias = adapt(delta, h+1, h == basiclen) + h, delta = h+1, 0 + } + } + } + return out.Bytes() +} + +// decode transforms punycode input bytes (that represent a DNS label) into a Unicode bytestream. +func decode(b []byte) []byte { + src := b // b is advanced below and we need to keep the original + + n, bias := _N, _BIAS + if !bytes.HasPrefix(b, []byte(_PREFIX)) { + return b + } + out := make([]rune, 0, len(b)) + b = b[len(_PREFIX):] + for pos := len(b) - 1; pos >= 0; pos-- { + // only the last delimiter is of interest + if b[pos] == _DELIMITER { + out = append(out, bytes.Runes(b[:pos])...) + b = b[pos+1:] // trim source string + break + } + } + if len(b) == 0 { + return src + } + var ( + i, oldi, w rune + ch byte + t, digit rune + ln int + ) + + for i = 0; len(b) > 0; i++ { + oldi, w = i, 1 + for k := _BASE; len(b) > 0; k += _BASE { + ch, b = b[0], b[1:] + digit = digitval(rune(ch)) + if digit == errdigit { + return src + } + i += digit * w + if i < 0 { + // safety check for rune overflow + return src + } + + t = tfunc(k, bias) + if digit < t { + break + } + + w *= _BASE - t + } + ln = len(out) + 1 + bias = adapt(i-oldi, ln, oldi == 0) + n += i / rune(ln) + i = i % rune(ln) + // insert + out = append(out, 0) + copy(out[i+1:], out[i:]) + out[i] = n + } + + var ret bytes.Buffer + for _, r := range out { + ret.WriteRune(r) + } + return ret.Bytes() +} + +// isValidRune checks if the character is valid. It looks up the +// character's property in the code points list. Special rules for +// contextual properties are not checked yet. +func isValidRune(r rune) bool { + return findProperty(r) == propertyPVALID +} + +// findProperty looks up the code point property of the given +// character.
It uses binary search, as codePoints is a slice of +// ordered ranges (O(log n) lookups). +func findProperty(r rune) property { + imin, imax := 0, len(codePoints) + + for imax >= imin { + imid := (imin + imax) / 2 + + codePoint := codePoints[imid] + if (codePoint.start == r && codePoint.end == 0) || (codePoint.start <= r && codePoint.end >= r) { + return codePoint.state + } + + if (codePoint.end > 0 && codePoint.end < r) || (codePoint.end == 0 && codePoint.start < r) { + imin = imid + 1 + } else { + imax = imid - 1 + } + } + + return propertyUnknown +} diff --git a/vendor/github.com/miekg/dns/idn/punycode_test.go b/vendor/github.com/miekg/dns/idn/punycode_test.go new file mode 100644 index 000000000000..9c9a15f0ba1d --- /dev/null +++ b/vendor/github.com/miekg/dns/idn/punycode_test.go @@ -0,0 +1,116 @@ +package idn + +import ( + "strings" + "testing" +) + +var testcases = [][2]string{ + {"", ""}, + {"a", "a"}, + {"a-b", "a-b"}, + {"a-b-c", "a-b-c"}, + {"abc", "abc"}, + {"я", "xn--41a"}, + {"zя", "xn--z-0ub"}, + {"яZ", "xn--z-zub"}, + {"а-я", "xn----7sb8g"}, + {"إختبار", "xn--kgbechtv"}, + {"آزمایشی", "xn--hgbk6aj7f53bba"}, + {"测试", "xn--0zwm56d"}, + {"測試", "xn--g6w251d"}, + {"испытание", "xn--80akhbyknj4f"}, + {"परीक्षा", "xn--11b5bs3a9aj6g"}, + {"δοκιμή", "xn--jxalpdlp"}, + {"테스트", "xn--9t4b11yi5a"}, + {"טעסט", "xn--deba0ad"}, + {"テスト", "xn--zckzah"}, + {"பரிட்சை", "xn--hlcj6aya9esc7a"}, + {"mamão-com-açúcar", "xn--mamo-com-acar-yeb1e6q"}, + {"σ", "xn--4xa"}, +} + +func TestEncodeDecodePunycode(t *testing.T) { + for _, tst := range testcases { + enc := encode([]byte(tst[0])) + if string(enc) != tst[1] { + t.Errorf("%s encoded as %s but should be %s", tst[0], enc, tst[1]) + } + dec := decode([]byte(tst[1])) + if string(dec) != strings.ToLower(tst[0]) { + t.Errorf("%s decoded as %s but should be %s", tst[1], dec, strings.ToLower(tst[0])) + } + } +} + +func TestToFromPunycode(t *testing.T) { + for _, tst := range testcases { + // assert unicode.com == punycode.com + full := ToPunycode(tst[0] + ".com") + if full != tst[1]+".com" { + t.Errorf("invalid result from string conversion to punycode, %s and should be %s.com", full, tst[1]) + } + // assert punycode.punycode == unicode.unicode + decoded := FromPunycode(tst[1] + "." + tst[1]) + if decoded != strings.ToLower(tst[0]+"."+tst[0]) { + t.Errorf("invalid result from string conversion to punycode, %s and should be %s.%s", decoded, tst[0], tst[0]) + } + } +} + +func TestEncodeDecodeFinalPeriod(t *testing.T) { + for _, tst := range testcases { + // assert unicode.com. == punycode.com. + full := ToPunycode(tst[0] + ".") + if full != tst[1]+"." { + t.Errorf("invalid result from string conversion to punycode when period added at the end, %#v and should be %#v", full, tst[1]+".") + } + // assert punycode.com. == unicode.com. + decoded := FromPunycode(tst[1] + ".") + if decoded != strings.ToLower(tst[0]+".") { + t.Errorf("invalid result from string conversion to punycode when period added, %#v and should be %#v", decoded, tst[0]+".") + } + full = ToPunycode(tst[0]) + if full != tst[1] { + t.Errorf("invalid result from string conversion to punycode when no period added at the end, %#v and should be %#v", full, tst[1]+".") + } + // assert punycode.com. == unicode.com.
+ decoded = FromPunycode(tst[1]) + if decoded != strings.ToLower(tst[0]) { + t.Errorf("invalid result from string conversion to punycode when no period added, %#v and should be %#v", decoded, tst[0]+".") + } + } +} + +var invalidACEs = []string{ + "xn--*", + "xn--", + "xn---", + "xn--a000000000", +} + +func TestInvalidPunycode(t *testing.T) { + for _, d := range invalidACEs { + s := FromPunycode(d) + if s != d { + t.Errorf("Changed invalid name %s to %#v", d, s) + } + } +} + +// You can verify which labels are valid by comparing against the Verisign +// website: http://mct.verisign-grs.com/ +var invalidUnicodes = []string{ + "Σ", + "ЯZ", + "Испытание", +} + +func TestInvalidUnicodes(t *testing.T) { + for _, d := range invalidUnicodes { + s := ToPunycode(d) + if s != "" { + t.Errorf("Changed invalid name %s to %#v", d, s) + } + } +} diff --git a/vendor/github.com/miekg/dns/issue_test.go b/vendor/github.com/miekg/dns/issue_test.go index 7299d3143b5c..3025fc98cb10 100644 --- a/vendor/github.com/miekg/dns/issue_test.go +++ b/vendor/github.com/miekg/dns/issue_test.go @@ -2,10 +2,7 @@ package dns // Tests that solve a specific issue. -import ( - "strings" - "testing" -) +import "testing" func TestTCPRtt(t *testing.T) { m := new(Msg) @@ -24,39 +21,3 @@ func TestTCPRtt(t *testing.T) { } } } - -func TestNSEC3MissingSalt(t *testing.T) { - rr := testRR("ji6neoaepv8b5o6k4ev33abha8ht9fgc.example. NSEC3 1 1 12 aabbccdd K8UDEMVP1J2F7EG6JEBPS17VP3N8I58H") - m := new(Msg) - m.Answer = []RR{rr} - mb, err := m.Pack() - if err != nil { - t.Fatalf("expected to pack message. err: %s", err) - } - if err := m.Unpack(mb); err != nil { - t.Fatalf("expected to unpack message. missing salt? err: %s", err) - } - in := rr.(*NSEC3).Salt - out := m.Answer[0].(*NSEC3).Salt - if in != out { - t.Fatalf("expected salts to match. packed: `%s`. returned: `%s`", in, out) - } -} - -func TestNSEC3MixedNextDomain(t *testing.T) { - rr := testRR("ji6neoaepv8b5o6k4ev33abha8ht9fgc.example. NSEC3 1 1 12 - k8udemvp1j2f7eg6jebps17vp3n8i58h") - m := new(Msg) - m.Answer = []RR{rr} - mb, err := m.Pack() - if err != nil { - t.Fatalf("expected to pack message. err: %s", err) - } - if err := m.Unpack(mb); err != nil { - t.Fatalf("expected to unpack message. err: %s", err) - } - in := strings.ToUpper(rr.(*NSEC3).NextDomain) - out := m.Answer[0].(*NSEC3).NextDomain - if in != out { - t.Fatalf("expected round trip to produce NextDomain `%s`, instead `%s`", in, out) - } -} diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go index 760b89e7114f..fca5c7dd2d15 100644 --- a/vendor/github.com/miekg/dns/labels.go +++ b/vendor/github.com/miekg/dns/labels.go @@ -42,7 +42,7 @@ func SplitDomainName(s string) (labels []string) { // CompareDomainName compares the names s1 and s2 and // returns how many labels they have in common starting from the *right*. -// The comparison stops at the first inequality. The names are downcased +// The comparison stops at the first inequality. The names are not downcased // before the comparison. // // www.miek.nl. and miek.nl. have two labels in common: miek and nl @@ -50,21 +50,23 @@ func SplitDomainName(s string) (labels []string) { // // s1 and s2 must be syntactically valid domain names. func CompareDomainName(s1, s2 string) (n int) { - // the first check: root label - if s1 == "." || s2 == "."
{ - return 0 - } - + s1 = Fqdn(s1) + s2 = Fqdn(s2) l1 := Split(s1) l2 := Split(s2) + // the first check: root label + if l1 == nil || l2 == nil { + return + } + j1 := len(l1) - 1 // end i1 := len(l1) - 2 // start j2 := len(l2) - 1 i2 := len(l2) - 2 // the second check can be done here: last/only label // before we fall through into the for-loop below - if equal(s1[l1[j1]:], s2[l2[j2]:]) { + if s1[l1[j1]:] == s2[l2[j2]:] { n++ } else { return @@ -73,7 +75,7 @@ func CompareDomainName(s1, s2 string) (n int) { if i1 < 0 || i2 < 0 { break } - if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) { + if s1[l1[i1]:l1[j1]] == s2[l2[i2]:l2[j2]] { n++ } else { break @@ -164,28 +166,3 @@ func PrevLabel(s string, n int) (i int, start bool) { } return lab[len(lab)-n], false } - -// equal compares a and b while ignoring case. It returns true when equal otherwise false. -func equal(a, b string) bool { - // might be lifted into API function. - la := len(a) - lb := len(b) - if la != lb { - return false - } - - for i := la - 1; i >= 0; i-- { - ai := a[i] - bi := b[i] - if ai >= 'A' && ai <= 'Z' { - ai |= ('a' - 'A') - } - if bi >= 'A' && bi <= 'Z' { - bi |= ('a' - 'A') - } - if ai != bi { - return false - } - } - return true -} diff --git a/vendor/github.com/miekg/dns/labels_test.go b/vendor/github.com/miekg/dns/labels_test.go index d9bb556df7b4..536757d52f77 100644 --- a/vendor/github.com/miekg/dns/labels_test.go +++ b/vendor/github.com/miekg/dns/labels_test.go @@ -7,8 +7,8 @@ func TestCompareDomainName(t *testing.T) { s2 := "miek.nl." s3 := "www.bla.nl." s4 := "nl.www.bla." - s5 := "nl." - s6 := "miek.nl." + s5 := "nl" + s6 := "miek.nl" if CompareDomainName(s1, s2) != 2 { t.Errorf("%s with %s should be %d", s1, s2, 2) @@ -33,9 +33,6 @@ func TestCompareDomainName(t *testing.T) { if CompareDomainName(".", ".") != 0 { t.Errorf("%s with %s should be %d", ".", ".", 0) } - if CompareDomainName("test.com.", "TEST.COM.") != 2 { - t.Errorf("test.com. and TEST.COM. 
should be an exact match") - } } func TestSplit(t *testing.T) { @@ -54,6 +51,8 @@ func TestSplit(t *testing.T) { for s, i := range splitter { if x := len(Split(s)); x != i { t.Errorf("labels should be %d, got %d: %s %v", i, x, s, Split(s)) + } else { + t.Logf("%s %v", s, Split(s)) } } } @@ -85,19 +84,19 @@ func TestPrevLabel(t *testing.T) { int } prever := map[prev]int{ - {"www.miek.nl.", 0}: 12, - {"www.miek.nl.", 1}: 9, - {"www.miek.nl.", 2}: 4, + prev{"www.miek.nl.", 0}: 12, + prev{"www.miek.nl.", 1}: 9, + prev{"www.miek.nl.", 2}: 4, - {"www.miek.nl", 0}: 11, - {"www.miek.nl", 1}: 9, - {"www.miek.nl", 2}: 4, + prev{"www.miek.nl", 0}: 11, + prev{"www.miek.nl", 1}: 9, + prev{"www.miek.nl", 2}: 4, - {"www.miek.nl.", 5}: 0, - {"www.miek.nl", 5}: 0, + prev{"www.miek.nl.", 5}: 0, + prev{"www.miek.nl", 5}: 0, - {"www.miek.nl.", 3}: 0, - {"www.miek.nl", 3}: 0, + prev{"www.miek.nl.", 3}: 0, + prev{"www.miek.nl", 3}: 0, } for s, i := range prever { x, ok := PrevLabel(s.string, s.int) @@ -174,28 +173,28 @@ func TestIsDomainName(t *testing.T) { func BenchmarkSplitLabels(b *testing.B) { for i := 0; i < b.N; i++ { - Split("www.example.com.") + Split("www.example.com") } } func BenchmarkLenLabels(b *testing.B) { for i := 0; i < b.N; i++ { - CountLabel("www.example.com.") + CountLabel("www.example.com") } } -func BenchmarkCompareDomainName(b *testing.B) { +func BenchmarkCompareLabels(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { - CompareDomainName("www.example.com.", "aa.example.com.") + CompareDomainName("www.example.com", "aa.example.com") } } func BenchmarkIsSubDomain(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { - IsSubDomain("www.example.com.", "aa.example.com.") - IsSubDomain("example.com.", "aa.example.com.") - IsSubDomain("miek.nl.", "aa.example.com.") + IsSubDomain("www.example.com", "aa.example.com") + IsSubDomain("example.com", "aa.example.com") + IsSubDomain("miek.nl", "aa.example.com") } } diff --git a/vendor/github.com/miekg/dns/leak_test.go b/vendor/github.com/miekg/dns/leak_test.go deleted file mode 100644 index ff83ac74b18d..000000000000 --- a/vendor/github.com/miekg/dns/leak_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package dns - -import ( - "fmt" - "os" - "runtime" - "sort" - "strings" - "testing" - "time" -) - -// copied from net/http/main_test.go - -func interestingGoroutines() (gs []string) { - buf := make([]byte, 2<<20) - buf = buf[:runtime.Stack(buf, true)] - for _, g := range strings.Split(string(buf), "\n\n") { - sl := strings.SplitN(g, "\n", 2) - if len(sl) != 2 { - continue - } - stack := strings.TrimSpace(sl[1]) - if stack == "" || - strings.Contains(stack, "testing.(*M).before.func1") || - strings.Contains(stack, "os/signal.signal_recv") || - strings.Contains(stack, "created by net.startServer") || - strings.Contains(stack, "created by testing.RunTests") || - strings.Contains(stack, "closeWriteAndWait") || - strings.Contains(stack, "testing.Main(") || - strings.Contains(stack, "testing.(*T).Run(") || - strings.Contains(stack, "created by net/http.(*http2Transport).newClientConn") || - // These only show up with GOTRACEBACK=2; Issue 5005 (comment 28) - strings.Contains(stack, "runtime.goexit") || - strings.Contains(stack, "created by runtime.gc") || - strings.Contains(stack, "dns.interestingGoroutines") || - strings.Contains(stack, "runtime.MHeap_Scavenger") { - continue - } - gs = append(gs, stack) - } - sort.Strings(gs) - return -} - -func goroutineLeaked() error { - if testing.Short() { - // Don't worry about goroutine leaks in -short mode or 
in - // benchmark mode. Too distracting when there are false positives. - return nil - } - - var stackCount map[string]int - for i := 0; i < 5; i++ { - n := 0 - stackCount = make(map[string]int) - gs := interestingGoroutines() - for _, g := range gs { - stackCount[g]++ - n++ - } - if n == 0 { - return nil - } - // Wait for goroutines to schedule and die off: - time.Sleep(100 * time.Millisecond) - } - for stack, count := range stackCount { - fmt.Fprintf(os.Stderr, "%d instances of:\n%s\n", count, stack) - } - return fmt.Errorf("too many goroutines running after dns test(s)") -} diff --git a/vendor/github.com/miekg/dns/length_test.go b/vendor/github.com/miekg/dns/length_test.go deleted file mode 100644 index 66521a5f25a9..000000000000 --- a/vendor/github.com/miekg/dns/length_test.go +++ /dev/null @@ -1,371 +0,0 @@ -package dns - -import ( - "encoding/hex" - "fmt" - "net" - "reflect" - "strings" - "testing" -) - -func TestCompressLength(t *testing.T) { - m := new(Msg) - m.SetQuestion("miek.nl", TypeMX) - ul := m.Len() - m.Compress = true - if ul != m.Len() { - t.Fatalf("should be equal") - } -} - -// Does the predicted length match final packed length? -func TestMsgCompressLength(t *testing.T) { - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - msg.Compress = true - return msg - } - - name1 := "12345678901234567890123456789012345.12345678.123." - rrA := testRR(name1 + " 3600 IN A 192.0.2.1") - rrMx := testRR(name1 + " 3600 IN MX 10 " + name1) - tests := []*Msg{ - makeMsg(name1, []RR{rrA}, nil, nil), - makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)} - - for _, msg := range tests { - predicted := msg.Len() - buf, err := msg.Pack() - if err != nil { - t.Error(err) - } - if predicted < len(buf) { - t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", - msg.Question[0].Name, len(msg.Answer), predicted, len(buf)) - } - } -} - -func TestMsgLength(t *testing.T) { - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.Compress = true - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - return msg - } - - name1 := "12345678901234567890123456789012345.12345678.123." 
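// The deleted tests in this hunk pinned down one invariant: Msg.Len() must
// not under-predict the size of the packed message. A minimal sketch of
// checking that invariant through the public API, assuming only that
// github.com/miekg/dns is importable at this revision:
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("miek.nl.", dns.TypeMX)
	m.Compress = true

	predicted := m.Len() // an estimate, computed without packing
	buf, err := m.Pack()
	if err != nil {
		panic(err)
	}
	if predicted < len(buf) {
		fmt.Printf("under-predicted: %d < %d\n", predicted, len(buf))
	} else {
		fmt.Printf("ok: predicted %d, packed %d\n", predicted, len(buf))
	}
}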
- rrA := testRR(name1 + " 3600 IN A 192.0.2.1") - rrMx := testRR(name1 + " 3600 IN MX 10 " + name1) - tests := []*Msg{ - makeMsg(name1, []RR{rrA}, nil, nil), - makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)} - - for _, msg := range tests { - predicted := msg.Len() - buf, err := msg.Pack() - if err != nil { - t.Error(err) - } - if predicted < len(buf) { - t.Errorf("predicted length is wrong: predicted %s (len=%d), actual %d", - msg.Question[0].Name, predicted, len(buf)) - } - } -} - -func TestCompressionLenHelper(t *testing.T) { - c := make(map[string]int) - compressionLenHelper(c, "example.com", 12) - if c["example.com"] != 12 { - t.Errorf("bad %d", c["example.com"]) - } - if c["com"] != 20 { - t.Errorf("bad %d", c["com"]) - } - - // Test boundaries - c = make(map[string]int) - // foo label starts at 16379 - // com label starts at 16384 - compressionLenHelper(c, "foo.com", 16379) - if c["foo.com"] != 16379 { - t.Errorf("bad %d", c["foo.com"]) - } - // com label is accessible - if c["com"] != 16383 { - t.Errorf("bad %d", c["com"]) - } - - c = make(map[string]int) - // foo label starts at 16379 - // com label starts at 16385 => outside range - compressionLenHelper(c, "foo.com", 16380) - if c["foo.com"] != 16380 { - t.Errorf("bad %d", c["foo.com"]) - } - // com label is NOT accessible - if c["com"] != 0 { - t.Errorf("bad %d", c["com"]) - } - - c = make(map[string]int) - compressionLenHelper(c, "example.com", 16375) - if c["example.com"] != 16375 { - t.Errorf("bad %d", c["example.com"]) - } - // com starts AFTER 16384 - if c["com"] != 16383 { - t.Errorf("bad %d", c["com"]) - } - - c = make(map[string]int) - compressionLenHelper(c, "example.com", 16376) - if c["example.com"] != 16376 { - t.Errorf("bad %d", c["example.com"]) - } - // com starts AFTER 16384 - if c["com"] != 0 { - t.Errorf("bad %d", c["com"]) - } -} - -func TestCompressionLenSearch(t *testing.T) { - c := make(map[string]int) - compressed, ok, fullSize := compressionLenSearch(c, "a.b.org.") - if compressed != 0 || ok || fullSize != 14 { - panic(fmt.Errorf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize)) - } - c["org."] = 3 - compressed, ok, fullSize = compressionLenSearch(c, "a.b.org.") - if compressed != 4 || !ok || fullSize != 8 { - panic(fmt.Errorf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize)) - } - c["b.org."] = 5 - compressed, ok, fullSize = compressionLenSearch(c, "a.b.org.") - if compressed != 6 || !ok || fullSize != 4 { - panic(fmt.Errorf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize)) - } - // Not found long compression - c["x.b.org."] = 5 - compressed, ok, fullSize = compressionLenSearch(c, "a.b.org.") - if compressed != 6 || !ok || fullSize != 4 { - panic(fmt.Errorf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize)) - } - // Found long compression - c["a.b.org."] = 5 - compressed, ok, fullSize = compressionLenSearch(c, "a.b.org.") - if compressed != 8 || !ok || fullSize != 0 { - panic(fmt.Errorf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize)) - } -} - -func TestMsgLength2(t *testing.T) { - // Serialized replies - var testMessages = []string{ - // google.com. IN A? 
- "064e81800001000b0004000506676f6f676c6503636f6d0000010001c00c00010001000000050004adc22986c00c00010001000000050004adc22987c00c00010001000000050004adc22988c00c00010001000000050004adc22989c00c00010001000000050004adc2298ec00c00010001000000050004adc22980c00c00010001000000050004adc22981c00c00010001000000050004adc22982c00c00010001000000050004adc22983c00c00010001000000050004adc22984c00c00010001000000050004adc22985c00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc0d800010001000000050004d8ef200ac0ea00010001000000050004d8ef220ac0fc00010001000000050004d8ef240ac10e00010001000000050004d8ef260a0000290500000000050000", - // amazon.com. IN A? (reply has no EDNS0 record) - // TODO(miek): this one is off-by-one, need to find out why - //"6de1818000010004000a000806616d617a6f6e03636f6d0000010001c00c000100010000000500044815c2d4c00c000100010000000500044815d7e8c00c00010001000000050004b02062a6c00c00010001000000050004cdfbf236c00c000200010000000500140570646e733408756c747261646e73036f726700c00c000200010000000500150570646e733508756c747261646e7304696e666f00c00c000200010000000500160570646e733608756c747261646e7302636f02756b00c00c00020001000000050014036e7331037033310664796e656374036e657400c00c00020001000000050006036e7332c0cfc00c00020001000000050006036e7333c0cfc00c00020001000000050006036e7334c0cfc00c000200010000000500110570646e733108756c747261646e73c0dac00c000200010000000500080570646e7332c127c00c000200010000000500080570646e7333c06ec0cb00010001000000050004d04e461fc0eb00010001000000050004cc0dfa1fc0fd00010001000000050004d04e471fc10f00010001000000050004cc0dfb1fc12100010001000000050004cc4a6c01c121001c000100000005001020010502f3ff00000000000000000001c13e00010001000000050004cc4a6d01c13e001c0001000000050010261000a1101400000000000000000001", - // yahoo.com. IN A? - "fc2d81800001000300070008057961686f6f03636f6d0000010001c00c00010001000000050004628afd6dc00c00010001000000050004628bb718c00c00010001000000050004cebe242dc00c00020001000000050006036e7336c00cc00c00020001000000050006036e7338c00cc00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7335c00cc07b0001000100000005000444b48310c08d00010001000000050004448eff10c09f00010001000000050004cb54dd35c0b100010001000000050004628a0b9dc0c30001000100000005000477a0f77cc05700010001000000050004ca2bdfaac06900010001000000050004caa568160000290500000000050000", - // microsoft.com. IN A? - "f4368180000100020005000b096d6963726f736f667403636f6d0000010001c00c0001000100000005000440040b25c00c0001000100000005000441373ac9c00c0002000100000005000e036e7331046d736674036e657400c00c00020001000000050006036e7332c04fc00c00020001000000050006036e7333c04fc00c00020001000000050006036e7334c04fc00c00020001000000050006036e7335c04fc04b000100010000000500044137253ec04b001c00010000000500102a010111200500000000000000010001c0650001000100000005000440043badc065001c00010000000500102a010111200600060000000000010001c07700010001000000050004d5c7b435c077001c00010000000500102a010111202000000000000000010001c08900010001000000050004cf2e4bfec089001c00010000000500102404f800200300000000000000010001c09b000100010000000500044137e28cc09b001c00010000000500102a010111200f000100000000000100010000290500000000050000", - // google.com. IN MX? 
- "724b8180000100050004000b06676f6f676c6503636f6d00000f0001c00c000f000100000005000c000a056173706d78016cc00cc00c000f0001000000050009001404616c7431c02ac00c000f0001000000050009001e04616c7432c02ac00c000f0001000000050009002804616c7433c02ac00c000f0001000000050009003204616c7434c02ac00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7331c00cc02a00010001000000050004adc2421bc02a001c00010000000500102a00145040080c01000000000000001bc04200010001000000050004adc2461bc05700010001000000050004adc2451bc06c000100010000000500044a7d8f1bc081000100010000000500044a7d191bc0ca00010001000000050004d8ef200ac09400010001000000050004d8ef220ac0a600010001000000050004d8ef240ac0b800010001000000050004d8ef260a0000290500000000050000", - // reddit.com. IN A? - "12b98180000100080000000c0672656464697403636f6d0000020001c00c0002000100000005000f046175733204616b616d036e657400c00c000200010000000500070475736534c02dc00c000200010000000500070475737733c02dc00c000200010000000500070475737735c02dc00c00020001000000050008056173696131c02dc00c00020001000000050008056173696139c02dc00c00020001000000050008056e73312d31c02dc00c0002000100000005000a076e73312d313935c02dc02800010001000000050004c30a242ec04300010001000000050004451f1d39c05600010001000000050004451f3bc7c0690001000100000005000460073240c07c000100010000000500046007fb81c090000100010000000500047c283484c090001c00010000000500102a0226f0006700000000000000000064c0a400010001000000050004c16c5b01c0a4001c000100000005001026001401000200000000000000000001c0b800010001000000050004c16c5bc3c0b8001c0001000000050010260014010002000000000000000000c30000290500000000050000", - } - - for i, hexData := range testMessages { - // we won't fail the decoding of the hex - input, _ := hex.DecodeString(hexData) - - m := new(Msg) - m.Unpack(input) - m.Compress = true - lenComp := m.Len() - b, _ := m.Pack() - pacComp := len(b) - m.Compress = false - lenUnComp := m.Len() - b, _ = m.Pack() - pacUnComp := len(b) - if pacComp+1 != lenComp { - t.Errorf("msg.Len(compressed)=%d actual=%d for test %d", lenComp, pacComp, i) - } - if pacUnComp+1 != lenUnComp { - t.Errorf("msg.Len(uncompressed)=%d actual=%d for test %d", lenUnComp, pacUnComp, i) - } - } -} - -func TestMsgLengthCompressionMalformed(t *testing.T) { - // SOA with empty hostmaster, which is illegal - soa := &SOA{Hdr: RR_Header{Name: ".", Rrtype: TypeSOA, Class: ClassINET, Ttl: 12345}, - Ns: ".", - Mbox: "", - Serial: 0, - Refresh: 28800, - Retry: 7200, - Expire: 604800, - Minttl: 60} - m := new(Msg) - m.Compress = true - m.Ns = []RR{soa} - m.Len() // Should not crash. 
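// The 16383/16384 boundaries exercised by the deleted TestCompressionLenHelper
// above come from the DNS name-compression pointer format: a pointer is two
// bytes whose top two bits are set, leaving a 14-bit offset, so only names
// that start before offset 2<<13 = 16384 can be pointer targets. A tiny
// self-contained sketch of that arithmetic (illustrative, not library code):
package main

import "fmt"

func main() {
	const maxCompressionOffset = 2 << 13 // 14 usable bits
	fmt.Println(maxCompressionOffset)    // 16384

	offset := 16383 // the last offset that still fits in 14 bits
	b0 := byte(0xC0 | offset>>8)
	b1 := byte(offset & 0xFF)
	fmt.Printf("pointer bytes: %02x %02x\n", b0, b1) // ff ff
}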
-} - -func TestMsgCompressLength2(t *testing.T) { - msg := new(Msg) - msg.Compress = true - msg.SetQuestion(Fqdn("bliep."), TypeANY) - msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "blaat.", Rrtype: 0x21, Class: 0x1, Ttl: 0x3c}, Port: 0x4c57, Target: "foo.bar."}) - msg.Extra = append(msg.Extra, &A{Hdr: RR_Header{Name: "foo.bar.", Rrtype: 0x1, Class: 0x1, Ttl: 0x3c}, A: net.IP{0xac, 0x11, 0x0, 0x3}}) - predicted := msg.Len() - buf, err := msg.Pack() - if err != nil { - t.Error(err) - } - if predicted != len(buf) { - t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", - msg.Question[0].Name, len(msg.Answer), predicted, len(buf)) - } -} - -func TestMsgCompressLengthLargeRecords(t *testing.T) { - msg := new(Msg) - msg.Compress = true - msg.SetQuestion("my.service.acme.", TypeSRV) - j := 1 - for i := 0; i < 250; i++ { - target := fmt.Sprintf("host-redis-1-%d.test.acme.com.node.dc1.consul.", i) - msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "redis.service.consul.", Class: 1, Rrtype: TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target}) - msg.Extra = append(msg.Extra, &CNAME{Hdr: RR_Header{Name: target, Class: 1, Rrtype: TypeCNAME, Ttl: 0x3c}, Target: fmt.Sprintf("fx.168.%d.%d.", j, i)}) - } - predicted := msg.Len() - buf, err := msg.Pack() - if err != nil { - t.Error(err) - } - if predicted != len(buf) { - t.Fatalf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", msg.Question[0].Name, len(msg.Answer), predicted, len(buf)) - } -} - -func TestCompareCompressionMapsForANY(t *testing.T) { - msg := new(Msg) - msg.Compress = true - msg.SetQuestion("a.service.acme.", TypeANY) - // Be sure to have more than 14bits - for i := 0; i < 2000; i++ { - target := fmt.Sprintf("host.app-%d.x%d.test.acme.", i%250, i) - msg.Answer = append(msg.Answer, &AAAA{Hdr: RR_Header{Name: target, Rrtype: TypeAAAA, Class: ClassINET, Ttl: 0x3c}, AAAA: net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, byte(i / 255), byte(i % 255)}}) - msg.Answer = append(msg.Answer, &A{Hdr: RR_Header{Name: target, Rrtype: TypeA, Class: ClassINET, Ttl: 0x3c}, A: net.IP{127, 0, byte(i / 255), byte(i % 255)}}) - if msg.Len() > 16384 { - break - } - } - for labelSize := 0; labelSize < 63; labelSize++ { - msg.SetQuestion(fmt.Sprintf("a%s.service.acme.", strings.Repeat("x", labelSize)), TypeANY) - - compressionFake := make(map[string]int) - lenFake := compressedLenWithCompressionMap(msg, compressionFake) - - compressionReal := make(map[string]int) - buf, err := msg.packBufferWithCompressionMap(nil, compressionReal) - if err != nil { - t.Fatal(err) - } - if lenFake != len(buf) { - t.Fatalf("padding= %d ; Predicted len := %d != real:= %d", labelSize, lenFake, len(buf)) - } - if !reflect.DeepEqual(compressionFake, compressionReal) { - t.Fatalf("padding= %d ; Fake Compression Map != Real Compression Map\n*** Real:= %v\n\n***Fake:= %v", labelSize, compressionReal, compressionFake) - } - } -} - -func TestCompareCompressionMapsForSRV(t *testing.T) { - msg := new(Msg) - msg.Compress = true - msg.SetQuestion("a.service.acme.", TypeSRV) - // Be sure to have more than 14bits - for i := 0; i < 2000; i++ { - target := fmt.Sprintf("host.app-%d.x%d.test.acme.", i%250, i) - msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "redis.service.consul.", Class: ClassINET, Rrtype: TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target}) - msg.Extra = append(msg.Extra, &A{Hdr: RR_Header{Name: target, Rrtype: TypeA, Class: ClassINET, Ttl: 0x3c}, A: net.IP{127, 0, byte(i / 255), byte(i % 
255)}}) - if msg.Len() > 16384 { - break - } - } - for labelSize := 0; labelSize < 63; labelSize++ { - msg.SetQuestion(fmt.Sprintf("a%s.service.acme.", strings.Repeat("x", labelSize)), TypeAAAA) - - compressionFake := make(map[string]int) - lenFake := compressedLenWithCompressionMap(msg, compressionFake) - - compressionReal := make(map[string]int) - buf, err := msg.packBufferWithCompressionMap(nil, compressionReal) - if err != nil { - t.Fatal(err) - } - if lenFake != len(buf) { - t.Fatalf("padding= %d ; Predicted len := %d != real:= %d", labelSize, lenFake, len(buf)) - } - if !reflect.DeepEqual(compressionFake, compressionReal) { - t.Fatalf("padding= %d ; Fake Compression Map != Real Compression Map\n*** Real:= %v\n\n***Fake:= %v", labelSize, compressionReal, compressionFake) - } - } -} - -func TestMsgCompressLengthLargeRecordsWithPaddingPermutation(t *testing.T) { - msg := new(Msg) - msg.Compress = true - msg.SetQuestion("my.service.acme.", TypeSRV) - - for i := 0; i < 250; i++ { - target := fmt.Sprintf("host-redis-x-%d.test.acme.com.node.dc1.consul.", i) - msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "redis.service.consul.", Class: 1, Rrtype: TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target}) - msg.Extra = append(msg.Extra, &CNAME{Hdr: RR_Header{Name: target, Class: ClassINET, Rrtype: TypeCNAME, Ttl: 0x3c}, Target: fmt.Sprintf("fx.168.x.%d.", i)}) - } - for labelSize := 1; labelSize < 63; labelSize++ { - msg.SetQuestion(fmt.Sprintf("my.%s.service.acme.", strings.Repeat("x", labelSize)), TypeSRV) - predicted := msg.Len() - buf, err := msg.Pack() - if err != nil { - t.Error(err) - } - if predicted != len(buf) { - t.Fatalf("padding= %d ; predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", labelSize, msg.Question[0].Name, len(msg.Answer), predicted, len(buf)) - } - } -} - -func TestMsgCompressLengthLargeRecordsAllValues(t *testing.T) { - msg := new(Msg) - msg.Compress = true - msg.SetQuestion("redis.service.consul.", TypeSRV) - for i := 0; i < 900; i++ { - target := fmt.Sprintf("host-redis-%d-%d.test.acme.com.node.dc1.consul.", i/256, i%256) - msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "redis.service.consul.", Class: 1, Rrtype: TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target}) - msg.Extra = append(msg.Extra, &CNAME{Hdr: RR_Header{Name: target, Class: ClassINET, Rrtype: TypeCNAME, Ttl: 0x3c}, Target: fmt.Sprintf("fx.168.%d.%d.", i/256, i%256)}) - predicted := msg.Len() - buf, err := msg.Pack() - if err != nil { - t.Error(err) - } - if predicted != len(buf) { - t.Fatalf("predicted compressed length is wrong for %d records: predicted %s (len=%d) %d, actual %d", i, msg.Question[0].Name, len(msg.Answer), predicted, len(buf)) - } - } -} diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go index dcd3b6a5e167..ec2f7ab7bb7e 100644 --- a/vendor/github.com/miekg/dns/msg.go +++ b/vendor/github.com/miekg/dns/msg.go @@ -9,36 +9,42 @@ package dns //go:generate go run msg_generate.go -//go:generate go run compress_generate.go import ( crand "crypto/rand" "encoding/binary" - "fmt" "math/big" "math/rand" "strconv" - "sync" ) -const ( - maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer - maxDomainNameWireOctets = 255 // See RFC 1035 section 2.3.4 -) +func init() { + // Initialize default math/rand source using crypto/rand to provide better + // security without the performance trade-off. 
+ buf := make([]byte, 8) + _, err := crand.Read(buf) + if err != nil { + // Failed to read from cryptographic source, fallback to default initial + // seed (1) by returning early + return + } + seed := binary.BigEndian.Uint64(buf) + rand.Seed(int64(seed)) +} + +const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer -// Errors defined in this package. var ( ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm. ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication. - ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used is too small for the message. - ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being used before it is initialized. + ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used it too small for the message. + ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being uses before it is initialized. ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode ... ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot. ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID. ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid. ErrKey error = &Error{err: "bad key"} ErrKeySize error = &Error{err: "bad key size"} - ErrLongDomain error = &Error{err: fmt.Sprintf("domain name exceeded %d wire-format octets", maxDomainNameWireOctets)} ErrNoSig error = &Error{err: "no signature found"} ErrPrivKey error = &Error{err: "bad private key"} ErrRcode error = &Error{err: "bad rcode"} @@ -52,53 +58,19 @@ var ( ErrTruncated error = &Error{err: "failed to unpack truncated message"} // ErrTruncated indicates that we failed to unpack a truncated message. We unpacked as much as we had so Msg can still be used, if desired. ) -// Id by default, returns a 16 bits random number to be used as a +// Id, by default, returns a 16 bits random number to be used as a // message id. The random provided should be good enough. This being a // variable the function can be reassigned to a custom function. // For instance, to make it return a static value: // // dns.Id = func() uint16 { return 3 } -var Id = id - -var ( - idLock sync.Mutex - idRand *rand.Rand -) +var Id func() uint16 = id // id returns a 16 bits random number to be used as a // message id. The random provided should be good enough. func id() uint16 { - idLock.Lock() - - if idRand == nil { - // This (partially) works around - // https://github.com/golang/go/issues/11833 by only - // seeding idRand upon the first call to id. - - var seed int64 - var buf [8]byte - - if _, err := crand.Read(buf[:]); err == nil { - seed = int64(binary.LittleEndian.Uint64(buf[:])) - } else { - seed = rand.Int63() - } - - idRand = rand.New(rand.NewSource(seed)) - } - - // The call to idRand.Uint32 must be within the - // mutex lock because *rand.Rand is not safe for - // concurrent use. - // - // There is no added performance overhead to calling - // idRand.Uint32 inside a mutex lock over just - // calling rand.Uint32 as the global math/rand rng - // is internally protected by a sync.Mutex. 
- id := uint16(idRand.Uint32()) - - idLock.Unlock() - return id + id32 := rand.Uint32() + return uint16(id32) } // MsgHdr is a a manually-unpacked version of (id, bits). @@ -231,6 +203,12 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c bs[j] = bs[j+2] } ls -= 2 + } else if bs[i] == 't' { + bs[i] = '\t' + } else if bs[i] == 'r' { + bs[i] = '\r' + } else if bs[i] == 'n' { + bs[i] = '\n' } escapedDot = bs[i] == '.' bsFresh = false @@ -269,9 +247,7 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c bsFresh = true } // Don't try to compress '.' - // We should only compress when compress it true, but we should also still pick - // up names that can be used for *future* compression(s). - if compression != nil && roBs[begin:] != "." { + if compress && roBs[begin:] != "." { if p, ok := compression[roBs[begin:]]; !ok { // Only offsets smaller than this can be used. if offset < maxCompressionOffset { @@ -335,7 +311,6 @@ func UnpackDomainName(msg []byte, off int) (string, int, error) { s := make([]byte, 0, 64) off1 := 0 lenmsg := len(msg) - maxLen := maxDomainNameWireOctets ptr := 0 // number of pointers followed Loop: for { @@ -360,10 +335,12 @@ Loop: fallthrough case '"', '\\': s = append(s, '\\', b) - // presentation-format \X escapes add an extra byte - maxLen++ + case '\t': + s = append(s, '\\', 't') + case '\r': + s = append(s, '\\', 'r') default: - if b < 32 || b >= 127 { // unprintable, use \DDD + if b < 32 || b >= 127 { // unprintable use \DDD var buf [3]byte bufs := strconv.AppendInt(buf[:0], int64(b), 10) s = append(s, '\\') @@ -373,8 +350,6 @@ Loop: for _, r := range bufs { s = append(s, r) } - // presentation-format \DDD escapes add 3 extra bytes - maxLen += 3 } else { s = append(s, b) } @@ -399,9 +374,6 @@ Loop: if ptr++; ptr > 10 { return "", lenmsg, &Error{err: "too many compression pointers"} } - // pointer should guarantee that it advances and points forwards at least - // but the condition on previous three lines guarantees that it's - // at least loop-free off = (c^0xC0)<<8 | int(c1) default: // 0x80 and 0x40 are reserved @@ -413,9 +385,6 @@ Loop: } if len(s) == 0 { s = []byte(".") - } else if len(s) >= maxLen { - // error if the name is too long, but don't throw it away - return string(s), lenmsg, ErrLongDomain } return string(s), off1, nil } @@ -462,6 +431,12 @@ func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { msg[offset] = dddToByte(bs[i:]) i += 2 + } else if bs[i] == 't' { + msg[offset] = '\t' + } else if bs[i] == 'r' { + msg[offset] = '\r' + } else if bs[i] == 'n' { + msg[offset] = '\n' } else { msg[offset] = bs[i] } @@ -533,6 +508,12 @@ func unpackTxtString(msg []byte, offset int) (string, int, error) { switch b { case '"', '\\': s = append(s, '\\', b) + case '\t': + s = append(s, `\t`...) + case '\r': + s = append(s, `\r`...) + case '\n': + s = append(s, `\n`...) default: if b < 32 || b > 127 { // unprintable var buf [3]byte @@ -595,13 +576,6 @@ func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) { if err != nil { return nil, len(msg), err } - - return UnpackRRWithHeader(h, msg, off) -} - -// UnpackRRWithHeader unpacks the record type specific payload given an existing -// RR_Header. 
-func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) { end := off + int(h.Rdlength) if fn, known := typeToUnpack[h.Rrtype]; !known { @@ -619,8 +593,8 @@ func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err // If we cannot unpack the whole array, then it will return nil func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) { var r RR - // Don't pre-allocate, l may be under attacker control - var dst []RR + // Optimistically make dst be the length that was sent + dst := make([]RR, 0, l) for i := 0; i < l; i++ { off1 := off r, off, err = UnpackRR(msg, off) @@ -691,20 +665,18 @@ func (dns *Msg) Pack() (msg []byte, err error) { return dns.PackBuffer(nil) } -// PackBuffer packs a Msg, using the given buffer buf. If buf is too small a new buffer is allocated. +// PackBuffer packs a Msg, using the given buffer buf. If buf is too small +// a new buffer is allocated. func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) { - var compression map[string]int - if dns.Compress { - compression = make(map[string]int) // Compression pointer mappings. - } - return dns.packBufferWithCompressionMap(buf, compression) -} - -// packBufferWithCompressionMap packs a Msg, using the given buffer buf. -func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression map[string]int) (msg []byte, err error) { // We use a similar function in tsig.go's stripTsig. + var ( + dh Header + compression map[string]int + ) - var dh Header + if dns.Compress { + compression = make(map[string]int) // Compression pointer mappings + } if dns.Rcode < 0 || dns.Rcode > 0xFFF { return nil, ErrRcode @@ -716,11 +688,12 @@ func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression map[string] return nil, ErrExtendedRcode } opt.SetExtendedRcode(uint8(dns.Rcode >> 4)) + dns.Rcode &= 0xF } // Convert convenient Msg into wire-like Header. dh.Id = dns.Id - dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode&0xF) + dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode) if dns.Response { dh.Bits |= _QR } @@ -759,10 +732,12 @@ func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression map[string] // We need the uncompressed length here, because we first pack it and then compress it. msg = buf - uncompressedLen := compressedLen(dns, false) - if packLen := uncompressedLen + 1; len(msg) < packLen { + compress := dns.Compress + dns.Compress = false + if packLen := dns.Len() + 1; len(msg) < packLen { msg = make([]byte, packLen) } + dns.Compress = compress // Pack it in: header and then the pieces. off := 0 @@ -806,6 +781,9 @@ func (dns *Msg) Unpack(msg []byte) (err error) { if dh, off, err = unpackMsgHdr(msg, off); err != nil { return err } + if off == len(msg) { + return ErrTruncated + } dns.Id = dh.Id dns.Response = (dh.Bits & _QR) != 0 @@ -819,19 +797,9 @@ func (dns *Msg) Unpack(msg []byte) (err error) { dns.CheckingDisabled = (dh.Bits & _CD) != 0 dns.Rcode = int(dh.Bits & 0xF) - // If we are at the end of the message we should return *just* the - // header. This can still be useful to the caller. 9.9.9.9 sends these - // when responding with REFUSED for instance. - if off == len(msg) { - // reset sections before returning - dns.Question, dns.Answer, dns.Ns, dns.Extra = nil, nil, nil, nil - return nil - } + // Optimistically use the count given to us in the header + dns.Question = make([]Question, 0, int(dh.Qdcount)) - // Qdcount, Ancount, Nscount, Arcount can't be trusted, as they are - // attacker controlled. 
This means we can't use them to pre-allocate - // slices. - dns.Question = nil for i := 0; i < int(dh.Qdcount); i++ { off1 := off var q Question @@ -921,140 +889,197 @@ func (dns *Msg) String() string { // If dns.Compress is true compression it is taken into account. Len() // is provided to be a faster way to get the size of the resulting packet, // than packing it, measuring the size and discarding the buffer. -func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) } - -func compressedLenWithCompressionMap(dns *Msg, compression map[string]int) int { - l := 12 // Message header is always 12 bytes - for _, r := range dns.Question { - compressionLenHelper(compression, r.Name, l) - l += r.len() - } - l += compressionLenSlice(l, compression, dns.Answer) - l += compressionLenSlice(l, compression, dns.Ns) - l += compressionLenSlice(l, compression, dns.Extra) - return l -} - -// compressedLen returns the message length when in compressed wire format -// when compress is true, otherwise the uncompressed length is returned. -func compressedLen(dns *Msg, compress bool) int { +func (dns *Msg) Len() int { // We always return one more than needed. - if compress { - compression := map[string]int{} - return compressedLenWithCompressionMap(dns, compression) - } l := 12 // Message header is always 12 bytes - - for _, r := range dns.Question { - l += r.len() + var compression map[string]int + if dns.Compress { + compression = make(map[string]int) } - for _, r := range dns.Answer { - if r != nil { - l += r.len() + for i := 0; i < len(dns.Question); i++ { + l += dns.Question[i].len() + if dns.Compress { + compressionLenHelper(compression, dns.Question[i].Name) } } - for _, r := range dns.Ns { - if r != nil { - l += r.len() + for i := 0; i < len(dns.Answer); i++ { + if dns.Answer[i] == nil { + continue } - } - for _, r := range dns.Extra { - if r != nil { - l += r.len() + l += dns.Answer[i].len() + if dns.Compress { + k, ok := compressionLenSearch(compression, dns.Answer[i].Header().Name) + if ok { + l += 1 - k + } + compressionLenHelper(compression, dns.Answer[i].Header().Name) + k, ok = compressionLenSearchType(compression, dns.Answer[i]) + if ok { + l += 1 - k + } + compressionLenHelperType(compression, dns.Answer[i]) } } - - return l -} - -func compressionLenSlice(lenp int, c map[string]int, rs []RR) int { - initLen := lenp - for _, r := range rs { - if r == nil { + for i := 0; i < len(dns.Ns); i++ { + if dns.Ns[i] == nil { continue } - // TmpLen is to track len of record at 14bits boudaries - tmpLen := lenp - - x := r.len() - // track this length, and the global length in len, while taking compression into account for both. - k, ok, _ := compressionLenSearch(c, r.Header().Name) - if ok { - // Size of x is reduced by k, but we add 1 since k includes the '.' 
and label descriptor take 2 bytes - // so, basically x:= x - k - 1 + 2 - x += 1 - k + l += dns.Ns[i].len() + if dns.Compress { + k, ok := compressionLenSearch(compression, dns.Ns[i].Header().Name) + if ok { + l += 1 - k + } + compressionLenHelper(compression, dns.Ns[i].Header().Name) + k, ok = compressionLenSearchType(compression, dns.Ns[i]) + if ok { + l += 1 - k + } + compressionLenHelperType(compression, dns.Ns[i]) } - - tmpLen += compressionLenHelper(c, r.Header().Name, tmpLen) - k, ok, _ = compressionLenSearchType(c, r) - if ok { - x += 1 - k + } + for i := 0; i < len(dns.Extra); i++ { + if dns.Extra[i] == nil { + continue + } + l += dns.Extra[i].len() + if dns.Compress { + k, ok := compressionLenSearch(compression, dns.Extra[i].Header().Name) + if ok { + l += 1 - k + } + compressionLenHelper(compression, dns.Extra[i].Header().Name) + k, ok = compressionLenSearchType(compression, dns.Extra[i]) + if ok { + l += 1 - k + } + compressionLenHelperType(compression, dns.Extra[i]) } - lenp += x - tmpLen = lenp - tmpLen += compressionLenHelperType(c, r, tmpLen) - } - return lenp - initLen + return l } -// Put the parts of the name in the compression map, return the size in bytes added in payload -func compressionLenHelper(c map[string]int, s string, currentLen int) int { - if currentLen > maxCompressionOffset { - // We won't be able to add any label that could be re-used later anyway - return 0 - } - if _, ok := c[s]; ok { - return 0 - } - initLen := currentLen +// Put the parts of the name in the compression map. +func compressionLenHelper(c map[string]int, s string) { pref := "" - prev := s lbs := Split(s) - for j := 0; j < len(lbs); j++ { + for j := len(lbs) - 1; j >= 0; j-- { pref = s[lbs[j]:] - currentLen += len(prev) - len(pref) - prev = pref if _, ok := c[pref]; !ok { - // If first byte label is within the first 14bits, it might be re-used later - if currentLen < maxCompressionOffset { - c[pref] = currentLen - } - } else { - added := currentLen - initLen - if j > 0 { - // We added a new PTR - added += 2 - } - return added + c[pref] = len(pref) } } - return currentLen - initLen } // Look for each part in the compression map and returns its length, // keep on searching so we get the longest match. -// Will return the size of compression found, whether a match has been -// found and the size of record if added in payload -func compressionLenSearch(c map[string]int, s string) (int, bool, int) { +func compressionLenSearch(c map[string]int, s string) (int, bool) { off := 0 end := false if s == "" { // don't bork on bogus data - return 0, false, 0 + return 0, false } - fullSize := 0 for { if _, ok := c[s[off:]]; ok { - return len(s[off:]), true, fullSize + off + return len(s[off:]), true } if end { break } - // Each label descriptor takes 2 bytes, add it - fullSize += 2 off, end = NextLabel(s, off) } - return 0, false, fullSize + len(s) + return 0, false +} + +// TODO(miek): should add all types, because the all can be *used* for compression. 
Autogenerate from msg_generate and put in zmsg.go +func compressionLenHelperType(c map[string]int, r RR) { + switch x := r.(type) { + case *NS: + compressionLenHelper(c, x.Ns) + case *MX: + compressionLenHelper(c, x.Mx) + case *CNAME: + compressionLenHelper(c, x.Target) + case *PTR: + compressionLenHelper(c, x.Ptr) + case *SOA: + compressionLenHelper(c, x.Ns) + compressionLenHelper(c, x.Mbox) + case *MB: + compressionLenHelper(c, x.Mb) + case *MG: + compressionLenHelper(c, x.Mg) + case *MR: + compressionLenHelper(c, x.Mr) + case *MF: + compressionLenHelper(c, x.Mf) + case *MD: + compressionLenHelper(c, x.Md) + case *RT: + compressionLenHelper(c, x.Host) + case *RP: + compressionLenHelper(c, x.Mbox) + compressionLenHelper(c, x.Txt) + case *MINFO: + compressionLenHelper(c, x.Rmail) + compressionLenHelper(c, x.Email) + case *AFSDB: + compressionLenHelper(c, x.Hostname) + case *SRV: + compressionLenHelper(c, x.Target) + case *NAPTR: + compressionLenHelper(c, x.Replacement) + case *RRSIG: + compressionLenHelper(c, x.SignerName) + case *NSEC: + compressionLenHelper(c, x.NextDomain) + // HIP? + } +} + +// Only search on compressing these types. +func compressionLenSearchType(c map[string]int, r RR) (int, bool) { + switch x := r.(type) { + case *NS: + return compressionLenSearch(c, x.Ns) + case *MX: + return compressionLenSearch(c, x.Mx) + case *CNAME: + return compressionLenSearch(c, x.Target) + case *DNAME: + return compressionLenSearch(c, x.Target) + case *PTR: + return compressionLenSearch(c, x.Ptr) + case *SOA: + k, ok := compressionLenSearch(c, x.Ns) + k1, ok1 := compressionLenSearch(c, x.Mbox) + if !ok && !ok1 { + return 0, false + } + return k + k1, true + case *MB: + return compressionLenSearch(c, x.Mb) + case *MG: + return compressionLenSearch(c, x.Mg) + case *MR: + return compressionLenSearch(c, x.Mr) + case *MF: + return compressionLenSearch(c, x.Mf) + case *MD: + return compressionLenSearch(c, x.Md) + case *RT: + return compressionLenSearch(c, x.Host) + case *MINFO: + k, ok := compressionLenSearch(c, x.Rmail) + k1, ok1 := compressionLenSearch(c, x.Email) + if !ok && !ok1 { + return 0, false + } + return k + k1, true + case *AFSDB: + return compressionLenSearch(c, x.Hostname) + } + return 0, false } // Copy returns a new RR which is a deep-copy of r. diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go index 8ba609f7269e..166b3af00c83 100644 --- a/vendor/github.com/miekg/dns/msg_generate.go +++ b/vendor/github.com/miekg/dns/msg_generate.go @@ -18,7 +18,8 @@ import ( ) var packageHdr = ` -// Code generated by "go run msg_generate.go"; DO NOT EDIT. 
+// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from msg_generate.go package dns @@ -116,9 +117,9 @@ return off, err switch { case st.Tag(i) == `dns:"-"`: // ignored case st.Tag(i) == `dns:"cdomain-name"`: - o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n") + fallthrough case st.Tag(i) == `dns:"domain-name"`: - o("off, err = PackDomainName(rr.%s, msg, off, compression, false)\n") + o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n") case st.Tag(i) == `dns:"a"`: o("off, err = packDataA(rr.%s, msg, off)\n") case st.Tag(i) == `dns:"aaaa"`: @@ -138,18 +139,6 @@ return off, err case st.Tag(i) == `dns:"base64"`: o("off, err = packStringBase64(rr.%s, msg, off)\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): - // directly write instead of using o() so we get the error check in the correct place - field := st.Field(i).Name() - fmt.Fprintf(b, `// Only pack salt if value is not "-", i.e. empty -if rr.%s != "-" { - off, err = packStringHex(rr.%s, msg, off) - if err != nil { - return off, err - } -} -`, field, field) - continue case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex fallthrough case st.Tag(i) == `dns:"hex"`: @@ -177,7 +166,7 @@ if rr.%s != "-" { } } // We have packed everything, only now we know the rdlength of this RR - fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)") + fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off- headerEnd)") fmt.Fprintln(b, "return off, nil }\n") } diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go index 4a6e878de93a..e7a9500cc074 100644 --- a/vendor/github.com/miekg/dns/msg_helpers.go +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -96,7 +96,7 @@ func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, return hdr, len(msg), msg, err } msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength) - return hdr, off, msg, err + return hdr, off, msg, nil } // pack packs an RR header, returning the offset to the end of the header. 
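The msg_helpers.go hunk that follows swaps the library back from a dedicated unpadded base32hex encoder to the stock padded `base32.HexEncoding`. The distinction matters for NSEC3, whose owner hashes are base32hex without `=` padding; a padded decoder rejects such strings whenever the underlying digest is not a multiple of five bytes. A small self-contained sketch of the difference, using only the standard library (the digest bytes are made up):

```go
package main

import (
	"encoding/base32"
	"fmt"
)

func main() {
	digest := []byte{0xde, 0xad, 0xbe, 0xef} // 4 bytes, not a multiple of 5

	padded := base32.HexEncoding
	noPad := base32.HexEncoding.WithPadding(base32.NoPadding)

	fmt.Println(padded.EncodeToString(digest)) // ends in "=", never seen in NSEC3 owner names
	fmt.Println(noPad.EncodeToString(digest))  // bare base32hex, the NSEC3 presentation form

	// The padded decoder rejects the unpadded form outright:
	_, err := padded.DecodeString(noPad.EncodeToString(digest))
	fmt.Println(err) // non-nil
}
```
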
@@ -141,24 +141,15 @@ func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []b return msg[:lenrd], nil } -var base32HexNoPadEncoding = base32.HexEncoding.WithPadding(base32.NoPadding) - func fromBase32(s []byte) (buf []byte, err error) { - for i, b := range s { - if b >= 'a' && b <= 'z' { - s[i] = b - 32 - } - } - buflen := base32HexNoPadEncoding.DecodedLen(len(s)) + buflen := base32.HexEncoding.DecodedLen(len(s)) buf = make([]byte, buflen) - n, err := base32HexNoPadEncoding.Decode(buf, s) + n, err := base32.HexEncoding.Decode(buf, s) buf = buf[:n] return } -func toBase32(b []byte) string { - return base32HexNoPadEncoding.EncodeToString(b) -} +func toBase32(b []byte) string { return base32.HexEncoding.EncodeToString(b) } func fromBase64(s []byte) (buf []byte, err error) { buflen := base64.StdEncoding.DecodedLen(len(s)) @@ -272,6 +263,8 @@ func unpackString(msg []byte, off int) (string, int, error) { switch b { case '"', '\\': s = append(s, '\\', b) + case '\t', '\r', '\n': + s = append(s, b) default: if b < 32 || b > 127 { // unprintable var buf [3]byte @@ -410,13 +403,16 @@ Option: } edns = append(edns, e) off += int(optlen) - case EDNS0SUBNET: + case EDNS0SUBNET, EDNS0SUBNETDRAFT: e := new(EDNS0_SUBNET) if err := e.unpack(msg[off : off+int(optlen)]); err != nil { return nil, len(msg), err } edns = append(edns, e) off += int(optlen) + if code == EDNS0SUBNETDRAFT { + e.DraftOption = true + } case EDNS0COOKIE: e := new(EDNS0_COOKIE) if err := e.unpack(msg[off : off+int(optlen)]); err != nil { @@ -459,13 +455,6 @@ Option: } edns = append(edns, e) off += int(optlen) - case EDNS0PADDING: - e := new(EDNS0_PADDING) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) default: e := new(EDNS0_LOCAL) e.Code = code diff --git a/vendor/github.com/miekg/dns/msg_test.go b/vendor/github.com/miekg/dns/msg_test.go deleted file mode 100644 index afbe6d83216d..000000000000 --- a/vendor/github.com/miekg/dns/msg_test.go +++ /dev/null @@ -1,146 +0,0 @@ -package dns - -import ( - "fmt" - "regexp" - "strconv" - "strings" - "testing" -) - -const ( - maxPrintableLabel = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789x" - tooLongLabel = maxPrintableLabel + "x" -) - -var ( - longDomain = maxPrintableLabel[:53] + strings.TrimSuffix( - strings.Join([]string{".", ".", ".", ".", "."}, maxPrintableLabel[:49]), ".") - reChar = regexp.MustCompile(`.`) - i = -1 - maxUnprintableLabel = reChar.ReplaceAllStringFunc(maxPrintableLabel, func(ch string) string { - if i++; i >= 32 { - i = 0 - } - return fmt.Sprintf("\\%03d", i) - }) -) - -func TestPackNoSideEffect(t *testing.T) { - m := new(Msg) - m.SetQuestion(Fqdn("example.com."), TypeNS) - - a := new(Msg) - o := &OPT{ - Hdr: RR_Header{ - Name: ".", - Rrtype: TypeOPT, - }, - } - o.SetUDPSize(DefaultMsgSize) - - a.Extra = append(a.Extra, o) - a.SetRcode(m, RcodeBadVers) - - a.Pack() - if a.Rcode != RcodeBadVers { - t.Errorf("after pack: Rcode is expected to be BADVERS") - } -} - -func TestUnpackDomainName(t *testing.T) { - var cases = []struct { - label string - input string - expectedOutput string - expectedError string - }{ - {"empty domain", - "\x00", - ".", - ""}, - {"long label", - string(63) + maxPrintableLabel + "\x00", - maxPrintableLabel + ".", - ""}, - {"unprintable label", - string(63) + regexp.MustCompile(`\\[0-9]+`).ReplaceAllStringFunc(maxUnprintableLabel, - func(escape string) string { - n, _ := strconv.ParseInt(escape[1:], 10, 8) - return 
string(n) - }) + "\x00", - maxUnprintableLabel + ".", - ""}, - {"long domain", - string(53) + strings.Replace(longDomain, ".", string(49), -1) + "\x00", - longDomain + ".", - ""}, - {"compression pointer", - // an unrealistic but functional test referencing an offset _inside_ a label - "\x03foo" + "\x05\x03com\x00" + "\x07example" + "\xC0\x05", - "foo.\\003com\\000.example.com.", - ""}, - - {"too long domain", - string(54) + "x" + strings.Replace(longDomain, ".", string(49), -1) + "\x00", - "x" + longDomain + ".", - ErrLongDomain.Error()}, - {"too long by pointer", - // a matryoshka doll name to get over 255 octets after expansion via internal pointers - string([]byte{ - // 11 length values, first to last - 40, 37, 34, 31, 28, 25, 22, 19, 16, 13, 0, - // 12 filler values - 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, - // 10 pointers, last to first - 192, 10, 192, 9, 192, 8, 192, 7, 192, 6, 192, 5, 192, 4, 192, 3, 192, 2, 192, 1, - }), - "", - ErrLongDomain.Error()}, - {"long by pointer", - // a matryoshka doll name _not_ exceeding 255 octets after expansion - string([]byte{ - // 11 length values, first to last - 37, 34, 31, 28, 25, 22, 19, 16, 13, 10, 0, - // 9 filler values - 120, 120, 120, 120, 120, 120, 120, 120, 120, - // 10 pointers, last to first - 192, 10, 192, 9, 192, 8, 192, 7, 192, 6, 192, 5, 192, 4, 192, 3, 192, 2, 192, 1, - }), - "" + - (`\"\031\028\025\022\019\016\013\010\000xxxxxxxxx` + - `\192\010\192\009\192\008\192\007\192\006\192\005\192\004\192\003\192\002.`) + - (`\031\028\025\022\019\016\013\010\000xxxxxxxxx` + - `\192\010\192\009\192\008\192\007\192\006\192\005\192\004\192\003.`) + - (`\028\025\022\019\016\013\010\000xxxxxxxxx` + - `\192\010\192\009\192\008\192\007\192\006\192\005\192\004.`) + - (`\025\022\019\016\013\010\000xxxxxxxxx` + - `\192\010\192\009\192\008\192\007\192\006\192\005.`) + - `\022\019\016\013\010\000xxxxxxxxx\192\010\192\009\192\008\192\007\192\006.` + - `\019\016\013\010\000xxxxxxxxx\192\010\192\009\192\008\192\007.` + - `\016\013\010\000xxxxxxxxx\192\010\192\009\192\008.` + - `\013\010\000xxxxxxxxx\192\010\192\009.` + - `\010\000xxxxxxxxx\192\010.` + - `\000xxxxxxxxx.`, - ""}, - {"truncated name", "\x07example\x03", "", "dns: buffer size too small"}, - {"non-absolute name", "\x07example\x03com", "", "dns: buffer size too small"}, - {"compression pointer cycle", - "\x03foo" + "\x03bar" + "\x07example" + "\xC0\x04", - "", - "dns: too many compression pointers"}, - {"reserved compression pointer 0b10", "\x07example\x80", "", "dns: bad rdata"}, - {"reserved compression pointer 0b01", "\x07example\x40", "", "dns: bad rdata"}, - } - for _, test := range cases { - output, idx, err := UnpackDomainName([]byte(test.input), 0) - if test.expectedOutput != "" && output != test.expectedOutput { - t.Errorf("%s: expected %s, got %s", test.label, test.expectedOutput, output) - } - if test.expectedError == "" && err != nil { - t.Errorf("%s: expected no error, got %d %v", test.label, idx, err) - } else if test.expectedError != "" && (err == nil || err.Error() != test.expectedError) { - t.Errorf("%s: expected error %s, got %d %v", test.label, test.expectedError, idx, err) - } - } -} diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go index 9b908c447863..6f10f3e65bcc 100644 --- a/vendor/github.com/miekg/dns/nsecx.go +++ b/vendor/github.com/miekg/dns/nsecx.go @@ -3,6 +3,7 @@ package dns import ( "crypto/sha1" "hash" + "io" "strings" ) @@ -35,63 +36,75 @@ func HashName(label string, ha uint8, iter uint16, salt 
string) string { } // k = 0 - s.Write(name) - s.Write(wire) + name = append(name, wire...) + io.WriteString(s, string(name)) nsec3 := s.Sum(nil) // k > 0 for k := uint16(0); k < iter; k++ { s.Reset() - s.Write(nsec3) - s.Write(wire) - nsec3 = s.Sum(nsec3[:0]) + nsec3 = append(nsec3, wire...) + io.WriteString(s, string(nsec3)) + nsec3 = s.Sum(nil) } return toBase32(nsec3) } -// Cover returns true if a name is covered by the NSEC3 record +// Denialer is an interface that should be implemented by types that are used to denial +// answers in DNSSEC. +type Denialer interface { + // Cover will check if the (unhashed) name is being covered by this NSEC or NSEC3. + Cover(name string) bool + // Match will check if the ownername matches the (unhashed) name for this NSEC3 or NSEC3. + Match(name string) bool +} + +// Cover implements the Denialer interface. +func (rr *NSEC) Cover(name string) bool { + return true +} + +// Match implements the Denialer interface. +func (rr *NSEC) Match(name string) bool { + return true +} + +// Cover implements the Denialer interface. func (rr *NSEC3) Cover(name string) bool { - nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) - owner := strings.ToUpper(rr.Hdr.Name) - labelIndices := Split(owner) - if len(labelIndices) < 2 { + // FIXME(miek): check if the zones match + // FIXME(miek): check if we're not dealing with parent nsec3 + hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt) + labels := Split(rr.Hdr.Name) + if len(labels) < 2 { return false } - ownerHash := owner[:labelIndices[1]-1] - ownerZone := owner[labelIndices[1]:] - if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone - return false + hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the dot + if hash == rr.NextDomain { + return false // empty interval } - - nextHash := rr.NextDomain - if ownerHash == nextHash { // empty interval - return false + if hash > rr.NextDomain { // last name, points to apex + // hname > hash + // hname > rr.NextDomain + // TODO(miek) } - if ownerHash > nextHash { // end of zone - if nameHash > ownerHash { // covered since there is nothing after ownerHash - return true - } - return nameHash < nextHash // if nameHash is before beginning of zone it is covered + if hname <= hash { + return false } - if nameHash < ownerHash { // nameHash is before ownerHash, not covered + if hname >= rr.NextDomain { return false } - return nameHash < nextHash // if nameHash is before nextHash is it covered (between ownerHash and nextHash) + return true } -// Match returns true if a name matches the NSEC3 record +// Match implements the Denialer interface. func (rr *NSEC3) Match(name string) bool { - nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) - owner := strings.ToUpper(rr.Hdr.Name) - labelIndices := Split(owner) - if len(labelIndices) < 2 { - return false - } - ownerHash := owner[:labelIndices[1]-1] - ownerZone := owner[labelIndices[1]:] - if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone + // FIXME(miek): Check if we are in the same zone + hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt) + labels := Split(rr.Hdr.Name) + if len(labels) < 2 { return false } - if ownerHash == nameHash { + hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the . 
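For context on the Cover/Match logic being reverted in this hunk: both sides of each comparison are NSEC3 owner hashes produced by the exported `HashName` above, which hashes the wire-format name with the salt appended, repeats that for the configured number of iterations (RFC 5155), and base32hex-encodes the result. A minimal sketch of producing such a hash via the public API; the name, iteration count, and salt are the same illustrative values the surrounding .nl tests use:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// SHA-1 (algorithm 1), 5 extra iterations, hex-encoded salt.
	owner := dns.HashName("test.nl.", dns.SHA1, 5, "F10E9F7EA83FC8F3")
	// The result is a bare base32hex string; NSEC3 covering is decided
	// by ordering this value between an owner hash and NextDomain.
	fmt.Println(owner)
}
```
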
+ if hash == hname { return true } return false diff --git a/vendor/github.com/miekg/dns/nsecx_test.go b/vendor/github.com/miekg/dns/nsecx_test.go index 74ebda3f5440..93e0c63fceec 100644 --- a/vendor/github.com/miekg/dns/nsecx_test.go +++ b/vendor/github.com/miekg/dns/nsecx_test.go @@ -1,6 +1,8 @@ package dns -import "testing" +import ( + "testing" +) func TestPackNsec3(t *testing.T) { nsec3 := HashName("dnsex.nl.", SHA1, 0, "DEAD") @@ -15,127 +17,13 @@ func TestPackNsec3(t *testing.T) { } func TestNsec3(t *testing.T) { - nsec3 := testRR("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 SK4F38CQ0ATIEI8MH3RGD0P5I4II6QAN NS SOA TXT RRSIG DNSKEY NSEC3PARAM") - if !nsec3.(*NSEC3).Match("nl.") { // name hash = sk4e8fj94u78smusb40o1n0oltbblu2r - t.Fatal("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.") - } - if !nsec3.(*NSEC3).Match("NL.") { // name hash = sk4e8fj94u78smusb40o1n0oltbblu2r - t.Fatal("sk4e8fj94u78smusb40o1n0oltbblu2r.NL. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.") - } - if nsec3.(*NSEC3).Match("com.") { // - t.Fatal("com. is not in the zone nl.") - } - if nsec3.(*NSEC3).Match("test.nl.") { // name hash = gd0ptr5bnfpimpu2d3v6gd4n0bai7s0q - t.Fatal("gd0ptr5bnfpimpu2d3v6gd4n0bai7s0q.nl. should not match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.") - } - nsec3 = testRR("nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 SK4F38CQ0ATIEI8MH3RGD0P5I4II6QAN NS SOA TXT RRSIG DNSKEY NSEC3PARAM") - if nsec3.(*NSEC3).Match("nl.") { - t.Fatal("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. should not match a record without a owner hash") - } - - for _, tc := range []struct { - rr *NSEC3 - name string - covers bool - }{ - // positive tests - { // name hash between owner hash and next hash - rr: &NSEC3{ - Hdr: RR_Header{Name: "2N1TB3VAIRUOBL6RKDVII42N9TFMIALP.com."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "PT3RON8N7PM3A0OE989IB84OOSADP7O8", - }, - name: "bsd.com.", - covers: true, - }, - { // end of zone, name hash is after owner hash - rr: &NSEC3{ - Hdr: RR_Header{Name: "3v62ulr0nre83v0rja2vjgtlif9v6rab.com."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "2N1TB3VAIRUOBL6RKDVII42N9TFMIALP", - }, - name: "csd.com.", - covers: true, - }, - { // end of zone, name hash is before beginning of zone - rr: &NSEC3{ - Hdr: RR_Header{Name: "PT3RON8N7PM3A0OE989IB84OOSADP7O8.com."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "3V62ULR0NRE83V0RJA2VJGTLIF9V6RAB", - }, - name: "asd.com.", - covers: true, - }, - // negative tests - { // too short owner name - rr: &NSEC3{ - Hdr: RR_Header{Name: "nl."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "39P99DCGG0MDLARTCRMCF6OFLLUL7PR6", - }, - name: "asd.com.", - covers: false, - }, - { // outside of zone - rr: &NSEC3{ - Hdr: RR_Header{Name: "39p91242oslggest5e6a7cci4iaeqvnk.nl."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "39P99DCGG0MDLARTCRMCF6OFLLUL7PR6", - }, - name: "asd.com.", - covers: false, - }, - { // empty interval - rr: &NSEC3{ - Hdr: RR_Header{Name: "2n1tb3vairuobl6rkdvii42n9tfmialp.com."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "2N1TB3VAIRUOBL6RKDVII42N9TFMIALP", - }, - name: "asd.com.", - covers: false, - }, - { // name hash is before owner hash, not covered - rr: &NSEC3{ - Hdr: RR_Header{Name: "3V62ULR0NRE83V0RJA2VJGTLIF9V6RAB.com."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: 
"F10E9F7EA83FC8F3", - NextDomain: "PT3RON8N7PM3A0OE989IB84OOSADP7O8", - }, - name: "asd.com.", - covers: false, - }, - } { - covers := tc.rr.Cover(tc.name) - if tc.covers != covers { - t.Fatalf("cover failed for %s: expected %t, got %t [record: %s]", tc.name, tc.covers, covers, tc.rr) - } - } -} - -func TestNsec3EmptySalt(t *testing.T) { - rr, _ := NewRR("CK0POJMG874LJREF7EFN8430QVIT8BSM.com. 86400 IN NSEC3 1 1 0 - CK0Q1GIN43N1ARRC9OSM6QPQR81H5M9A NS SOA RRSIG DNSKEY NSEC3PARAM") - - if !rr.(*NSEC3).Match("com.") { - t.Fatalf("expected record to match com. label") + // examples taken from .nl + nsec3, _ := NewRR("39p91242oslggest5e6a7cci4iaeqvnk.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6 NS DS RRSIG") + if !nsec3.(*NSEC3).Cover("snasajsksasasa.nl.") { // 39p94jrinub66hnpem8qdpstrec86pg3 + t.Error("39p94jrinub66hnpem8qdpstrec86pg3. should be covered by 39p91242oslggest5e6a7cci4iaeqvnk.nl. - 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6") + } + nsec3, _ = NewRR("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 SK4F38CQ0ATIEI8MH3RGD0P5I4II6QAN NS SOA TXT RRSIG DNSKEY NSEC3PARAM") + if !nsec3.(*NSEC3).Match("nl.") { // sk4e8fj94u78smusb40o1n0oltbblu2r.nl. + t.Error("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.") } } diff --git a/vendor/github.com/miekg/dns/parse_test.go b/vendor/github.com/miekg/dns/parse_test.go index efdaf59af14d..4e5b5676d65c 100644 --- a/vendor/github.com/miekg/dns/parse_test.go +++ b/vendor/github.com/miekg/dns/parse_test.go @@ -8,11 +8,11 @@ import ( "math/rand" "net" "reflect" - "regexp" "strconv" "strings" "testing" "testing/quick" + "time" ) func TestDotInName(t *testing.T) { @@ -52,10 +52,14 @@ func TestTooLongDomainName(t *testing.T) { _, err := NewRR(dom + " IN A 127.0.0.1") if err == nil { t.Error("should be too long") + } else { + t.Logf("error is %v", err) } _, err = NewRR("..com. 
IN A 127.0.0.1") if err == nil { t.Error("should fail") + } else { + t.Logf("error is %v", err) } } @@ -82,7 +86,7 @@ func TestDomainName(t *testing.T) { } func TestDomainNameAndTXTEscapes(t *testing.T) { - tests := []byte{'.', '(', ')', ';', ' ', '@', '"', '\\', 9, 13, 10, 0, 255} + tests := []byte{'.', '(', ')', ';', ' ', '@', '"', '\\', '\t', '\r', '\n', 0, 255} for _, b := range tests { rrbytes := []byte{ 1, b, 0, // owner @@ -98,14 +102,23 @@ func TestDomainNameAndTXTEscapes(t *testing.T) { s := rr1.String() rr2, err := NewRR(s) if err != nil { - t.Errorf("error parsing unpacked RR's string: %v", err) + t.Errorf("Error parsing unpacked RR's string: %v", err) + t.Errorf(" Bytes: %v", rrbytes) + t.Errorf("String: %v", s) } repacked := make([]byte, len(rrbytes)) if _, err := PackRR(rr2, repacked, 0, nil, false); err != nil { t.Errorf("error packing parsed RR: %v", err) + t.Errorf(" original Bytes: %v", rrbytes) + t.Errorf("unpacked Struct: %v", rr1) + t.Errorf(" parsed Struct: %v", rr2) } if !bytes.Equal(repacked, rrbytes) { t.Error("packed bytes don't match original bytes") + t.Errorf(" original bytes: %v", rrbytes) + t.Errorf(" packed bytes: %v", repacked) + t.Errorf("unpacked struct: %v", rr1) + t.Errorf(" parsed struct: %v", rr2) } } } @@ -114,8 +127,8 @@ func TestTXTEscapeParsing(t *testing.T) { test := [][]string{ {`";"`, `";"`}, {`\;`, `";"`}, - {`"\t"`, `"t"`}, - {`"\r"`, `"r"`}, + {`"\t"`, `"\t"`}, + {`"\r"`, `"\r"`}, {`"\ "`, `" "`}, {`"\;"`, `";"`}, {`"\;\""`, `";\""`}, @@ -124,9 +137,8 @@ func TestTXTEscapeParsing(t *testing.T) { {`"(a\)"`, `"(a)"`}, {`"(a)"`, `"(a)"`}, {`"\048"`, `"0"`}, - {`"\` + "\t" + `"`, `"\009"`}, - {`"\` + "\n" + `"`, `"\010"`}, - {`"\` + "\r" + `"`, `"\013"`}, + {`"\` + "\n" + `"`, `"\n"`}, + {`"\` + "\r" + `"`, `"\r"`}, {`"\` + "\x11" + `"`, `"\017"`}, {`"\'"`, `"'"`}, } @@ -145,7 +157,7 @@ func TestTXTEscapeParsing(t *testing.T) { } func GenerateDomain(r *rand.Rand, size int) []byte { - dnLen := size % 70 // artificially limit size so there's less to interpret if a failure occurs + dnLen := size % 70 // artificially limit size so there's less to intrepret if a failure occurs var dn []byte done := false for i := 0; i < dnLen && !done; { @@ -203,7 +215,7 @@ func TestDomainQuick(t *testing.T) { } func GenerateTXT(r *rand.Rand, size int) []byte { - rdLen := size % 300 // artificially limit size so there's less to interpret if a failure occurs + rdLen := size % 300 // artificially limit size so there's less to intrepret if a failure occurs var rd []byte for i := 0; i < rdLen; { max := rdLen - 1 @@ -335,6 +347,8 @@ func TestParseDirectiveMisc(t *testing.T) { } if rr.String() != o { t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) } } } @@ -346,7 +360,6 @@ func TestNSEC(t *testing.T) { "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSEC": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC", "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSEC TYPE65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC TYPE65534", "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSec Type65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC TYPE65534", - "44ohaq2njb0idnvolt9ggthvsk1e1uv8.skydns.test. 
NSEC3 1 0 0 - 44OHAQ2NJB0IDNVOLT9GGTHVSK1E1UVA": "44ohaq2njb0idnvolt9ggthvsk1e1uv8.skydns.test.\t3600\tIN\tNSEC3\t1 0 0 - 44OHAQ2NJB0IDNVOLT9GGTHVSK1E1UVA", } for i, o := range nsectests { rr, err := NewRR(i) @@ -356,6 +369,8 @@ func TestNSEC(t *testing.T) { } if rr.String() != o { t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) } } } @@ -373,6 +388,8 @@ func TestParseLOC(t *testing.T) { } if rr.String() != o { t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) } } } @@ -389,6 +406,8 @@ func TestParseDS(t *testing.T) { } if rr.String() != o { t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) } } } @@ -397,16 +416,16 @@ func TestQuotes(t *testing.T) { tests := map[string]string{ `t.example.com. IN TXT "a bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a bc\"", `t.example.com. IN TXT "a - bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a\\010 bc\"", + bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a\\n bc\"", `t.example.com. IN TXT ""`: "t.example.com.\t3600\tIN\tTXT\t\"\"", `t.example.com. IN TXT "a"`: "t.example.com.\t3600\tIN\tTXT\t\"a\"", `t.example.com. IN TXT "aa"`: "t.example.com.\t3600\tIN\tTXT\t\"aa\"", `t.example.com. IN TXT "aaa" ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"", `t.example.com. IN TXT "abc" "DEF"`: "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"", `t.example.com. IN TXT "abc" ( "DEF" )`: "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"", - `t.example.com. IN TXT aaa ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"", - `t.example.com. IN TXT aaa aaa;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\" \"aaa\"", - `t.example.com. IN TXT aaa aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\" \"aaa\"", + `t.example.com. IN TXT aaa ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa \"", + `t.example.com. IN TXT aaa aaa;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa aaa\"", + `t.example.com. IN TXT aaa aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa aaa\"", `t.example.com. IN TXT aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"", "cid.urn.arpa. NAPTR 100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.", "cid.urn.arpa. NAPTR 100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.", @@ -421,6 +440,8 @@ func TestQuotes(t *testing.T) { } if rr.String() != o { t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) } } } @@ -433,7 +454,6 @@ func TestParseClass(t *testing.T) { // ClassANY can not occur in zone files // "t.example.com. ANY A 127.0.0.1": "t.example.com. 3600 ANY A 127.0.0.1", "t.example.com. NONE A 127.0.0.1": "t.example.com. 3600 NONE A 127.0.0.1", - "t.example.com. CLASS255 A 127.0.0.1": "t.example.com. 
3600 CLASS255 A 127.0.0.1", } for i, o := range tests { rr, err := NewRR(i) @@ -443,6 +463,8 @@ func TestParseClass(t *testing.T) { } if rr.String() != o { t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) } } } @@ -492,6 +514,8 @@ func TestBrace(t *testing.T) { } if rr.String() != o { t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) } } } @@ -517,88 +541,109 @@ func TestParseFailure(t *testing.T) { } } -func TestOmittedTTL(t *testing.T) { - zone := ` -$ORIGIN example.com. -example.com. 42 IN SOA ns1.example.com. hostmaster.example.com. 1 86400 60 86400 3600 ; TTL=42 SOA -example.com. NS 2 ; TTL=42 absolute owner name -@ MD 3 ; TTL=42 current-origin owner name - MF 4 ; TTL=42 leading-space implied owner name - 43 TYPE65280 \# 1 05 ; TTL=43 implied owner name explicit TTL - MB 6 ; TTL=43 leading-tab implied owner name -$TTL 1337 -example.com. 88 MG 7 ; TTL=88 explicit TTL -example.com. MR 8 ; TTL=1337 after first $TTL -$TTL 314 - 1 TXT 9 ; TTL=1 implied owner name explicit TTL -example.com. DNAME 10 ; TTL=314 after second $TTL +func TestZoneParsing(t *testing.T) { + // parse_test.db + db := ` +a.example.com. IN A 127.0.0.1 +8db7._openpgpkey.example.com. IN OPENPGPKEY mQCNAzIG +$ORIGIN a.example.com. +test IN A 127.0.0.1 + IN SSHFP 1 2 ( + BC6533CDC95A79078A39A56EA7635984ED655318ADA9 + B6159E30723665DA95BB ) +$ORIGIN b.example.com. +test IN CNAME test.a.example.com. ` - reCaseFromComment := regexp.MustCompile(`TTL=(\d+)\s+(.*)`) - records := ParseZone(strings.NewReader(zone), "", "") + start := time.Now().UnixNano() + to := ParseZone(strings.NewReader(db), "", "parse_test.db") var i int - for record := range records { + for x := range to { i++ - if record.Error != nil { - t.Error(record.Error) + if x.Error != nil { + t.Error(x.Error) continue } - expected := reCaseFromComment.FindStringSubmatch(record.Comment) - expectedTTL, _ := strconv.ParseUint(expected[1], 10, 32) - ttl := record.RR.Header().Ttl - if ttl != uint32(expectedTTL) { - t.Errorf("%s: expected TTL %d, got %d", expected[2], expectedTTL, ttl) - } - } - if i != 10 { - t.Errorf("expected %d records, got %d", 5, i) + t.Log(x.RR) } + delta := time.Now().UnixNano() - start + t.Logf("%d RRs parsed in %.2f s (%.2f RR/s)", i, float32(delta)/1e9, float32(i)/(float32(delta)/1e9)) } -func TestRelativeNameErrors(t *testing.T) { - var badZones = []struct { - label string - zoneContents string - expectedErr string - }{ - { - "relative owner name without origin", - "example.com 3600 IN SOA ns.example.com. hostmaster.example.com. 1 86400 60 86400 3600", - "bad owner name", - }, - { - "relative owner name in RDATA", - "example.com. 3600 IN SOA ns hostmaster 1 86400 60 86400 3600", - "bad SOA Ns", - }, - { - "origin reference without origin", - "@ 3600 IN SOA ns.example.com. hostmaster.example.com. 
1 86400 60 86400 3600", - "bad owner name", - }, - { - "relative owner name in $INCLUDE", - "$INCLUDE file.db example.com", - "bad origin name", - }, - { - "relative owner name in $ORIGIN", - "$ORIGIN example.com", - "bad origin name", - }, - } - for _, errorCase := range badZones { - entries := ParseZone(strings.NewReader(errorCase.zoneContents), "", "") - for entry := range entries { - if entry.Error == nil { - t.Errorf("%s: expected error, got nil", errorCase.label) - continue - } - err := entry.Error.err - if err != errorCase.expectedErr { - t.Errorf("%s: expected error `%s`, got `%s`", errorCase.label, errorCase.expectedErr, err) - } - } +func ExampleParseZone() { + zone := `$ORIGIN . +$TTL 3600 ; 1 hour +name IN SOA a6.nstld.com. hostmaster.nic.name. ( + 203362132 ; serial + 300 ; refresh (5 minutes) + 300 ; retry (5 minutes) + 1209600 ; expire (2 weeks) + 300 ; minimum (5 minutes) + ) +$TTL 10800 ; 3 hours +name. 10800 IN NS name. + IN NS g6.nstld.com. + 7200 NS h6.nstld.com. + 3600 IN NS j6.nstld.com. + IN 3600 NS k6.nstld.com. + NS l6.nstld.com. + NS a6.nstld.com. + NS c6.nstld.com. + NS d6.nstld.com. + NS f6.nstld.com. + NS m6.nstld.com. +( + NS m7.nstld.com. +) +$ORIGIN name. +0-0onlus NS ns7.ehiweb.it. + NS ns8.ehiweb.it. +0-g MX 10 mx01.nic + MX 10 mx02.nic + MX 10 mx03.nic + MX 10 mx04.nic +$ORIGIN 0-g.name +moutamassey NS ns01.yahoodomains.jp. + NS ns02.yahoodomains.jp. +` + to := ParseZone(strings.NewReader(zone), "", "testzone") + for x := range to { + fmt.Println(x.RR) + } + // Output: + // name. 3600 IN SOA a6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300 + // name. 10800 IN NS name. + // name. 10800 IN NS g6.nstld.com. + // name. 7200 IN NS h6.nstld.com. + // name. 3600 IN NS j6.nstld.com. + // name. 3600 IN NS k6.nstld.com. + // name. 10800 IN NS l6.nstld.com. + // name. 10800 IN NS a6.nstld.com. + // name. 10800 IN NS c6.nstld.com. + // name. 10800 IN NS d6.nstld.com. + // name. 10800 IN NS f6.nstld.com. + // name. 10800 IN NS m6.nstld.com. + // name. 10800 IN NS m7.nstld.com. + // 0-0onlus.name. 10800 IN NS ns7.ehiweb.it. + // 0-0onlus.name. 10800 IN NS ns8.ehiweb.it. + // 0-g.name. 10800 IN MX 10 mx01.nic.name. + // 0-g.name. 10800 IN MX 10 mx02.nic.name. + // 0-g.name. 10800 IN MX 10 mx03.nic.name. + // 0-g.name. 10800 IN MX 10 mx04.nic.name. + // moutamassey.0-g.name.name. 10800 IN NS ns01.yahoodomains.jp. + // moutamassey.0-g.name.name. 10800 IN NS ns02.yahoodomains.jp. +} + +func ExampleHIP() { + h := `www.example.com IN HIP ( 2 200100107B1A74DF365639CC39F1D578 + AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p +9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ +b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D + rvs.example.com. )` + if hip, err := NewRR(h); err == nil { + fmt.Println(hip.String()) } + // Output: + // www.example.com. 3600 IN HIP 2 200100107B1A74DF365639CC39F1D578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs.example.com. 
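The restored Example functions in this hunk double as executable documentation for the zone-parsing entry points. The same round trip can be exercised at its simplest with `NewRR`, which parses one record from presentation format; its `String()` output is the canonical form that the table-driven tests throughout this file compare against. A minimal sketch (the record contents are made up):

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("example.com. 3600 IN MX 10 mail.example.com.")
	if err != nil {
		panic(err)
	}
	// Canonical presentation format, tab-separated:
	// example.com. 3600 IN MX 10 mail.example.com.
	fmt.Println(rr.String())
}
```
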
} func TestHIP(t *testing.T) { @@ -612,6 +657,7 @@ b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D if err != nil { t.Fatalf("failed to parse RR: %v", err) } + t.Logf("RR: %s", rr) msg := new(Msg) msg.Answer = []RR{rr, rr} bytes, err := msg.Pack() @@ -626,6 +672,7 @@ b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D } for i, rr := range msg.Answer { rr := rr.(*HIP) + t.Logf("RR: %s", rr) if l := len(rr.RendezvousServers); l != 2 { t.Fatalf("2 servers expected, only %d in record %d:\n%v", l, i, msg) } @@ -637,6 +684,24 @@ b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D } } +func ExampleSOA() { + s := "example.com. 1000 SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100" + if soa, err := NewRR(s); err == nil { + fmt.Println(soa.String()) + } + // Output: + // example.com. 1000 IN SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100 +} + +func TestLineNumberError(t *testing.T) { + s := "example.com. 1000 SOA master.example.com. admin.example.com. monkey 4294967294 4294967293 4294967295 100" + if _, err := NewRR(s); err != nil { + if err.Error() != "dns: bad SOA zone parameter: \"monkey\" at line: 1:68" { + t.Error("not expecting this error: ", err) + } + } +} + // Test with no known RR on the line func TestLineNumberError2(t *testing.T) { tests := map[string]string{ @@ -734,6 +799,28 @@ func TestLowercaseTokens(t *testing.T) { } } +func ExampleParseZone_generate() { + // From the manual: http://www.bind9.net/manual/bind/9.3.2/Bv9ARM.ch06.html#id2566761 + zone := "$GENERATE 1-2 0 NS SERVER$.EXAMPLE.\n$GENERATE 1-8 $ CNAME $.0" + to := ParseZone(strings.NewReader(zone), "0.0.192.IN-ADDR.ARPA.", "") + for x := range to { + if x.Error == nil { + fmt.Println(x.RR.String()) + } + } + // Output: + // 0.0.0.192.IN-ADDR.ARPA. 3600 IN NS SERVER1.EXAMPLE. + // 0.0.0.192.IN-ADDR.ARPA. 3600 IN NS SERVER2.EXAMPLE. + // 1.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 1.0.0.0.192.IN-ADDR.ARPA. + // 2.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 2.0.0.0.192.IN-ADDR.ARPA. + // 3.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 3.0.0.0.192.IN-ADDR.ARPA. + // 4.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 4.0.0.0.192.IN-ADDR.ARPA. + // 5.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 5.0.0.0.192.IN-ADDR.ARPA. + // 6.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 6.0.0.0.192.IN-ADDR.ARPA. + // 7.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 7.0.0.0.192.IN-ADDR.ARPA. + // 8.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 8.0.0.0.192.IN-ADDR.ARPA. +} + func TestSRVPacking(t *testing.T) { msg := Msg{} @@ -747,7 +834,11 @@ func TestSRVPacking(t *testing.T) { if err != nil { continue } - port, _ := strconv.ParseUint(p, 10, 16) + port := 8484 + tmp, err := strconv.Atoi(p) + if err == nil { + port = tmp + } rr := &SRV{ Hdr: RR_Header{Name: "somename.", @@ -770,14 +861,20 @@ func TestSRVPacking(t *testing.T) { } func TestParseBackslash(t *testing.T) { - if _, err := NewRR("nul\\000gap.test.globnix.net. 600 IN A 192.0.2.10"); err != nil { + if r, err := NewRR("nul\\000gap.test.globnix.net. 600 IN A 192.0.2.10"); err != nil { t.Errorf("could not create RR with \\000 in it") + } else { + t.Logf("parsed %s", r.String()) } - if _, err := NewRR(`nul\000gap.test.globnix.net. 600 IN TXT "Hello\123"`); err != nil { + if r, err := NewRR(`nul\000gap.test.globnix.net. 600 IN TXT "Hello\123"`); err != nil { t.Errorf("could not create RR with \\000 in it") + } else { + t.Logf("parsed %s", r.String()) } - if _, err := NewRR(`m\ @\ iek.nl. IN 3600 A 127.0.0.1`); err != nil { + if r, err := NewRR(`m\ @\ iek.nl. 
IN 3600 A 127.0.0.1`); err != nil { t.Errorf("could not create RR with \\ and \\@ in it") + } else { + t.Logf("parsed %s", r.String()) } } @@ -824,6 +921,8 @@ func TestGposEidNimloc(t *testing.T) { } if rr.String() != o { t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) } } } @@ -841,6 +940,8 @@ func TestPX(t *testing.T) { } if rr.String() != o { t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) } } } @@ -986,6 +1087,7 @@ func TestTXT(t *testing.T) { if rr.(*TXT).Txt[1] != "b" { t.Errorf("Txt should have two chunk, last one my be 'b', but is %s", rr.(*TXT).Txt[1]) } + t.Log(rr.String()) } func TestTypeXXXX(t *testing.T) { @@ -1027,6 +1129,7 @@ func TestDigit(t *testing.T) { t.Fatalf("failed to parse %v", err) } PackRR(r, buf, 0, nil, false) + t.Log(buf) if buf[5] != i { t.Fatalf("5 pos must be %d, is %d", i, buf[5]) } @@ -1059,6 +1162,7 @@ func TestTxtEqual(t *testing.T) { // This is not an error, but keep this test. t.Errorf("these two TXT records should match:\n%s\n%s", rr1.String(), rr2.String()) } + t.Logf("%s\n%s", rr1.String(), rr2.String()) } func TestTxtLong(t *testing.T) { @@ -1085,8 +1189,12 @@ func TestMalformedPackets(t *testing.T) { // com = 63 6f 6d for _, packet := range packets { data, _ := hex.DecodeString(packet) + // for _, v := range data { + // t.Log(v) + // } var msg Msg msg.Unpack(data) + // println(msg.String()) } } @@ -1105,7 +1213,6 @@ func TestNewPrivateKey(t *testing.T) { {RSASHA1, 1024}, {RSASHA256, 2048}, {DSA, 1024}, - {ED25519, 256}, } for _, algo := range algorithms { @@ -1244,6 +1351,7 @@ func TestParseTokenOverflow(t *testing.T) { if err == nil { t.Fatalf("token overflow should return an error") } + t.Logf("err: %s\n", err) } func TestParseTLSA(t *testing.T) { @@ -1260,25 +1368,8 @@ func TestParseTLSA(t *testing.T) { } if rr.String() != o { t.Errorf("`%s' should be equal to\n`%s', but is `%s'", o, o, rr.String()) - } - } -} - -func TestParseSMIMEA(t *testing.T) { - lt := map[string]string{ - "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t1 1 2 bd80f334566928fc18f58df7e4928c1886f48f71ca3fd41cd9b1854aca7c2180aaacad2819612ed68e7bd3701cc39be7f2529b017c0bc6a53e8fb3f0c7d48070": "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t1 1 2 bd80f334566928fc18f58df7e4928c1886f48f71ca3fd41cd9b1854aca7c2180aaacad2819612ed68e7bd3701cc39be7f2529b017c0bc6a53e8fb3f0c7d48070", - "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t0 0 1 cdcf0fc66b182928c5217ddd42c826983f5a4b94160ee6c1c9be62d38199f710": "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t0 0 1 cdcf0fc66b182928c5217ddd42c826983f5a4b94160ee6c1c9be62d38199f710", - "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t3 0 2 499a1eda2af8828b552cdb9d80c3744a25872fddd73f3898d8e4afa3549595d2dd4340126e759566fe8c26b251fa0c887ba4869f011a65f7e79967c2eb729f5b": "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t3 0 2 499a1eda2af8828b552cdb9d80c3744a25872fddd73f3898d8e4afa3549595d2dd4340126e759566fe8c26b251fa0c887ba4869f011a65f7e79967c2eb729f5b", - "2e85e1db3e62be6eb._smimecert.example.com.\t3600\tIN\tSMIMEA\t3 0 2 499a1eda2af8828b552cdb9d80c3744a25872fddd73f3898d8e4afa3549595d2dd4340126e759566fe8 c26b251fa0c887ba4869f01 1a65f7e79967c2eb729f5b": "2e85e1db3e62be6eb._smimecert.example.com.\t3600\tIN\tSMIMEA\t3 0 2 
499a1eda2af8828b552cdb9d80c3744a25872fddd73f3898d8e4afa3549595d2dd4340126e759566fe8c26b251fa0c887ba4869f011a65f7e79967c2eb729f5b", - } - for i, o := range lt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", o, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) } } } @@ -1299,6 +1390,8 @@ func TestParseSSHFP(t *testing.T) { } if rr.String() != result { t.Errorf("`%s' should be equal to\n\n`%s', but is \n`%s'", o, result, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) } } } @@ -1322,6 +1415,8 @@ func TestParseHINFO(t *testing.T) { } if rr.String() != o { t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) } } } @@ -1342,6 +1437,8 @@ func TestParseCAA(t *testing.T) { } if rr.String() != o { t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) } } } @@ -1388,78 +1485,8 @@ func TestParseURI(t *testing.T) { } if rr.String() != o { t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } - } -} - -func TestParseAVC(t *testing.T) { - avcs := map[string]string{ - `example.org. IN AVC "app-name:WOLFGANG|app-class:OAM|business=yes"`: `example.org. 3600 IN AVC "app-name:WOLFGANG|app-class:OAM|business=yes"`, - } - for avc, o := range avcs { - rr, err := NewRR(avc) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", avc, o, rr.String()) - } - } -} - -func TestParseCSYNC(t *testing.T) { - syncs := map[string]string{ - `example.com. 3600 IN CSYNC 66 3 A NS AAAA`: `example.com. 3600 IN CSYNC 66 3 A NS AAAA`, - } - for s, o := range syncs { - rr, err := NewRR(s) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", s, o, rr.String()) - } - } -} - -func TestParseBadNAPTR(t *testing.T) { - // Should look like: mplus.ims.vodafone.com. 3600 IN NAPTR 10 100 "S" "SIP+D2U" "" _sip._udp.mplus.ims.vodafone.com. - naptr := `mplus.ims.vodafone.com. 3600 IN NAPTR 10 100 S SIP+D2U _sip._udp.mplus.ims.vodafone.com.` - _, err := NewRR(naptr) // parse fails, we should not have leaked a goroutine. - if err == nil { - t.Fatalf("parsing NAPTR should have failed: %s", naptr) - } - if err := goroutineLeaked(); err != nil { - t.Errorf("leaked goroutines: %s", err) - } -} - -func TestUnbalancedParens(t *testing.T) { - sig := `example.com. 3600 IN RRSIG MX 15 2 3600 ( - 1440021600 1438207200 3613 example.com. 
( - oL9krJun7xfBOIWcGHi7mag5/hdZrKWw15jPGrHpjQeRAvTdszaPD+QLs3f - x8A4M3e23mRZ9VrbpMngwcrqNAg== )` - _, err := NewRR(sig) - if err == nil { - t.Fatalf("failed to detect extra opening brace") - } -} - -func TestBad(t *testing.T) { - tests := []string{ - `" TYPE257 9 1E12\x00\x105"`, - `" TYPE256 9 5"`, - `" TYPE257 0\"00000000000000400000000000000000000\x00\x10000000000000000000000000000000000 9 l\x16\x01\x005266"`, - } - for i := range tests { - s, err := strconv.Unquote(tests[i]) - if err != nil { - t.Fatalf("failed to unquote: %q: %s", tests[i], err) - } - if _, err = NewRR(s); err == nil { - t.Errorf("correctly parsed %q", s) + } else { + t.Logf("RR is OK: `%s'", rr.String()) } } } diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go index 41989e7aee7f..6b08e6e9592e 100644 --- a/vendor/github.com/miekg/dns/privaterr.go +++ b/vendor/github.com/miekg/dns/privaterr.go @@ -56,7 +56,8 @@ func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() } func (r *PrivateRR) copy() RR { // make new RR like this: rr := mkPrivateRR(r.Hdr.Rrtype) - rr.Hdr = r.Hdr + newh := r.Hdr.copyHeader() + rr.Hdr = *newh err := r.Data.Copy(rr.Data) if err != nil { diff --git a/vendor/github.com/miekg/dns/privaterr_test.go b/vendor/github.com/miekg/dns/privaterr_test.go index fa5ff48fa6ad..5f177aa47e8d 100644 --- a/vendor/github.com/miekg/dns/privaterr_test.go +++ b/vendor/github.com/miekg/dns/privaterr_test.go @@ -7,7 +7,7 @@ import ( "github.com/miekg/dns" ) -const TypeISBN uint16 = 0xFF00 +const TypeISBN uint16 = 0x0F01 // A crazy new RR type :) type ISBN struct { @@ -59,6 +59,8 @@ func TestPrivateText(t *testing.T) { } if rr.String() != testrecord { t.Errorf("record string representation did not match original %#v != %#v", rr.String(), testrecord) + } else { + t.Log(rr.String()) } } @@ -94,10 +96,12 @@ func TestPrivateByteSlice(t *testing.T) { if rr1.String() != testrecord { t.Errorf("record string representation did not match original %#v != %#v", rr1.String(), testrecord) + } else { + t.Log(rr1.String()) } } -const TypeVERSION uint16 = 0xFF01 +const TypeVERSION uint16 = 0x0F02 type VERSION struct { x string @@ -139,7 +143,7 @@ func (rd *VERSION) Len() int { } var smallzone = `$ORIGIN example.org. -@ 3600 IN SOA sns.dns.icann.org. noc.dns.icann.org. ( +@ SOA sns.dns.icann.org. noc.dns.icann.org. ( 2014091518 7200 3600 1209600 3600 ) A 1.2.3.4 @@ -162,5 +166,6 @@ func TestPrivateZoneParser(t *testing.T) { if err := x.Error; err != nil { t.Fatal(err) } + t.Log(x.RR) } } diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go index f6e7a47a6e8a..099dac9486d5 100644 --- a/vendor/github.com/miekg/dns/reverse.go +++ b/vendor/github.com/miekg/dns/reverse.go @@ -6,10 +6,10 @@ var StringToType = reverseInt16(TypeToString) // StringToClass is the reverse of ClassToString, needed for string parsing. var StringToClass = reverseInt16(ClassToString) -// StringToOpcode is a map of opcodes to strings. +// Map of opcodes strings. var StringToOpcode = reverseInt(OpcodeToString) -// StringToRcode is a map of rcodes to strings. +// Map of rcodes strings. var StringToRcode = reverseInt(RcodeToString) // Reverse a map diff --git a/vendor/github.com/miekg/dns/rr_test.go b/vendor/github.com/miekg/dns/rr_test.go deleted file mode 100644 index 77a153b1e7e4..000000000000 --- a/vendor/github.com/miekg/dns/rr_test.go +++ /dev/null @@ -1,7 +0,0 @@ -package dns - -// testRR returns the RR from string s. The error is thrown away. 
-func testRR(s string) RR { - r, _ := NewRR(s) - return r -} diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/miekg/dns/sanitize.go index c415bdd6c36a..b489f3f050bf 100644 --- a/vendor/github.com/miekg/dns/sanitize.go +++ b/vendor/github.com/miekg/dns/sanitize.go @@ -3,7 +3,7 @@ package dns // Dedup removes identical RRs from rrs. It preserves the original ordering. // The lowest TTL of any duplicates is used in the remaining one. Dedup modifies // rrs. -// m is used to store the RRs temporary. If it is nil a new map will be allocated. +// m is used to store the RRs temporay. If it is nil a new map will be allocated. func Dedup(rrs []RR, m map[string]RR) []RR { if m == nil { m = make(map[string]RR) diff --git a/vendor/github.com/miekg/dns/sanitize_test.go b/vendor/github.com/miekg/dns/sanitize_test.go index b2c0e2081708..c108dc694a9e 100644 --- a/vendor/github.com/miekg/dns/sanitize_test.go +++ b/vendor/github.com/miekg/dns/sanitize_test.go @@ -3,36 +3,37 @@ package dns import "testing" func TestDedup(t *testing.T) { + // make it []string testcases := map[[3]RR][]string{ [...]RR{ - testRR("mIek.nl. IN A 127.0.0.1"), - testRR("mieK.nl. IN A 127.0.0.1"), - testRR("miek.Nl. IN A 127.0.0.1"), + newRR(t, "mIek.nl. IN A 127.0.0.1"), + newRR(t, "mieK.nl. IN A 127.0.0.1"), + newRR(t, "miek.Nl. IN A 127.0.0.1"), }: {"mIek.nl.\t3600\tIN\tA\t127.0.0.1"}, [...]RR{ - testRR("miEk.nl. 2000 IN A 127.0.0.1"), - testRR("mieK.Nl. 1000 IN A 127.0.0.1"), - testRR("Miek.nL. 500 IN A 127.0.0.1"), + newRR(t, "miEk.nl. 2000 IN A 127.0.0.1"), + newRR(t, "mieK.Nl. 1000 IN A 127.0.0.1"), + newRR(t, "Miek.nL. 500 IN A 127.0.0.1"), }: {"miEk.nl.\t500\tIN\tA\t127.0.0.1"}, [...]RR{ - testRR("miek.nl. IN A 127.0.0.1"), - testRR("miek.nl. CH A 127.0.0.1"), - testRR("miek.nl. IN A 127.0.0.1"), + newRR(t, "miek.nl. IN A 127.0.0.1"), + newRR(t, "miek.nl. CH A 127.0.0.1"), + newRR(t, "miek.nl. IN A 127.0.0.1"), }: {"miek.nl.\t3600\tIN\tA\t127.0.0.1", "miek.nl.\t3600\tCH\tA\t127.0.0.1", }, [...]RR{ - testRR("miek.nl. CH A 127.0.0.1"), - testRR("miek.nl. IN A 127.0.0.1"), - testRR("miek.de. IN A 127.0.0.1"), + newRR(t, "miek.nl. CH A 127.0.0.1"), + newRR(t, "miek.nl. IN A 127.0.0.1"), + newRR(t, "miek.de. IN A 127.0.0.1"), }: {"miek.nl.\t3600\tCH\tA\t127.0.0.1", "miek.nl.\t3600\tIN\tA\t127.0.0.1", "miek.de.\t3600\tIN\tA\t127.0.0.1", }, [...]RR{ - testRR("miek.de. IN A 127.0.0.1"), - testRR("miek.nl. 200 IN A 127.0.0.1"), - testRR("miek.nl. 300 IN A 127.0.0.1"), + newRR(t, "miek.de. IN A 127.0.0.1"), + newRR(t, "miek.nl. 200 IN A 127.0.0.1"), + newRR(t, "miek.nl. 300 IN A 127.0.0.1"), }: {"miek.de.\t3600\tIN\tA\t127.0.0.1", "miek.nl.\t200\tIN\tA\t127.0.0.1", }, @@ -50,9 +51,9 @@ func TestDedup(t *testing.T) { func BenchmarkDedup(b *testing.B) { rrs := []RR{ - testRR("miEk.nl. 2000 IN A 127.0.0.1"), - testRR("mieK.Nl. 1000 IN A 127.0.0.1"), - testRR("Miek.nL. 500 IN A 127.0.0.1"), + newRR(nil, "miEk.nl. 2000 IN A 127.0.0.1"), + newRR(nil, "mieK.Nl. 1000 IN A 127.0.0.1"), + newRR(nil, "Miek.nL. 500 IN A 127.0.0.1"), } m := make(map[string]RR) for i := 0; i < b.N; i++ { @@ -62,9 +63,9 @@ func BenchmarkDedup(b *testing.B) { func TestNormalizedString(t *testing.T) { tests := map[RR]string{ - testRR("mIEk.Nl. 3600 IN A 127.0.0.1"): "miek.nl.\tIN\tA\t127.0.0.1", - testRR("m\\ iek.nL. 3600 IN A 127.0.0.1"): "m\\ iek.nl.\tIN\tA\t127.0.0.1", - testRR("m\\\tIeK.nl. 3600 in A 127.0.0.1"): "m\\009iek.nl.\tIN\tA\t127.0.0.1", + newRR(t, "mIEk.Nl. 
3600 IN A 127.0.0.1"): "miek.nl.\tIN\tA\t127.0.0.1", + newRR(t, "m\\ iek.nL. 3600 IN A 127.0.0.1"): "m\\ iek.nl.\tIN\tA\t127.0.0.1", + newRR(t, "m\\\tIeK.nl. 3600 in A 127.0.0.1"): "m\\tiek.nl.\tIN\tA\t127.0.0.1", } for tc, expected := range tests { n := normalizedString(tc) @@ -73,3 +74,11 @@ func TestNormalizedString(t *testing.T) { } } } + +func newRR(t *testing.T, s string) RR { + r, err := NewRR(s) + if err != nil { + t.Logf("newRR: %v", err) + } + return r +} diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go index f9cd47401d1d..0e83797fb597 100644 --- a/vendor/github.com/miekg/dns/scan.go +++ b/vendor/github.com/miekg/dns/scan.go @@ -1,14 +1,23 @@ package dns import ( - "fmt" "io" + "log" "os" - "path/filepath" "strconv" "strings" ) +type debugging bool + +const debug debugging = false + +func (d debugging) Printf(format string, args ...interface{}) { + if d { + log.Printf(format, args...) + } +} + const maxTok = 2048 // Largest token we can return. const maxUint16 = 1<<16 - 1 @@ -29,7 +38,7 @@ const ( zOwner zClass zDirOrigin // $ORIGIN - zDirTTL // $TTL + zDirTtl // $TTL zDirInclude // $INCLUDE zDirGenerate // $GENERATE @@ -42,13 +51,13 @@ const ( zExpectAny // Expect rrtype, ttl or class zExpectAnyNoClass // Expect rrtype or ttl zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS - zExpectAnyNoTTL // Expect rrtype or class - zExpectAnyNoTTLBl // Whitespace after _EXPECT_ANY_NOTTL + zExpectAnyNoTtl // Expect rrtype or class + zExpectAnyNoTtlBl // Whitespace after _EXPECT_ANY_NOTTL zExpectRrtype // Expect rrtype zExpectRrtypeBl // Whitespace BEFORE rrtype zExpectRdata // The first element of the rdata - zExpectDirTTLBl // Space after directive $TTL - zExpectDirTTL // Directive $TTL + zExpectDirTtlBl // Space after directive $TTL + zExpectDirTtl // Directive $TTL zExpectDirOriginBl // Space after directive $ORIGIN zExpectDirOrigin // Directive $ORIGIN zExpectDirIncludeBl // Space after directive $INCLUDE @@ -96,12 +105,6 @@ type Token struct { Comment string } -// ttlState describes the state necessary to fill in an omitted RR TTL -type ttlState struct { - ttl uint32 // ttl is the current default TTL - isByDirective bool // isByDirective indicates whether ttl was set by a $TTL directive -} - // NewRR reads the RR contained in the string s. Only the first RR is // returned. If s contains no RR, return nil with no error. The class // defaults to IN and TTL defaults to 3600. The full zone file syntax @@ -117,8 +120,7 @@ func NewRR(s string) (RR, error) { // ReadRR reads the RR contained in q. // See NewRR for more documentation. func ReadRR(q io.Reader, filename string) (RR, error) { - defttl := &ttlState{defaultTtl, false} - r := <-parseZoneHelper(q, ".", filename, defttl, 1) + r := <-parseZoneHelper(q, ".", filename, 1) if r == nil { return nil, nil } @@ -130,10 +132,10 @@ func ReadRR(q io.Reader, filename string) (RR, error) { } // ParseZone reads a RFC 1035 style zonefile from r. It returns *Tokens on the -// returned channel, each consisting of either a parsed RR and optional comment -// or a nil RR and an error. The string file is only used +// returned channel, which consist out the parsed RR, a potential comment or an error. +// If there is an error the RR is nil. The string file is only used // in error reporting. The string origin is used as the initial origin, as -// if the file would start with an $ORIGIN directive. +// if the file would start with: $ORIGIN origin . 
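The doc comment in this hunk pins down ParseZone's channel contract: each *Token carries either a parsed RR (plus an optional comment) or an error with a nil RR, and the channel is closed once the reader is exhausted. A minimal caller sketch built only from the names visible in this hunk; the zone text and the "example.zone" label are illustrative, not from the patch:

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	zone := "$ORIGIN example.org.\n$TTL 3600\n@ IN A 192.0.2.1\n"
	// ParseZone closes the channel when the end of the reader is
	// reached, so a plain range loop drains it completely.
	for tok := range dns.ParseZone(strings.NewReader(zone), "example.org.", "example.zone") {
		if tok.Error != nil {
			log.Println(tok.Error) // on error the RR field is nil
			continue
		}
		fmt.Println(tok.RR) // example.org. 3600 IN A 192.0.2.1
	}
}
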
// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported. // The channel t is closed by ParseZone when the end of r is reached. // @@ -155,37 +157,25 @@ func ReadRR(q io.Reader, filename string) (RR, error) { // The text "; this is comment" is returned in Token.Comment. Comments inside the // RR are discarded. Comments on a line by themselves are discarded too. func ParseZone(r io.Reader, origin, file string) chan *Token { - return parseZoneHelper(r, origin, file, nil, 10000) + return parseZoneHelper(r, origin, file, 10000) } -func parseZoneHelper(r io.Reader, origin, file string, defttl *ttlState, chansize int) chan *Token { +func parseZoneHelper(r io.Reader, origin, file string, chansize int) chan *Token { t := make(chan *Token, chansize) - go parseZone(r, origin, file, defttl, t, 0) + go parseZone(r, origin, file, t, 0) return t } -func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, include int) { +func parseZone(r io.Reader, origin, f string, t chan *Token, include int) { defer func() { if include == 0 { close(t) } }() - s, cancel := scanInit(r) + s := scanInit(r) c := make(chan lex) // Start the lexer go zlexer(s, c) - - defer func() { - cancel() - // zlexer can send up to three tokens, the next one and possibly 2 remainders. - // Do a non-blocking read. - _, ok := <-c - _, ok = <-c - _, ok = <-c - if !ok { - // too bad - } - }() // 6 possible beginnings of a line, _ is a space // 0. zRRTYPE -> all omitted until the rrtype // 1. zOwner _ zRrtype -> class/ttl omitted @@ -196,16 +186,18 @@ func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, i // After detecting these, we know the zRrtype so we can jump to functions // handling the rdata for each of these types. - if origin != "" { - origin = Fqdn(origin) - if _, ok := IsDomainName(origin); !ok { - t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}} - return - } + if origin == "" { + origin = "." + } + origin = Fqdn(origin) + if _, ok := IsDomainName(origin); !ok { + t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}} + return } st := zExpectOwnerDir // initial state var h RR_Header + var defttl uint32 = defaultTtl var prevName string for l := range c { // Lexer spotted an error already @@ -217,25 +209,31 @@ func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, i switch st { case zExpectOwnerDir: // We can also expect a directive, like $TTL or $ORIGIN - if defttl != nil { - h.Ttl = defttl.ttl - } + h.Ttl = defttl h.Class = ClassINET switch l.value { case zNewline: st = zExpectOwnerDir case zOwner: h.Name = l.token - name, ok := toAbsoluteName(l.token, origin) + if l.token[0] == '@' { + h.Name = origin + prevName = h.Name + st = zExpectOwnerBl + break + } + if h.Name[l.length-1] != '.' 
{ + h.Name = appendOrigin(h.Name, origin) + } + _, ok := IsDomainName(l.token) if !ok { t <- &Token{Error: &ParseError{f, "bad owner name", l}} return } - h.Name = name prevName = h.Name st = zExpectOwnerBl - case zDirTTL: - st = zExpectDirTTLBl + case zDirTtl: + st = zExpectDirTtlBl case zDirOrigin: st = zExpectDirOriginBl case zDirInclude: @@ -254,16 +252,15 @@ func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, i // Discard, can happen when there is nothing on the // line except the RR type case zString: - ttl, ok := stringToTTL(l.token) + ttl, ok := stringToTtl(l.token) if !ok { t <- &Token{Error: &ParseError{f, "not a TTL", l}} return } h.Ttl = ttl - if defttl == nil || !defttl.isByDirective { - defttl = &ttlState{ttl, false} - } - st = zExpectAnyNoTTLBl + // Don't about the defttl, we should take the $TTL value + // defttl = ttl + st = zExpectAnyNoTtlBl default: t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}} @@ -281,16 +278,25 @@ func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, i return } neworigin := origin // There may be optionally a new origin set after the filename, if not use current one - switch l := <-c; l.value { + l := <-c + switch l.value { case zBlank: l := <-c if l.value == zString { - name, ok := toAbsoluteName(l.token, origin) - if !ok { + if _, ok := IsDomainName(l.token); !ok || l.length == 0 || l.err { t <- &Token{Error: &ParseError{f, "bad origin name", l}} return } - neworigin = name + // a new origin is specified. + if l.token[l.length-1] != '.' { + if origin != "." { // Prevent .. endings + neworigin = l.token + "." + origin + } else { + neworigin = l.token + origin + } + } else { + neworigin = l.token + } } case zNewline, zEOF: // Ok @@ -299,32 +305,24 @@ func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, i return } // Start with the new file - includePath := l.token - if !filepath.IsAbs(includePath) { - includePath = filepath.Join(filepath.Dir(f), includePath) - } - r1, e1 := os.Open(includePath) + r1, e1 := os.Open(l.token) if e1 != nil { - msg := fmt.Sprintf("failed to open `%s'", l.token) - if !filepath.IsAbs(l.token) { - msg += fmt.Sprintf(" as `%s'", includePath) - } - t <- &Token{Error: &ParseError{f, msg, l}} + t <- &Token{Error: &ParseError{f, "failed to open `" + l.token + "'", l}} return } if include+1 > 7 { t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}} return } - parseZone(r1, neworigin, includePath, defttl, t, include+1) + parseZone(r1, l.token, neworigin, t, include+1) st = zExpectOwnerDir - case zExpectDirTTLBl: + case zExpectDirTtlBl: if l.value != zBlank { t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}} return } - st = zExpectDirTTL - case zExpectDirTTL: + st = zExpectDirTtl + case zExpectDirTtl: if l.value != zString { t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} return @@ -333,12 +331,12 @@ func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, i t <- &Token{Error: e} return } - ttl, ok := stringToTTL(l.token) + ttl, ok := stringToTtl(l.token) if !ok { t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} return } - defttl = &ttlState{ttl, true} + defttl = ttl st = zExpectOwnerDir case zExpectDirOriginBl: if l.value != zBlank { @@ -354,12 +352,19 @@ func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, i if e, _ := slurpRemainder(c, f); e != nil { t <- &Token{Error: e} } - name, ok := 
toAbsoluteName(l.token, origin) - if !ok { + if _, ok := IsDomainName(l.token); !ok { t <- &Token{Error: &ParseError{f, "bad origin name", l}} return } - origin = name + if l.token[l.length-1] != '.' { + if origin != "." { // Prevent .. endings + origin = l.token + "." + origin + } else { + origin = l.token + origin + } + } else { + origin = l.token + } st = zExpectOwnerDir case zExpectDirGenerateBl: if l.value != zBlank { @@ -386,26 +391,20 @@ func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, i case zExpectAny: switch l.value { case zRrtpe: - if defttl == nil { - t <- &Token{Error: &ParseError{f, "missing TTL with no previous value", l}} - return - } h.Rrtype = l.torc st = zExpectRdata case zClass: h.Class = l.torc st = zExpectAnyNoClassBl case zString: - ttl, ok := stringToTTL(l.token) + ttl, ok := stringToTtl(l.token) if !ok { t <- &Token{Error: &ParseError{f, "not a TTL", l}} return } h.Ttl = ttl - if defttl == nil || !defttl.isByDirective { - defttl = &ttlState{ttl, false} - } - st = zExpectAnyNoTTLBl + // defttl = ttl // don't set the defttl here + st = zExpectAnyNoTtlBl default: t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}} return @@ -416,13 +415,13 @@ func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, i return } st = zExpectAnyNoClass - case zExpectAnyNoTTLBl: + case zExpectAnyNoTtlBl: if l.value != zBlank { t <- &Token{Error: &ParseError{f, "no blank before TTL", l}} return } - st = zExpectAnyNoTTL - case zExpectAnyNoTTL: + st = zExpectAnyNoTtl + case zExpectAnyNoTtl: switch l.value { case zClass: h.Class = l.torc @@ -437,15 +436,13 @@ func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, i case zExpectAnyNoClass: switch l.value { case zString: - ttl, ok := stringToTTL(l.token) + ttl, ok := stringToTtl(l.token) if !ok { t <- &Token{Error: &ParseError{f, "not a TTL", l}} return } h.Ttl = ttl - if defttl == nil || !defttl.isByDirective { - defttl = &ttlState{ttl, false} - } + // defttl = ttl // don't set the def ttl anymore st = zExpectRrtypeBl case zRrtpe: h.Rrtype = l.torc @@ -508,12 +505,14 @@ func zlexer(s *scan, c chan lex) { if stri >= maxTok { l.token = "token length insufficient for parsing" l.err = true + debug.Printf("[%+v]", l.token) c <- l return } if comi >= maxTok { l.token = "comment length insufficient for parsing" l.err = true + debug.Printf("[%+v]", l.token) c <- l return } @@ -548,7 +547,7 @@ func zlexer(s *scan, c chan lex) { // escape $... 
start with a \ not a $, so this will work switch l.tokenUpper { case "$TTL": - l.value = zDirTTL + l.value = zDirTtl case "$ORIGIN": l.value = zDirOrigin case "$INCLUDE": @@ -556,6 +555,7 @@ func zlexer(s *scan, c chan lex) { case "$GENERATE": l.value = zDirGenerate } + debug.Printf("[7 %+v]", l.token) c <- l } else { l.value = zString @@ -577,7 +577,6 @@ func zlexer(s *scan, c chan lex) { return } l.value = zRrtpe - rrtype = true l.torc = t } } @@ -598,14 +597,16 @@ func zlexer(s *scan, c chan lex) { } } } + debug.Printf("[6 %+v]", l.token) c <- l } stri = 0 - + // I reverse space stuff here if !space && !commt { l.value = zBlank l.token = " " l.length = 1 + debug.Printf("[5 %+v]", l.token) c <- l } owner = false @@ -626,8 +627,8 @@ func zlexer(s *scan, c chan lex) { if stri > 0 { l.value = zString l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) l.length = stri + debug.Printf("[4 %+v]", l.token) c <- l stri = 0 } @@ -662,9 +663,9 @@ func zlexer(s *scan, c chan lex) { owner = true l.value = zNewline l.token = "\n" - l.tokenUpper = l.token l.length = 1 l.comment = string(com[:comi]) + debug.Printf("[3 %+v %+v]", l.token, l.comment) c <- l l.comment = "" comi = 0 @@ -690,12 +691,13 @@ func zlexer(s *scan, c chan lex) { rrtype = true } } + debug.Printf("[2 %+v]", l.token) c <- l } l.value = zNewline l.token = "\n" - l.tokenUpper = l.token l.length = 1 + debug.Printf("[1 %+v]", l.token) c <- l stri = 0 commt = false @@ -738,9 +740,9 @@ func zlexer(s *scan, c chan lex) { if stri != 0 { l.value = zString l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) l.length = stri + debug.Printf("[%+v]", l.token) c <- l stri = 0 } @@ -748,7 +750,6 @@ func zlexer(s *scan, c chan lex) { // send quote itself as separate token l.value = zQuote l.token = "\"" - l.tokenUpper = l.token l.length = 1 c <- l quote = !quote @@ -774,8 +775,8 @@ func zlexer(s *scan, c chan lex) { brace-- if brace < 0 { l.token = "extra closing brace" - l.tokenUpper = l.token l.err = true + debug.Printf("[%+v]", l.token) c <- l return } @@ -798,15 +799,9 @@ func zlexer(s *scan, c chan lex) { if stri > 0 { // Send remainder l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) l.length = stri l.value = zString - c <- l - } - if brace != 0 { - l.token = "unbalanced brace" - l.tokenUpper = l.token - l.err = true + debug.Printf("[%+v]", l.token) c <- l } } @@ -817,8 +812,8 @@ func classToInt(token string) (uint16, bool) { if len(token) < offset+1 { return 0, false } - class, err := strconv.ParseUint(token[offset:], 10, 16) - if err != nil { + class, ok := strconv.Atoi(token[offset:]) + if ok != nil || class > maxUint16 { return 0, false } return uint16(class), true @@ -830,15 +825,15 @@ func typeToInt(token string) (uint16, bool) { if len(token) < offset+1 { return 0, false } - typ, err := strconv.ParseUint(token[offset:], 10, 16) - if err != nil { + typ, ok := strconv.Atoi(token[offset:]) + if ok != nil || typ > maxUint16 { return 0, false } return uint16(typ), true } -// stringToTTL parses things like 2w, 2m, etc, and returns the time in seconds. -func stringToTTL(token string) (uint32, bool) { +// Parse things like 2w, 2m, etc, Return the time in seconds. 
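Both sides of this hunk document the same behaviour: unit-suffixed tokens ("2w", "2m", ...) fold into seconds. A standalone sketch of that folding, assuming the usual BIND multipliers (w/d/h/m/s); this illustrates the described behaviour and is not the patched function:

// ttlSeconds is a hypothetical stand-in: digits accumulate and a unit
// letter flushes them into the running total. Overflow is not checked
// in this sketch.
func ttlSeconds(token string) (uint32, bool) {
	var total, n uint32
	for _, c := range token {
		switch {
		case c >= '0' && c <= '9':
			n = n*10 + uint32(c-'0')
		case c == 's' || c == 'S':
			total += n
			n = 0
		case c == 'm' || c == 'M':
			total += n * 60
			n = 0
		case c == 'h' || c == 'H':
			total += n * 3600
			n = 0
		case c == 'd' || c == 'D':
			total += n * 86400
			n = 0
		case c == 'w' || c == 'W':
			total += n * 604800
			n = 0
		default:
			return 0, false
		}
	}
	return total + n, true // bare digits count as seconds: "3600" == 3600
}
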
+func stringToTtl(token string) (uint32, bool) { s := uint32(0) i := uint32(0) for _, c := range token { @@ -911,34 +906,6 @@ func stringToCm(token string) (e, m uint8, ok bool) { return } -func toAbsoluteName(name, origin string) (absolute string, ok bool) { - // check for an explicit origin reference - if name == "@" { - // require a nonempty origin - if origin == "" { - return "", false - } - return origin, true - } - - // require a valid domain name - _, ok = IsDomainName(name) - if !ok || name == "" { - return "", false - } - - // check if name is already absolute - if name[len(name)-1] == '.' { - return name, true - } - - // require a nonempty origin - if origin == "" { - return "", false - } - return appendOrigin(name, origin), true -} - func appendOrigin(name, origin string) string { if origin == "." { return name + origin diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index fb6f95d1da99..e521dc063e95 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -64,63 +64,74 @@ func endingToString(c chan lex, errstr, f string) (string, *ParseError, string) return s, nil, l.comment } -// A remainder of the rdata with embedded spaces, split on unquoted whitespace -// and return the parsed string slice or an error +// A remainder of the rdata with embedded spaces, return the parsed string slice (sans the spaces) +// or an error func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, string) { // Get the remaining data until we see a zNewline + quote := false l := <-c + var s []string if l.err { - return nil, &ParseError{f, errstr, l}, "" - } - - // Build the slice - s := make([]string, 0) - quote := false - empty := false - for l.value != zNewline && l.value != zEOF { - if l.err { - return nil, &ParseError{f, errstr, l}, "" - } - switch l.value { - case zString: - empty = false - if len(l.token) > 255 { - // split up tokens that are larger than 255 into 255-chunks - sx := []string{} - p, i := 0, 255 - for { - if i <= len(l.token) { - sx = append(sx, l.token[p:i]) - } else { - sx = append(sx, l.token[p:]) - break - + return s, &ParseError{f, errstr, l}, "" + } + switch l.value == zQuote { + case true: // A number of quoted string + s = make([]string, 0) + empty := true + for l.value != zNewline && l.value != zEOF { + if l.err { + return nil, &ParseError{f, errstr, l}, "" + } + switch l.value { + case zString: + empty = false + if len(l.token) > 255 { + // split up tokens that are larger than 255 into 255-chunks + sx := []string{} + p, i := 0, 255 + for { + if i <= len(l.token) { + sx = append(sx, l.token[p:i]) + } else { + sx = append(sx, l.token[p:]) + break + + } + p, i = p+255, i+255 } - p, i = p+255, i+255 + s = append(s, sx...) + break } - s = append(s, sx...) - break - } - s = append(s, l.token) - case zBlank: - if quote { - // zBlank can only be seen in between txt parts. + s = append(s, l.token) + case zBlank: + if quote { + // zBlank can only be seen in between txt parts. 
+ return nil, &ParseError{f, errstr, l}, "" + } + case zQuote: + if empty && quote { + s = append(s, "") + } + quote = !quote + empty = true + default: return nil, &ParseError{f, errstr, l}, "" } - case zQuote: - if empty && quote { - s = append(s, "") - } - quote = !quote - empty = true - default: + l = <-c + } + if quote { return nil, &ParseError{f, errstr, l}, "" } - l = <-c - } - if quote { - return nil, &ParseError{f, errstr, l}, "" + case false: // Unquoted text record + s = make([]string, 1) + for l.value != zNewline && l.value != zEOF { + if l.err { + return s, &ParseError{f, errstr, l}, "" + } + s[0] += l.token + l = <-c + } } return s, nil, l.comment } @@ -130,10 +141,9 @@ func setA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { // Dynamic updates. return rr, nil, "" } - rr.A = net.ParseIP(l.token) if rr.A == nil || l.err { return nil, &ParseError{f, "bad A A", l}, "" @@ -146,10 +156,9 @@ func setAAAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - rr.AAAA = net.ParseIP(l.token) if rr.AAAA == nil || l.err { return nil, &ParseError{f, "bad AAAA AAAA", l}, "" @@ -163,15 +172,20 @@ func setNS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { l := <-c rr.Ns = l.token - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { + if l.token == "@" { + rr.Ns = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad NS Ns", l}, "" } - rr.Ns = name + if rr.Ns[l.length-1] != '.' { + rr.Ns = appendOrigin(rr.Ns, o) + } return rr, nil, "" } @@ -184,12 +198,17 @@ func setPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { if l.length == 0 { // dynamic update rr. return rr, nil, "" } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { + if l.token == "@" { + rr.Ptr = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad PTR Ptr", l}, "" } - rr.Ptr = name + if rr.Ptr[l.length-1] != '.' { + rr.Ptr = appendOrigin(rr.Ptr, o) + } return rr, nil, "" } @@ -199,15 +218,20 @@ func setNSAPPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) l := <-c rr.Ptr = l.token - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { + if l.token == "@" { + rr.Ptr = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad NSAP-PTR Ptr", l}, "" } - rr.Ptr = name + if rr.Ptr[l.length-1] != '.' { + rr.Ptr = appendOrigin(rr.Ptr, o) + } return rr, nil, "" } @@ -217,26 +241,34 @@ func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { l := <-c rr.Mbox = l.token - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - mbox, mboxOk := toAbsoluteName(l.token, o) - if l.err || !mboxOk { - return nil, &ParseError{f, "bad RP Mbox", l}, "" + if l.token == "@" { + rr.Mbox = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RP Mbox", l}, "" + } + if rr.Mbox[l.length-1] != '.' 
{ + rr.Mbox = appendOrigin(rr.Mbox, o) + } } - rr.Mbox = mbox - <-c // zBlank l = <-c rr.Txt = l.token - - txt, txtOk := toAbsoluteName(l.token, o) - if l.err || !txtOk { + if l.token == "@" { + rr.Txt = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad RP Txt", l}, "" } - rr.Txt = txt - + if rr.Txt[l.length-1] != '.' { + rr.Txt = appendOrigin(rr.Txt, o) + } return rr, nil, "" } @@ -246,15 +278,20 @@ func setMR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { l := <-c rr.Mr = l.token - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { + if l.token == "@" { + rr.Mr = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad MR Mr", l}, "" } - rr.Mr = name + if rr.Mr[l.length-1] != '.' { + rr.Mr = appendOrigin(rr.Mr, o) + } return rr, nil, "" } @@ -264,15 +301,20 @@ func setMB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { l := <-c rr.Mb = l.token - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { + if l.token == "@" { + rr.Mb = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad MB Mb", l}, "" } - rr.Mb = name + if rr.Mb[l.length-1] != '.' { + rr.Mb = appendOrigin(rr.Mb, o) + } return rr, nil, "" } @@ -282,15 +324,20 @@ func setMG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { l := <-c rr.Mg = l.token - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { + if l.token == "@" { + rr.Mg = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad MG Mg", l}, "" } - rr.Mg = name + if rr.Mg[l.length-1] != '.' { + rr.Mg = appendOrigin(rr.Mg, o) + } return rr, nil, "" } @@ -326,26 +373,34 @@ func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { l := <-c rr.Rmail = l.token - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - rmail, rmailOk := toAbsoluteName(l.token, o) - if l.err || !rmailOk { - return nil, &ParseError{f, "bad MINFO Rmail", l}, "" + if l.token == "@" { + rr.Rmail = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MINFO Rmail", l}, "" + } + if rr.Rmail[l.length-1] != '.' { + rr.Rmail = appendOrigin(rr.Rmail, o) + } } - rr.Rmail = rmail - <-c // zBlank l = <-c rr.Email = l.token - - email, emailOk := toAbsoluteName(l.token, o) - if l.err || !emailOk { + if l.token == "@" { + rr.Email = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad MINFO Email", l}, "" } - rr.Email = email - + if rr.Email[l.length-1] != '.' { + rr.Email = appendOrigin(rr.Email, o) + } return rr, nil, "" } @@ -355,15 +410,20 @@ func setMF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { l := <-c rr.Mf = l.token - if l.length == 0 { // dynamic update rr. 
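The "// dynamic update rr." comments that this revert strips from nearly every setter all guard the same early return: a zero-length token means the record carries no rdata, which is legal because RFC 2136 dynamic updates ship header-only RRs. A hedged illustration of what that early return permits, assuming the parser behaviour shown in these hunks:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Header-only record, the shape an RFC 2136 delete uses. With the
	// early return above, this parses to an MX with empty rdata rather
	// than failing (an assumption based on the hunks in this patch).
	rr, err := dns.NewRR("example.org. 0 ANY MX")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T: %v\n", rr, rr)
}
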
+ if l.length == 0 { return rr, nil, "" } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { + if l.token == "@" { + rr.Mf = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad MF Mf", l}, "" } - rr.Mf = name + if rr.Mf[l.length-1] != '.' { + rr.Mf = appendOrigin(rr.Mf, o) + } return rr, nil, "" } @@ -373,15 +433,20 @@ func setMD(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { l := <-c rr.Md = l.token - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { + if l.token == "@" { + rr.Md = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad MD Md", l}, "" } - rr.Md = name + if rr.Md[l.length-1] != '.' { + rr.Md = appendOrigin(rr.Md, o) + } return rr, nil, "" } @@ -390,54 +455,57 @@ func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - i, e := strconv.ParseUint(l.token, 10, 16) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad MX Pref", l}, "" } rr.Preference = uint16(i) - <-c // zBlank l = <-c // zString rr.Mx = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { + if l.token == "@" { + rr.Mx = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad MX Mx", l}, "" } - rr.Mx = name - + if rr.Mx[l.length-1] != '.' { + rr.Mx = appendOrigin(rr.Mx, o) + } return rr, nil, "" } func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr := new(RT) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - i, e := strconv.ParseUint(l.token, 10, 16) + i, e := strconv.Atoi(l.token) if e != nil { return nil, &ParseError{f, "bad RT Preference", l}, "" } rr.Preference = uint16(i) - <-c // zBlank l = <-c // zString rr.Host = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { + if l.token == "@" { + rr.Host = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad RT Host", l}, "" } - rr.Host = name - + if rr.Host[l.length-1] != '.' { + rr.Host = appendOrigin(rr.Host, o) + } return rr, nil, "" } @@ -446,25 +514,28 @@ func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - i, e := strconv.ParseUint(l.token, 10, 16) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad AFSDB Subtype", l}, "" } rr.Subtype = uint16(i) - <-c // zBlank l = <-c // zString rr.Hostname = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { + if l.token == "@" { + rr.Hostname = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad AFSDB Hostname", l}, "" } - rr.Hostname = name + if rr.Hostname[l.length-1] != '.' { + rr.Hostname = appendOrigin(rr.Hostname, o) + } return rr, nil, "" } @@ -473,10 +544,9 @@ func setX25(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. 
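The other change repeated through these setters is strconv.ParseUint(l.token, 10, 16) on the minus side versus strconv.Atoi on the plus side. The practical difference is range enforcement: Atoi accepts anything that fits an int, and the later uint16(i) conversion then wraps silently, while ParseUint's bitSize argument rejects out-of-range input up front. A standalone demonstration, not from the patch:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	i, err := strconv.Atoi("70000") // accepted: 70000 fits an int
	fmt.Println(uint16(i), err)     // 4464 <nil> — silent wraparound

	_, err = strconv.ParseUint("70000", 10, 16) // bitSize 16 enforces the field width
	fmt.Println(err)                            // value out of range
}
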
+ if l.length == 0 { return rr, nil, "" } - if l.err { return nil, &ParseError{f, "bad X25 PSDNAddress", l}, "" } @@ -489,25 +559,28 @@ func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - i, e := strconv.ParseUint(l.token, 10, 16) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad KX Pref", l}, "" } rr.Preference = uint16(i) - <-c // zBlank l = <-c // zString rr.Exchanger = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { + if l.token == "@" { + rr.Exchanger = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad KX Exchanger", l}, "" } - rr.Exchanger = name + if rr.Exchanger[l.length-1] != '.' { + rr.Exchanger = appendOrigin(rr.Exchanger, o) + } return rr, nil, "" } @@ -517,15 +590,20 @@ func setCNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { l := <-c rr.Target = l.token - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { + if l.token == "@" { + rr.Target = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad CNAME Target", l}, "" } - rr.Target = name + if rr.Target[l.length-1] != '.' { + rr.Target = appendOrigin(rr.Target, o) + } return rr, nil, "" } @@ -535,15 +613,20 @@ func setDNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { l := <-c rr.Target = l.token - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad DNAME Target", l}, "" + if l.token == "@" { + rr.Target = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad CNAME Target", l}, "" + } + if rr.Target[l.length-1] != '.' { + rr.Target = appendOrigin(rr.Target, o) } - rr.Target = name return rr, nil, "" } @@ -553,26 +636,35 @@ func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { l := <-c rr.Ns = l.token - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - ns, nsOk := toAbsoluteName(l.token, o) - if l.err || !nsOk { - return nil, &ParseError{f, "bad SOA Ns", l}, "" + <-c // zBlank + if l.token == "@" { + rr.Ns = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad SOA Ns", l}, "" + } + if rr.Ns[l.length-1] != '.' { + rr.Ns = appendOrigin(rr.Ns, o) + } } - rr.Ns = ns - <-c // zBlank l = <-c rr.Mbox = l.token - - mbox, mboxOk := toAbsoluteName(l.token, o) - if l.err || !mboxOk { - return nil, &ParseError{f, "bad SOA Mbox", l}, "" + if l.token == "@" { + rr.Mbox = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad SOA Mbox", l}, "" + } + if rr.Mbox[l.length-1] != '.' 
{ + rr.Mbox = appendOrigin(rr.Mbox, o) + } } - rr.Mbox = mbox - <-c // zBlank var ( @@ -584,13 +676,12 @@ func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { if l.err { return nil, &ParseError{f, "bad SOA zone parameter", l}, "" } - if j, e := strconv.ParseUint(l.token, 10, 32); e != nil { + if j, e := strconv.Atoi(l.token); e != nil { if i == 0 { - // Serial must be a number + // Serial should be a number return nil, &ParseError{f, "bad SOA zone parameter", l}, "" } - // We allow other fields to be unitful duration strings - if v, ok = stringToTTL(l.token); !ok { + if v, ok = stringToTtl(l.token); !ok { return nil, &ParseError{f, "bad SOA zone parameter", l}, "" } @@ -622,41 +713,42 @@ func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - i, e := strconv.ParseUint(l.token, 10, 16) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad SRV Priority", l}, "" } rr.Priority = uint16(i) - <-c // zBlank l = <-c // zString - i, e = strconv.ParseUint(l.token, 10, 16) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad SRV Weight", l}, "" } rr.Weight = uint16(i) - <-c // zBlank l = <-c // zString - i, e = strconv.ParseUint(l.token, 10, 16) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad SRV Port", l}, "" } rr.Port = uint16(i) - <-c // zBlank l = <-c // zString rr.Target = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { + if l.token == "@" { + rr.Target = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad SRV Target", l}, "" } - rr.Target = name + if rr.Target[l.length-1] != '.' { + rr.Target = appendOrigin(rr.Target, o) + } return rr, nil, "" } @@ -665,24 +757,21 @@ func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - i, e := strconv.ParseUint(l.token, 10, 16) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad NAPTR Order", l}, "" } rr.Order = uint16(i) - <-c // zBlank l = <-c // zString - i, e = strconv.ParseUint(l.token, 10, 16) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad NAPTR Preference", l}, "" } rr.Preference = uint16(i) - // Flags <-c // zBlank l = <-c // _QUOTE @@ -739,17 +828,21 @@ func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } else { return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" } - // After quote no space?? <-c // zBlank l = <-c // zString rr.Replacement = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { + if l.token == "@" { + rr.Replacement = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad NAPTR Replacement", l}, "" } - rr.Replacement = name + if rr.Replacement[l.length-1] != '.' { + rr.Replacement = appendOrigin(rr.Replacement, o) + } return rr, nil, "" } @@ -759,26 +852,34 @@ func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { l := <-c rr.PreviousName = l.token - if l.length == 0 { // dynamic update rr. 
+ if l.length == 0 { return rr, nil, "" } - - previousName, previousNameOk := toAbsoluteName(l.token, o) - if l.err || !previousNameOk { - return nil, &ParseError{f, "bad TALINK PreviousName", l}, "" + if l.token == "@" { + rr.PreviousName = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad TALINK PreviousName", l}, "" + } + if rr.PreviousName[l.length-1] != '.' { + rr.PreviousName = appendOrigin(rr.PreviousName, o) + } } - rr.PreviousName = previousName - <-c // zBlank l = <-c rr.NextName = l.token - - nextName, nextNameOk := toAbsoluteName(l.token, o) - if l.err || !nextNameOk { + if l.token == "@" { + rr.NextName = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad TALINK NextName", l}, "" } - rr.NextName = nextName - + if rr.NextName[l.length-1] != '.' { + rr.NextName = appendOrigin(rr.NextName, o) + } return rr, nil, "" } @@ -790,13 +891,12 @@ func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.VertPre = 162 // 10 rr.Size = 18 // 1 ok := false - // North l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - i, e := strconv.ParseUint(l.token, 10, 32) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad LOC Latitude", l}, "" } @@ -808,7 +908,7 @@ func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { goto East } - i, e = strconv.ParseUint(l.token, 10, 32) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad LOC Latitude minutes", l}, "" } @@ -834,7 +934,7 @@ East: // East <-c // zBlank l = <-c - if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { + if i, e := strconv.Atoi(l.token); e != nil || l.err { return nil, &ParseError{f, "bad LOC Longitude", l}, "" } else { rr.Longitude = 1000 * 60 * 60 * uint32(i) @@ -845,7 +945,7 @@ East: if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { goto Altitude } - if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { + if i, e := strconv.Atoi(l.token); e != nil || l.err { return nil, &ParseError{f, "bad LOC Longitude minutes", l}, "" } else { rr.Longitude += 1000 * 60 * uint32(i) @@ -924,16 +1024,14 @@ func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { // HitLength is not represented l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, l.comment } - - i, e := strconv.ParseUint(l.token, 10, 8) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, "" } rr.PublicKeyAlgorithm = uint8(i) - <-c // zBlank l = <-c // zString if l.length == 0 || l.err { @@ -956,11 +1054,19 @@ func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { for l.value != zNewline && l.value != zEOF { switch l.value { case zString: - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { + if l.token == "@" { + xs = append(xs, o) + l = <-c + continue + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" } - xs = append(xs, name) + if l.token[l.length-1] != '.' 
{ + l.token = appendOrigin(l.token, o) + } + xs = append(xs, l.token) case zBlank: // Ok default: @@ -977,20 +1083,19 @@ func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, l.comment } - if v, ok := StringToCertType[l.token]; ok { rr.Type = v - } else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil { + } else if i, e := strconv.Atoi(l.token); e != nil { return nil, &ParseError{f, "bad CERT Type", l}, "" } else { rr.Type = uint16(i) } <-c // zBlank l = <-c // zString - i, e := strconv.ParseUint(l.token, 10, 16) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad CERT KeyTag", l}, "" } @@ -999,7 +1104,7 @@ func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { l = <-c // zString if v, ok := StringToAlgorithm[l.token]; ok { rr.Algorithm = v - } else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { + } else if i, e := strconv.Atoi(l.token); e != nil { return nil, &ParseError{f, "bad CERT Algorithm", l}, "" } else { rr.Algorithm = uint8(i) @@ -1024,56 +1129,6 @@ func setOPENPGPKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, strin return rr, nil, c1 } -func setCSYNC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(CSYNC) - rr.Hdr = h - - l := <-c - if l.length == 0 { // dynamic update rr. - return rr, nil, l.comment - } - j, e := strconv.ParseUint(l.token, 10, 32) - if e != nil { - // Serial must be a number - return nil, &ParseError{f, "bad CSYNC serial", l}, "" - } - rr.Serial = uint32(j) - - <-c // zBlank - - l = <-c - j, e = strconv.ParseUint(l.token, 10, 16) - if e != nil { - // Serial must be a number - return nil, &ParseError{f, "bad CSYNC flags", l}, "" - } - rr.Flags = uint16(j) - - rr.TypeBitMap = make([]uint16, 0) - var ( - k uint16 - ok bool - ) - l = <-c - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zBlank: - // Ok - case zString: - if k, ok = StringToType[l.tokenUpper]; !ok { - if k, ok = typeToInt(l.tokenUpper); !ok { - return nil, &ParseError{f, "bad CSYNC TypeBitMap", l}, "" - } - } - rr.TypeBitMap = append(rr.TypeBitMap, k) - default: - return nil, &ParseError{f, "bad CSYNC TypeBitMap", l}, "" - } - l = <-c - } - return rr, nil, l.comment -} - func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { r, e, s := setRRSIG(h, c, o, f) if r != nil { @@ -1085,12 +1140,10 @@ func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr := new(RRSIG) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. 
+ if l.length == 0 { return rr, nil, l.comment } - if t, ok := StringToType[l.tokenUpper]; !ok { if strings.HasPrefix(l.tokenUpper, "TYPE") { t, ok = typeToInt(l.tokenUpper) @@ -1104,31 +1157,27 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } else { rr.TypeCovered = t } - <-c // zBlank l = <-c - i, err := strconv.ParseUint(l.token, 10, 8) + i, err := strconv.Atoi(l.token) if err != nil || l.err { return nil, &ParseError{f, "bad RRSIG Algorithm", l}, "" } rr.Algorithm = uint8(i) - <-c // zBlank l = <-c - i, err = strconv.ParseUint(l.token, 10, 8) + i, err = strconv.Atoi(l.token) if err != nil || l.err { return nil, &ParseError{f, "bad RRSIG Labels", l}, "" } rr.Labels = uint8(i) - <-c // zBlank l = <-c - i, err = strconv.ParseUint(l.token, 10, 32) + i, err = strconv.Atoi(l.token) if err != nil || l.err { return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, "" } rr.OrigTtl = uint32(i) - <-c // zBlank l = <-c if i, err := StringToTime(l.token); err != nil { @@ -1142,7 +1191,6 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } else { rr.Expiration = i } - <-c // zBlank l = <-c if i, err := StringToTime(l.token); err != nil { @@ -1154,30 +1202,32 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } else { rr.Inception = i } - <-c // zBlank l = <-c - i, err = strconv.ParseUint(l.token, 10, 16) + i, err = strconv.Atoi(l.token) if err != nil || l.err { return nil, &ParseError{f, "bad RRSIG KeyTag", l}, "" } rr.KeyTag = uint16(i) - <-c // zBlank l = <-c rr.SignerName = l.token - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad RRSIG SignerName", l}, "" + if l.token == "@" { + rr.SignerName = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RRSIG SignerName", l}, "" + } + if rr.SignerName[l.length-1] != '.' { + rr.SignerName = appendOrigin(rr.SignerName, o) + } } - rr.SignerName = name - s, e, c1 := endingToString(c, "bad RRSIG Signature", f) if e != nil { return nil, e, c1 } rr.Signature = s - return rr, nil, c1 } @@ -1187,15 +1237,20 @@ func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { l := <-c rr.NextDomain = l.token - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, l.comment } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad NSEC NextDomain", l}, "" + if l.token == "@" { + rr.NextDomain = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NSEC NextDomain", l}, "" + } + if rr.NextDomain[l.length-1] != '.' { + rr.NextDomain = appendOrigin(rr.NextDomain, o) + } } - rr.NextDomain = name rr.TypeBitMap = make([]uint16, 0) var ( @@ -1227,25 +1282,24 @@ func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. 
+ if l.length == 0 { return rr, nil, l.comment } - - i, e := strconv.ParseUint(l.token, 10, 8) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad NSEC3 Hash", l}, "" } rr.Hash = uint8(i) <-c // zBlank l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad NSEC3 Flags", l}, "" } rr.Flags = uint8(i) <-c // zBlank l = <-c - i, e = strconv.ParseUint(l.token, 10, 16) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad NSEC3 Iterations", l}, "" } @@ -1255,10 +1309,8 @@ func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { if len(l.token) == 0 || l.err { return nil, &ParseError{f, "bad NSEC3 Salt", l}, "" } - if l.token != "-" { - rr.SaltLength = uint8(len(l.token)) / 2 - rr.Salt = l.token - } + rr.SaltLength = uint8(len(l.token)) / 2 + rr.Salt = l.token <-c l = <-c @@ -1298,35 +1350,32 @@ func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, strin rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - i, e := strconv.ParseUint(l.token, 10, 8) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, "" } rr.Hash = uint8(i) <-c // zBlank l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad NSEC3PARAM Flags", l}, "" } rr.Flags = uint8(i) <-c // zBlank l = <-c - i, e = strconv.ParseUint(l.token, 10, 16) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, "" } rr.Iterations = uint16(i) <-c l = <-c - if l.token != "-" { - rr.SaltLength = uint8(len(l.token)) - rr.Salt = l.token - } + rr.SaltLength = uint8(len(l.token)) + rr.Salt = l.token return rr, nil, "" } @@ -1335,10 +1384,9 @@ func setEUI48(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - if l.length != 17 || l.err { return nil, &ParseError{f, "bad EUI48 Address", l}, "" } @@ -1368,10 +1416,9 @@ func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - if l.length != 23 || l.err { return nil, &ParseError{f, "bad EUI64 Address", l}, "" } @@ -1401,18 +1448,17 @@ func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - i, e := strconv.ParseUint(l.token, 10, 8) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad SSHFP Algorithm", l}, "" } rr.Algorithm = uint8(i) <-c // zBlank l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad SSHFP Type", l}, "" } @@ -1431,25 +1477,24 @@ func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, str rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. 
+ if l.length == 0 { return rr, nil, l.comment } - - i, e := strconv.ParseUint(l.token, 10, 16) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad " + typ + " Flags", l}, "" } rr.Flags = uint16(i) <-c // zBlank l = <-c // zString - i, e = strconv.ParseUint(l.token, 10, 8) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad " + typ + " Protocol", l}, "" } rr.Protocol = uint8(i) <-c // zBlank l = <-c // zString - i, e = strconv.ParseUint(l.token, 10, 8) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" } @@ -1488,25 +1533,24 @@ func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, l.comment } - - i, e := strconv.ParseUint(l.token, 10, 16) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad RKEY Flags", l}, "" } rr.Flags = uint16(i) <-c // zBlank l = <-c // zString - i, e = strconv.ParseUint(l.token, 10, 8) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad RKEY Protocol", l}, "" } rr.Protocol = uint8(i) <-c // zBlank l = <-c // zString - i, e = strconv.ParseUint(l.token, 10, 8) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad RKEY Algorithm", l}, "" } @@ -1544,12 +1588,10 @@ func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr := new(GPOS) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - _, e := strconv.ParseFloat(l.token, 64) if e != nil || l.err { return nil, &ParseError{f, "bad GPOS Longitude", l}, "" @@ -1575,20 +1617,18 @@ func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { rr := new(DS) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, l.comment } - - i, e := strconv.ParseUint(l.token, 10, 16) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, "" } rr.KeyTag = uint16(i) <-c // zBlank l = <-c - if i, e = strconv.ParseUint(l.token, 10, 8); e != nil { + if i, e := strconv.Atoi(l.token); e != nil { i, ok := StringToAlgorithm[l.tokenUpper] if !ok || l.err { return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" @@ -1599,7 +1639,7 @@ func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) } <-c // zBlank l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad " + typ + " DigestType", l}, "" } @@ -1636,20 +1676,18 @@ func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr := new(TA) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. 
+ if l.length == 0 { return rr, nil, l.comment } - - i, e := strconv.ParseUint(l.token, 10, 16) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad TA KeyTag", l}, "" } rr.KeyTag = uint16(i) <-c // zBlank l = <-c - if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { + if i, e := strconv.Atoi(l.token); e != nil { i, ok := StringToAlgorithm[l.tokenUpper] if !ok || l.err { return nil, &ParseError{f, "bad TA Algorithm", l}, "" @@ -1660,7 +1698,7 @@ func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } <-c // zBlank l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad TA DigestType", l}, "" } @@ -1676,27 +1714,25 @@ func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr := new(TLSA) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, l.comment } - - i, e := strconv.ParseUint(l.token, 10, 8) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad TLSA Usage", l}, "" } rr.Usage = uint8(i) <-c // zBlank l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad TLSA Selector", l}, "" } rr.Selector = uint8(i) <-c // zBlank l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) + i, e = strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad TLSA MatchingType", l}, "" } @@ -1710,52 +1746,13 @@ func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setSMIMEA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(SMIMEA) - rr.Hdr = h - - l := <-c - if l.length == 0 { // dynamic update rr. - return rr, nil, l.comment - } - - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad SMIMEA Usage", l}, "" - } - rr.Usage = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad SMIMEA Selector", l}, "" - } - rr.Selector = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad SMIMEA MatchingType", l}, "" - } - rr.MatchingType = uint8(i) - // So this needs be e2 (i.e. 
different than e), because...??
-	s, e2, c1 := endingToString(c, "bad SMIMEA Certificate", f)
-	if e2 != nil {
-		return nil, e2, c1
-	}
-	rr.Certificate = s
-	return rr, nil, c1
-}
-
 func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
 	rr := new(RFC3597)
 	rr.Hdr = h
-
 	l := <-c
 	if l.token != "\\#" {
 		return nil, &ParseError{f, "bad RFC3597 Rdata", l}, ""
 	}
-
 	<-c // zBlank
 	l = <-c
 	rdlength, e := strconv.Atoi(l.token)
@@ -1786,18 +1783,6 @@ func setSPF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
 	return rr, nil, c1
 }
 
-func setAVC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
-	rr := new(AVC)
-	rr.Hdr = h
-
-	s, e, c1 := endingToTxtSlice(c, "bad AVC Txt", f)
-	if e != nil {
-		return nil, e, ""
-	}
-	rr.Txt = s
-	return rr, nil, c1
-}
-
 func setTXT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
 	rr := new(TXT)
 	rr.Hdr = h
@@ -1829,18 +1814,18 @@ func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
 	rr.Hdr = h
 
 	l := <-c
-	if l.length == 0 { // dynamic update rr.
+	if l.length == 0 { // Dynamic updates.
 		return rr, nil, ""
 	}
 
-	i, e := strconv.ParseUint(l.token, 10, 16)
+	i, e := strconv.Atoi(l.token)
 	if e != nil || l.err {
 		return nil, &ParseError{f, "bad URI Priority", l}, ""
 	}
 	rr.Priority = uint16(i)
 	<-c // zBlank
 	l = <-c
-	i, e = strconv.ParseUint(l.token, 10, 16)
+	i, e = strconv.Atoi(l.token)
 	if e != nil || l.err {
 		return nil, &ParseError{f, "bad URI Weight", l}, ""
 	}
@@ -1851,7 +1836,7 @@
 	if err != nil {
 		return nil, err, ""
 	}
-	if len(s) != 1 {
+	if len(s) > 1 {
 		return nil, &ParseError{f, "bad URI Target", l}, ""
 	}
 	rr.Target = s[0]
@@ -1876,11 +1861,10 @@ func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
 	rr.Hdr = h
 
 	l := <-c
-	if l.length == 0 { // dynamic update rr.
+	if l.length == 0 {
 		return rr, nil, ""
 	}
-
-	i, e := strconv.ParseUint(l.token, 10, 16)
+	i, e := strconv.Atoi(l.token)
 	if e != nil || l.err {
 		return nil, &ParseError{f, "bad NID Preference", l}, ""
 	}
@@ -1900,11 +1884,10 @@ func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
 	rr.Hdr = h
 
 	l := <-c
-	if l.length == 0 { // dynamic update rr.
+	if l.length == 0 {
 		return rr, nil, ""
 	}
-
-	i, e := strconv.ParseUint(l.token, 10, 16)
+	i, e := strconv.Atoi(l.token)
 	if e != nil || l.err {
 		return nil, &ParseError{f, "bad L32 Preference", l}, ""
 	}
@@ -1923,25 +1906,31 @@ func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
 	rr.Hdr = h
 
 	l := <-c
-	if l.length == 0 { // dynamic update rr.
+	if l.length == 0 {
 		return rr, nil, ""
 	}
-
-	i, e := strconv.ParseUint(l.token, 10, 16)
+	i, e := strconv.Atoi(l.token)
 	if e != nil || l.err {
 		return nil, &ParseError{f, "bad LP Preference", l}, ""
 	}
 	rr.Preference = uint16(i)
-
 	<-c // zBlank
 	l = <-c // zString
 	rr.Fqdn = l.token
-	name, nameOk := toAbsoluteName(l.token, o)
-	if l.err || !nameOk {
+	if l.length == 0 {
+		return rr, nil, ""
+	}
+	if l.token == "@" {
+		rr.Fqdn = o
+		return rr, nil, ""
+	}
+	_, ok := IsDomainName(l.token)
+	if !ok || l.length == 0 || l.err {
 		return nil, &ParseError{f, "bad LP Fqdn", l}, ""
 	}
-	rr.Fqdn = name
-
+	if rr.Fqdn[l.length-1] != '.' {
+		rr.Fqdn = appendOrigin(rr.Fqdn, o)
+	}
 	return rr, nil, ""
 }
 
@@ -1950,11 +1939,10 @@ func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
 	rr.Hdr = h
 
 	l := <-c
-	if l.length == 0 { // dynamic update rr.
+ if l.length == 0 { return rr, nil, "" } - - i, e := strconv.ParseUint(l.token, 10, 16) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad L64 Preference", l}, "" } @@ -1972,13 +1960,11 @@ func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr := new(UID) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - i, e := strconv.ParseUint(l.token, 10, 32) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad UID Uid", l}, "" } @@ -1989,13 +1975,11 @@ func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr := new(GID) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - i, e := strconv.ParseUint(l.token, 10, 32) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad GID Gid", l}, "" } @@ -2006,15 +1990,11 @@ func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr := new(UINFO) rr.Hdr = h - s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f) if e != nil { - return nil, e, c1 - } - if ln := len(s); ln == 0 { - return rr, nil, c1 + return nil, e, "" } - rr.Uinfo = s[0] // silently discard anything after the first character-string + rr.Uinfo = s[0] // silently discard anything above return rr, nil, c1 } @@ -2023,47 +2003,56 @@ func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Hdr = h l := <-c - if l.length == 0 { // dynamic update rr. + if l.length == 0 { return rr, nil, "" } - - i, e := strconv.ParseUint(l.token, 10, 16) + i, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad PX Preference", l}, "" } rr.Preference = uint16(i) - <-c // zBlank l = <-c // zString rr.Map822 = l.token - map822, map822Ok := toAbsoluteName(l.token, o) - if l.err || !map822Ok { + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Map822 = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad PX Map822", l}, "" } - rr.Map822 = map822 - + if rr.Map822[l.length-1] != '.' { + rr.Map822 = appendOrigin(rr.Map822, o) + } <-c // zBlank l = <-c // zString rr.Mapx400 = l.token - mapx400, mapx400Ok := toAbsoluteName(l.token, o) - if l.err || !mapx400Ok { + if l.token == "@" { + rr.Mapx400 = o + return rr, nil, "" + } + _, ok = IsDomainName(l.token) + if !ok || l.length == 0 || l.err { return nil, &ParseError{f, "bad PX Mapx400", l}, "" } - rr.Mapx400 = mapx400 - + if rr.Mapx400[l.length-1] != '.' { + rr.Mapx400 = appendOrigin(rr.Mapx400, o) + } return rr, nil, "" } func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr := new(CAA) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. 
+ if l.length == 0 { return rr, nil, l.comment } - - i, err := strconv.ParseUint(l.token, 10, 8) + i, err := strconv.Atoi(l.token) if err != nil || l.err { return nil, &ParseError{f, "bad CAA Flag", l}, "" } @@ -2081,58 +2070,13 @@ func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { if e != nil { return nil, e, "" } - if len(s) != 1 { + if len(s) > 1 { return nil, &ParseError{f, "bad CAA Value", l}, "" } rr.Value = s[0] return rr, nil, c1 } -func setTKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(TKEY) - rr.Hdr = h - - l := <-c - - // Algorithm - if l.value != zString { - return nil, &ParseError{f, "bad TKEY algorithm", l}, "" - } - rr.Algorithm = l.token - <-c // zBlank - - // Get the key length and key values - l = <-c - i, err := strconv.ParseUint(l.token, 10, 8) - if err != nil || l.err { - return nil, &ParseError{f, "bad TKEY key length", l}, "" - } - rr.KeySize = uint16(i) - <-c // zBlank - l = <-c - if l.value != zString { - return nil, &ParseError{f, "bad TKEY key", l}, "" - } - rr.Key = l.token - <-c // zBlank - - // Get the otherdata length and string data - l = <-c - i, err = strconv.ParseUint(l.token, 10, 8) - if err != nil || l.err { - return nil, &ParseError{f, "bad TKEY otherdata length", l}, "" - } - rr.OtherLen = uint16(i) - <-c // zBlank - l = <-c - if l.value != zString { - return nil, &ParseError{f, "bad TKEY otherday", l}, "" - } - rr.OtherData = l.token - - return rr, nil, "" -} - var typeToparserFunc = map[uint16]parserFunc{ TypeAAAA: {setAAAA, false}, TypeAFSDB: {setAFSDB, false}, @@ -2142,7 +2086,6 @@ var typeToparserFunc = map[uint16]parserFunc{ TypeCDNSKEY: {setCDNSKEY, true}, TypeCERT: {setCERT, true}, TypeCNAME: {setCNAME, false}, - TypeCSYNC: {setCSYNC, true}, TypeDHCID: {setDHCID, true}, TypeDLV: {setDLV, true}, TypeDNAME: {setDNAME, false}, @@ -2185,10 +2128,8 @@ var typeToparserFunc = map[uint16]parserFunc{ TypeRP: {setRP, false}, TypeRRSIG: {setRRSIG, true}, TypeRT: {setRT, false}, - TypeSMIMEA: {setSMIMEA, true}, TypeSOA: {setSOA, false}, TypeSPF: {setSPF, true}, - TypeAVC: {setAVC, true}, TypeSRV: {setSRV, false}, TypeSSHFP: {setSSHFP, true}, TypeTALINK: {setTALINK, false}, @@ -2199,5 +2140,4 @@ var typeToparserFunc = map[uint16]parserFunc{ TypeUINFO: {setUINFO, true}, TypeURI: {setURI, true}, TypeX25: {setX25, false}, - TypeTKEY: {setTKEY, true}, } diff --git a/vendor/github.com/miekg/dns/scan_test.go b/vendor/github.com/miekg/dns/scan_test.go deleted file mode 100644 index e43ad4478198..000000000000 --- a/vendor/github.com/miekg/dns/scan_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package dns - -import ( - "io/ioutil" - "os" - "strings" - "testing" -) - -func TestParseZoneInclude(t *testing.T) { - - tmpfile, err := ioutil.TempFile("", "dns") - if err != nil { - t.Fatalf("could not create tmpfile for test: %s", err) - } - - if _, err := tmpfile.WriteString("foo\tIN\tA\t127.0.0.1"); err != nil { - t.Fatalf("unable to write content to tmpfile %q: %s", tmpfile.Name(), err) - } - if err := tmpfile.Close(); err != nil { - t.Fatalf("could not close tmpfile %q: %s", tmpfile.Name(), err) - } - - zone := "$ORIGIN example.org.\n$INCLUDE " + tmpfile.Name() - - tok := ParseZone(strings.NewReader(zone), "", "") - for x := range tok { - if x.Error != nil { - t.Fatalf("expected no error, but got %s", x.Error) - } - if x.RR.Header().Name != "foo.example.org." 
{ - t.Fatalf("expected %s, but got %s", "foo.example.org.", x.RR.Header().Name) - } - } - - os.Remove(tmpfile.Name()) - - tok = ParseZone(strings.NewReader(zone), "", "") - for x := range tok { - if x.Error == nil { - t.Fatalf("expected first token to contain an error but it didn't") - } - if !strings.Contains(x.Error.Error(), "failed to open") || - !strings.Contains(x.Error.Error(), tmpfile.Name()) { - t.Fatalf(`expected error to contain: "failed to open" and %q but got: %s`, tmpfile.Name(), x.Error) - } - } -} diff --git a/vendor/github.com/miekg/dns/scanner.go b/vendor/github.com/miekg/dns/scanner.go index 424e5af9f593..c29bc2f388e8 100644 --- a/vendor/github.com/miekg/dns/scanner.go +++ b/vendor/github.com/miekg/dns/scanner.go @@ -4,7 +4,6 @@ package dns import ( "bufio" - "context" "io" "text/scanner" ) @@ -13,18 +12,13 @@ type scan struct { src *bufio.Reader position scanner.Position eof bool // Have we just seen a eof - ctx context.Context } -func scanInit(r io.Reader) (*scan, context.CancelFunc) { +func scanInit(r io.Reader) *scan { s := new(scan) s.src = bufio.NewReader(r) s.position.Line = 1 - - ctx, cancel := context.WithCancel(context.Background()) - s.ctx = ctx - - return s, cancel + return s } // tokenText returns the next byte from the input @@ -33,13 +27,6 @@ func (s *scan) tokenText() (byte, error) { if err != nil { return c, err } - select { - case <-s.ctx.Done(): - return c, context.Canceled - default: - break - } - // delay the newline handling until the next token is delivered, // fixes off-by-one errors when reporting a parse error. if s.eof == true { diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go index 2d98f148883a..2b4bff49f27b 100644 --- a/vendor/github.com/miekg/dns/server.go +++ b/vendor/github.com/miekg/dns/server.go @@ -9,19 +9,12 @@ import ( "io" "net" "sync" - "sync/atomic" "time" ) -// Default maximum number of TCP queries before we close the socket. +// Maximum number of TCP queries before we close the socket. const maxTCPQueries = 128 -// Interval for stop worker if no load -const idleWorkerTimeout = 10 * time.Second - -// Maximum number of workers -const maxWorkersCount = 10000 - // Handler is implemented by any value that implements ServeDNS. type Handler interface { ServeDNS(w ResponseWriter, r *Msg) @@ -50,7 +43,6 @@ type ResponseWriter interface { } type response struct { - msg []byte hijacked bool // connection has been hijacked by handler tsigStatus error tsigTimersOnly bool @@ -59,6 +51,7 @@ type response struct { udp *net.UDPConn // i/o connection if UDP was used tcp net.Conn // i/o connection if TCP was used udpSession *SessionUDP // oob data to get egress interface right + remoteAddr net.Addr // address of the client writer Writer // writer to output the raw DNS bits } @@ -154,7 +147,7 @@ func (mux *ServeMux) match(q string, t uint16) Handler { b[i] |= ('a' - 'A') } } - if h, ok := mux.z[string(b[:l])]; ok { // causes garbage, might want to change the map key + if h, ok := mux.z[string(b[:l])]; ok { // 'causes garbage, might want to change the map key if t != TypeDS { return h } @@ -292,7 +285,7 @@ type Server struct { WriteTimeout time.Duration // TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966). IdleTimeout func() time.Duration - // Secret(s) for Tsig map[]. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2). + // Secret(s) for Tsig map[]. 
TsigSecret map[string]string // Unsafe instructs the server to disregard any sanity checks and directly hand the message to // the handler. It will specifically not check if the query has the QR bit not set. @@ -303,61 +296,13 @@ type Server struct { DecorateReader DecorateReader // DecorateWriter is optional, allows customization of the process that writes raw DNS messages. DecorateWriter DecorateWriter - // Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1). - MaxTCPQueries int - - // UDP packet or TCP connection queue - queue chan *response - // Workers count - workersCount int32 - // Shutdown handling - lock sync.RWMutex - started bool -} - -func (srv *Server) worker(w *response) { - srv.serve(w) - for { - count := atomic.LoadInt32(&srv.workersCount) - if count > maxWorkersCount { - return - } - if atomic.CompareAndSwapInt32(&srv.workersCount, count, count+1) { - break - } - } + // Graceful shutdown handling - defer atomic.AddInt32(&srv.workersCount, -1) + inFlight sync.WaitGroup - inUse := false - timeout := time.NewTimer(idleWorkerTimeout) - defer timeout.Stop() -LOOP: - for { - select { - case w, ok := <-srv.queue: - if !ok { - break LOOP - } - inUse = true - srv.serve(w) - case <-timeout.C: - if !inUse { - break LOOP - } - inUse = false - timeout.Reset(idleWorkerTimeout) - } - } -} - -func (srv *Server) spawnWorker(w *response) { - select { - case srv.queue <- w: - default: - go srv.worker(w) - } + lock sync.RWMutex + started bool } // ListenAndServe starts a nameserver on the configured address in *Server. @@ -367,7 +312,6 @@ func (srv *Server) ListenAndServe() error { if srv.started { return &Error{err: "server already started"} } - addr := srv.Addr if addr == "" { addr = ":domain" @@ -375,8 +319,6 @@ func (srv *Server) ListenAndServe() error { if srv.UDPSize == 0 { srv.UDPSize = MinMsgSize } - srv.queue = make(chan *response) - defer close(srv.queue) switch srv.Net { case "tcp", "tcp4", "tcp6": a, err := net.ResolveTCPAddr(srv.Net, addr) @@ -397,7 +339,7 @@ func (srv *Server) ListenAndServe() error { network := "tcp" if srv.Net == "tcp4-tls" { network = "tcp4" - } else if srv.Net == "tcp6-tls" { + } else if srv.Net == "tcp6" { network = "tcp6" } @@ -441,18 +383,13 @@ func (srv *Server) ActivateAndServe() error { if srv.started { return &Error{err: "server already started"} } - pConn := srv.PacketConn l := srv.Listener - srv.queue = make(chan *response) - defer close(srv.queue) if pConn != nil { if srv.UDPSize == 0 { srv.UDPSize = MinMsgSize } - // Check PacketConn interface's type is valid and value - // is not nil - if t, ok := pConn.(*net.UDPConn); ok && t != nil { + if t, ok := pConn.(*net.UDPConn); ok { if e := setUDPSocketOptions(t); e != nil { return e } @@ -473,8 +410,10 @@ func (srv *Server) ActivateAndServe() error { return &Error{err: "bad listeners"} } -// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and -// ActivateAndServe will return. +// Shutdown gracefully shuts down a server. After a call to Shutdown, ListenAndServe and +// ActivateAndServe will return. All in progress queries are completed before the server +// is taken down. If the Shutdown is taking longer than the reading timeout an error +// is returned. 
func (srv *Server) Shutdown() error { srv.lock.Lock() if !srv.started { @@ -490,7 +429,19 @@ func (srv *Server) Shutdown() error { if srv.Listener != nil { srv.Listener.Close() } - return nil + + fin := make(chan bool) + go func() { + srv.inFlight.Wait() + fin <- true + }() + + select { + case <-time.After(srv.getReadTimeout()): + return &Error{err: "server shutdown is pending"} + case <-fin: + return nil + } } // getReadTimeout is a helper func to use system timeout if server did not intend to change it. @@ -503,6 +454,7 @@ func (srv *Server) getReadTimeout() time.Duration { } // serveTCP starts a TCP listener for the server. +// Each request is handled in a separate goroutine. func (srv *Server) serveTCP(l net.Listener) error { defer l.Close() @@ -510,8 +462,26 @@ func (srv *Server) serveTCP(l net.Listener) error { srv.NotifyStartedFunc() } + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + handler := srv.Handler + if handler == nil { + handler = DefaultServeMux + } + rtimeout := srv.getReadTimeout() + // deadline is not used here for { rw, err := l.Accept() + if err != nil { + if neterr, ok := err.(net.Error); ok && neterr.Temporary() { + continue + } + return err + } + m, err := reader.ReadTCP(rw, rtimeout) srv.lock.RLock() if !srv.started { srv.lock.RUnlock() @@ -519,16 +489,15 @@ func (srv *Server) serveTCP(l net.Listener) error { } srv.lock.RUnlock() if err != nil { - if neterr, ok := err.(net.Error); ok && neterr.Temporary() { - continue - } - return err + continue } - srv.spawnWorker(&response{tsigSecret: srv.TsigSecret, tcp: rw}) + srv.inFlight.Add(1) + go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw) } } // serveUDP starts a UDP listener for the server. +// Each request is handled in a separate goroutine. func (srv *Server) serveUDP(l *net.UDPConn) error { defer l.Close() @@ -541,6 +510,10 @@ func (srv *Server) serveUDP(l *net.UDPConn) error { reader = srv.DecorateReader(reader) } + handler := srv.Handler + if handler == nil { + handler = DefaultServeMux + } rtimeout := srv.getReadTimeout() // deadline is not used here for { @@ -552,106 +525,85 @@ func (srv *Server) serveUDP(l *net.UDPConn) error { } srv.lock.RUnlock() if err != nil { - if netErr, ok := err.(net.Error); ok && netErr.Temporary() { - continue - } - return err - } - if len(m) < headerSize { continue } - srv.spawnWorker(&response{msg: m, tsigSecret: srv.TsigSecret, udp: l, udpSession: s}) + srv.inFlight.Add(1) + go srv.serve(s.RemoteAddr(), handler, m, l, s, nil) } } -func (srv *Server) serve(w *response) { +// Serve a new connection. 
+func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t net.Conn) { + defer srv.inFlight.Done() + + w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s} if srv.DecorateWriter != nil { w.writer = srv.DecorateWriter(w) } else { w.writer = w } - if w.udp != nil { - // serve UDP - srv.serveDNS(w) - return - } + q := 0 // counter for the amount of TCP queries we get reader := Reader(&defaultReader{srv}) if srv.DecorateReader != nil { reader = srv.DecorateReader(reader) } - - defer func() { - if !w.hijacked { - w.Close() - } - }() - - idleTimeout := tcpIdleTimeout - if srv.IdleTimeout != nil { - idleTimeout = srv.IdleTimeout() - } - - timeout := srv.getReadTimeout() - - limit := srv.MaxTCPQueries - if limit == 0 { - limit = maxTCPQueries - } - - for q := 0; q < limit || limit == -1; q++ { - var err error - w.msg, err = reader.ReadTCP(w.tcp, timeout) - if err != nil { - // TODO(tmthrgd): handle error - break - } - srv.serveDNS(w) - if w.tcp == nil { - break // Close() was called - } - if w.hijacked { - break // client will call Close() themselves - } - // The first read uses the read timeout, the rest use the - // idle timeout. - timeout = idleTimeout - } -} - -func (srv *Server) serveDNS(w *response) { +Redo: req := new(Msg) - err := req.Unpack(w.msg) + err := req.Unpack(m) if err != nil { // Send a FormatError back x := new(Msg) x.SetRcodeFormatError(req) w.WriteMsg(x) - return + goto Exit } if !srv.Unsafe && req.Response { - return + goto Exit } w.tsigStatus = nil if w.tsigSecret != nil { if t := req.IsTsig(); t != nil { - if secret, ok := w.tsigSecret[t.Hdr.Name]; ok { - w.tsigStatus = TsigVerify(w.msg, secret, "", false) - } else { - w.tsigStatus = ErrSecret + secret := t.Hdr.Name + if _, ok := w.tsigSecret[secret]; !ok { + w.tsigStatus = ErrKeyAlg } + w.tsigStatus = TsigVerify(m, w.tsigSecret[secret], "", false) w.tsigTimersOnly = false w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC } } + h.ServeDNS(w, req) // Writes back to the client - handler := srv.Handler - if handler == nil { - handler = DefaultServeMux +Exit: + if w.tcp == nil { + return + } + // TODO(miek): make this number configurable? + if q > maxTCPQueries { // close socket after this many queries + w.Close() + return } - handler.ServeDNS(w, req) // Writes back to the client + if w.hijacked { + return // client calls Close() + } + if u != nil { // UDP, "close" and return + w.Close() + return + } + idleTimeout := tcpIdleTimeout + if srv.IdleTimeout != nil { + idleTimeout = srv.IdleTimeout() + } + m, err = reader.ReadTCP(w.tcp, idleTimeout) + if err == nil { + q++ + goto Redo + } + w.Close() + return } func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { @@ -693,8 +645,11 @@ func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *S conn.SetReadDeadline(time.Now().Add(timeout)) m := make([]byte, srv.UDPSize) n, s, err := ReadFromSessionUDP(conn, m) - if err != nil { - return nil, nil, err + if err != nil || n == 0 { + if err != nil { + return nil, nil, err + } + return nil, nil, ErrShortRead } m = m[:n] return m, s, nil @@ -754,12 +709,7 @@ func (w *response) LocalAddr() net.Addr { } // RemoteAddr implements the ResponseWriter.RemoteAddr method. 
-func (w *response) RemoteAddr() net.Addr { - if w.tcp != nil { - return w.tcp.RemoteAddr() - } - return w.udpSession.RemoteAddr() -} +func (w *response) RemoteAddr() net.Addr { return w.remoteAddr } // TsigStatus implements the ResponseWriter.TsigStatus method. func (w *response) TsigStatus() error { return w.tsigStatus } diff --git a/vendor/github.com/miekg/dns/server_test.go b/vendor/github.com/miekg/dns/server_test.go index 6fc9e88c14c5..1b5cbc97eedc 100644 --- a/vendor/github.com/miekg/dns/server_test.go +++ b/vendor/github.com/miekg/dns/server_test.go @@ -20,7 +20,7 @@ func HelloServer(w ResponseWriter, req *Msg) { w.WriteMsg(m) } -func HelloServerBadID(w ResponseWriter, req *Msg) { +func HelloServerBadId(w ResponseWriter, req *Msg) { m := new(Msg) m.SetReply(req) m.Id++ @@ -30,16 +30,6 @@ func HelloServerBadID(w ResponseWriter, req *Msg) { w.WriteMsg(m) } -func HelloServerEchoAddrPort(w ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - - remoteAddr := w.RemoteAddr().String() - m.Extra = make([]RR, 1) - m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{remoteAddr}} - w.WriteMsg(m) -} - func AnotherHelloServer(w ResponseWriter, req *Msg) { m := new(Msg) m.SetReply(req) @@ -55,7 +45,7 @@ func RunLocalUDPServer(laddr string) (*Server, string, error) { return server, l, err } -func RunLocalUDPServerWithFinChan(laddr string) (*Server, string, chan error, error) { +func RunLocalUDPServerWithFinChan(laddr string) (*Server, string, chan struct{}, error) { pc, err := net.ListenPacket("udp", laddr) if err != nil { return nil, "", nil, err @@ -66,13 +56,11 @@ func RunLocalUDPServerWithFinChan(laddr string) (*Server, string, chan error, er waitLock.Lock() server.NotifyStartedFunc = waitLock.Unlock - // fin must be buffered so the goroutine below won't block - // forever if fin is never read from. This always happens - // in RunLocalUDPServer and can happen in TestShutdownUDP. - fin := make(chan error, 1) + fin := make(chan struct{}, 0) go func() { - fin <- server.ActivateAndServe() + server.ActivateAndServe() + close(fin) pc.Close() }() @@ -102,15 +90,9 @@ func RunLocalUDPServerUnsafe(laddr string) (*Server, string, error) { } func RunLocalTCPServer(laddr string) (*Server, string, error) { - server, l, _, err := RunLocalTCPServerWithFinChan(laddr) - - return server, l, err -} - -func RunLocalTCPServerWithFinChan(laddr string) (*Server, string, chan error, error) { l, err := net.Listen("tcp", laddr) if err != nil { - return nil, "", nil, err + return nil, "", err } server := &Server{Listener: l, ReadTimeout: time.Hour, WriteTimeout: time.Hour} @@ -119,17 +101,13 @@ func RunLocalTCPServerWithFinChan(laddr string) (*Server, string, chan error, er waitLock.Lock() server.NotifyStartedFunc = waitLock.Unlock - // See the comment in RunLocalUDPServerWithFinChan as to - // why fin must be buffered. 
- fin := make(chan error, 1) - go func() { - fin <- server.ActivateAndServe() + server.ActivateAndServe() l.Close() }() waitLock.Lock() - return server, l.Addr().String(), fin, nil + return server, l.Addr().String(), nil } func RunLocalTLSServer(laddr string, config *tls.Config) (*Server, string, error) { @@ -159,7 +137,7 @@ func TestServing(t *testing.T) { defer HandleRemove("miek.nl.") defer HandleRemove("example.com.") - s, addrstr, err := RunLocalUDPServer(":0") + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") if err != nil { t.Fatalf("unable to run test server: %v", err) } @@ -214,7 +192,7 @@ func TestServingTLS(t *testing.T) { Certificates: []tls.Certificate{cert}, } - s, addrstr, err := RunLocalTLSServer(":0", &config) + s, addrstr, err := RunLocalTLSServer("127.0.0.1:0", &config) if err != nil { t.Fatalf("unable to run test server: %v", err) } @@ -259,77 +237,13 @@ func TestServingTLS(t *testing.T) { } } -func TestServingListenAndServe(t *testing.T) { - HandleFunc("example.com.", AnotherHelloServer) - defer HandleRemove("example.com.") - - waitLock := sync.Mutex{} - server := &Server{Addr: ":0", Net: "udp", ReadTimeout: time.Hour, WriteTimeout: time.Hour, NotifyStartedFunc: waitLock.Unlock} - waitLock.Lock() - - go func() { - server.ListenAndServe() - }() - waitLock.Lock() - - c, m := new(Client), new(Msg) - m.SetQuestion("example.com.", TypeTXT) - addr := server.PacketConn.LocalAddr().String() // Get address via the PacketConn that gets set. - r, _, err := c.Exchange(m, addr) - if err != nil { - t.Fatal("failed to exchange example.com", err) - } - txt := r.Extra[0].(*TXT).Txt[0] - if txt != "Hello example" { - t.Error("unexpected result for example.com", txt, "!= Hello example") - } - server.Shutdown() -} - -func TestServingListenAndServeTLS(t *testing.T) { - HandleFunc("example.com.", AnotherHelloServer) - defer HandleRemove("example.com.") - - cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock) - if err != nil { - t.Fatalf("unable to build certificate: %v", err) - } - - config := &tls.Config{ - Certificates: []tls.Certificate{cert}, - } - - waitLock := sync.Mutex{} - server := &Server{Addr: ":0", Net: "tcp", TLSConfig: config, ReadTimeout: time.Hour, WriteTimeout: time.Hour, NotifyStartedFunc: waitLock.Unlock} - waitLock.Lock() - - go func() { - server.ListenAndServe() - }() - waitLock.Lock() - - c, m := new(Client), new(Msg) - c.Net = "tcp" - m.SetQuestion("example.com.", TypeTXT) - addr := server.Listener.Addr().String() // Get address via the Listener that gets set. 
- r, _, err := c.Exchange(m, addr) - if err != nil { - t.Fatal(err) - } - txt := r.Extra[0].(*TXT).Txt[0] - if txt != "Hello example" { - t.Error("unexpected result for example.com", txt, "!= Hello example") - } - server.Shutdown() -} - func BenchmarkServe(b *testing.B) { b.StopTimer() HandleFunc("miek.nl.", HelloServer) defer HandleRemove("miek.nl.") a := runtime.GOMAXPROCS(4) - s, addrstr, err := RunLocalUDPServer(":0") + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") if err != nil { b.Fatalf("unable to run test server: %v", err) } @@ -382,7 +296,7 @@ func BenchmarkServeCompress(b *testing.B) { HandleFunc("miek.nl.", HelloServerCompress) defer HandleRemove("miek.nl.") a := runtime.GOMAXPROCS(4) - s, addrstr, err := RunLocalUDPServer(":0") + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") if err != nil { b.Fatalf("unable to run test server: %v", err) } @@ -483,7 +397,7 @@ func TestServingLargeResponses(t *testing.T) { HandleFunc("example.", HelloServerLargeResponse) defer HandleRemove("example.") - s, addrstr, err := RunLocalUDPServer(":0") + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") if err != nil { t.Fatalf("unable to run test server: %v", err) } @@ -523,7 +437,7 @@ func TestServingResponse(t *testing.T) { t.Skip("skipping test in short mode.") } HandleFunc("miek.nl.", HelloServer) - s, addrstr, err := RunLocalUDPServer(":0") + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") if err != nil { t.Fatalf("unable to run test server: %v", err) } @@ -543,7 +457,7 @@ func TestServingResponse(t *testing.T) { } s.Shutdown() - s, addrstr, err = RunLocalUDPServerUnsafe(":0") + s, addrstr, err = RunLocalUDPServerUnsafe("127.0.0.1:0") if err != nil { t.Fatalf("unable to run test server: %v", err) } @@ -557,21 +471,13 @@ func TestServingResponse(t *testing.T) { } func TestShutdownTCP(t *testing.T) { - s, _, fin, err := RunLocalTCPServerWithFinChan(":0") + s, _, err := RunLocalTCPServer("127.0.0.1:0") if err != nil { t.Fatalf("unable to run test server: %v", err) } err = s.Shutdown() if err != nil { - t.Fatalf("could not shutdown test TCP server, %v", err) - } - select { - case err := <-fin: - if err != nil { - t.Errorf("error returned from ActivateAndServe, %v", err) - } - case <-time.After(2 * time.Second): - t.Error("could not shutdown test TCP server. 
Gave up waiting") + t.Errorf("could not shutdown test TCP server, %v", err) } } @@ -585,7 +491,7 @@ func TestShutdownTLS(t *testing.T) { Certificates: []tls.Certificate{cert}, } - s, _, err := RunLocalTLSServer(":0", &config) + s, _, err := RunLocalTLSServer("127.0.0.1:0", &config) if err != nil { t.Fatalf("unable to run test server: %v", err) } @@ -613,7 +519,7 @@ func (t *trigger) Get() bool { func TestHandlerCloseTCP(t *testing.T) { - ln, err := net.Listen("tcp", ":0") + ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { panic(err) } @@ -637,12 +543,12 @@ func TestHandlerCloseTCP(t *testing.T) { exchange: _, _, err := c.Exchange(m, addr) if err != nil && err != io.EOF { - t.Errorf("exchange failed: %s\n", err) + t.Logf("exchange failed: %s\n", err) if tries == 3 { return } time.Sleep(time.Second / 10) - tries++ + tries += 1 goto exchange } }() @@ -653,7 +559,7 @@ func TestHandlerCloseTCP(t *testing.T) { } func TestShutdownUDP(t *testing.T) { - s, _, fin, err := RunLocalUDPServerWithFinChan(":0") + s, _, fin, err := RunLocalUDPServerWithFinChan("127.0.0.1:0") if err != nil { t.Fatalf("unable to run test server: %v", err) } @@ -662,28 +568,9 @@ func TestShutdownUDP(t *testing.T) { t.Errorf("could not shutdown test UDP server, %v", err) } select { - case err := <-fin: - if err != nil { - t.Errorf("error returned from ActivateAndServe, %v", err) - } + case <-fin: case <-time.After(2 * time.Second): - t.Error("could not shutdown test UDP server. Gave up waiting") - } -} - -func TestServerStartStopRace(t *testing.T) { - for i := 0; i < 10; i++ { - var err error - s := &Server{} - s, _, _, err = RunLocalUDPServerWithFinChan(":0") - if err != nil { - t.Fatalf("could not start server: %s", err) - } - go func() { - if err := s.Shutdown(); err != nil { - t.Fatalf("could not stop server: %s", err) - } - }() + t.Error("Could not shutdown test UDP server. Gave up waiting") } } @@ -703,7 +590,7 @@ func ExampleDecorateWriter() { }) // simple UDP server - pc, err := net.ListenPacket("udp", ":0") + pc, err := net.ListenPacket("udp", "127.0.0.1:0") if err != nil { fmt.Println(err.Error()) return diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go index f31e9e6843dd..2dce06af82f6 100644 --- a/vendor/github.com/miekg/dns/sig0.go +++ b/vendor/github.com/miekg/dns/sig0.go @@ -60,15 +60,16 @@ func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) { } rr.Signature = toBase64(signature) + sig := string(signature) - buf = append(buf, signature...) + buf = append(buf, sig...) if len(buf) > int(^uint16(0)) { return nil, ErrBuf } // Adjust sig data length rdoff := len(mbuf) + 1 + 2 + 2 + 4 rdlen := binary.BigEndian.Uint16(buf[rdoff:]) - rdlen += uint16(len(signature)) + rdlen += uint16(len(sig)) binary.BigEndian.PutUint16(buf[rdoff:], rdlen) // Adjust additional count adc := binary.BigEndian.Uint16(buf[10:]) diff --git a/vendor/github.com/miekg/dns/smimea.go b/vendor/github.com/miekg/dns/smimea.go deleted file mode 100644 index 4e7ded4b386e..000000000000 --- a/vendor/github.com/miekg/dns/smimea.go +++ /dev/null @@ -1,47 +0,0 @@ -package dns - -import ( - "crypto/sha256" - "crypto/x509" - "encoding/hex" -) - -// Sign creates a SMIMEA record from an SSL certificate. 
-func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { - r.Hdr.Rrtype = TypeSMIMEA - r.Usage = uint8(usage) - r.Selector = uint8(selector) - r.MatchingType = uint8(matchingType) - - r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err - } - return nil -} - -// Verify verifies a SMIMEA record against an SSL certificate. If it is OK -// a nil error is returned. -func (r *SMIMEA) Verify(cert *x509.Certificate) error { - c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err // Not also ErrSig? - } - if r.Certificate == c { - return nil - } - return ErrSig // ErrSig, really? -} - -// SMIMEAName returns the ownername of a SMIMEA resource record as per the -// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3 -func SMIMEAName(email, domain string) (string, error) { - hasher := sha256.New() - hasher.Write([]byte(email)) - - // RFC Section 3: "The local-part is hashed using the SHA2-256 - // algorithm with the hash truncated to 28 octets and - // represented in its hexadecimal representation to become the - // left-most label in the prepared domain name" - return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil -} diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go index 431e2fb5afca..34fe6615aa2f 100644 --- a/vendor/github.com/miekg/dns/tlsa.go +++ b/vendor/github.com/miekg/dns/tlsa.go @@ -1,11 +1,50 @@ package dns import ( + "crypto/sha256" + "crypto/sha512" "crypto/x509" + "encoding/hex" + "errors" + "io" "net" "strconv" ) +// CertificateToDANE converts a certificate to a hex string as used in the TLSA record. +func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) { + switch matchingType { + case 0: + switch selector { + case 0: + return hex.EncodeToString(cert.Raw), nil + case 1: + return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil + } + case 1: + h := sha256.New() + switch selector { + case 0: + io.WriteString(h, string(cert.Raw)) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + io.WriteString(h, string(cert.RawSubjectPublicKeyInfo)) + return hex.EncodeToString(h.Sum(nil)), nil + } + case 2: + h := sha512.New() + switch selector { + case 0: + io.WriteString(h, string(cert.Raw)) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + io.WriteString(h, string(cert.RawSubjectPublicKeyInfo)) + return hex.EncodeToString(h.Sum(nil)), nil + } + } + return "", errors.New("dns: bad TLSA MatchingType or TLSA Selector") +} + // Sign creates a TLSA record from an SSL certificate. func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { r.Hdr.Rrtype = TypeTLSA diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go index 4837b4ab1fd8..78365e1c5bac 100644 --- a/vendor/github.com/miekg/dns/tsig.go +++ b/vendor/github.com/miekg/dns/tsig.go @@ -9,6 +9,7 @@ import ( "encoding/binary" "encoding/hex" "hash" + "io" "strconv" "strings" "time" @@ -123,7 +124,7 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s default: return nil, "", ErrKeyAlg } - h.Write(buf) + io.WriteString(h, string(buf)) t.MAC = hex.EncodeToString(h.Sum(nil)) t.MACSize = uint16(len(t.MAC) / 2) // Size is half! @@ -208,9 +209,6 @@ func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []b rr.Fudge = 300 // Standard (RFC) default. 
} - // Replace message ID in header with original ID from TSIG - binary.BigEndian.PutUint16(msgbuf[0:2], rr.OrigId) - if requestMAC != "" { m := new(macWireFmt) m.MACSize = uint16(len(requestMAC) / 2) diff --git a/vendor/github.com/miekg/dns/tsig_test.go b/vendor/github.com/miekg/dns/tsig_test.go index 4bc52733c70c..48b9988b662b 100644 --- a/vendor/github.com/miekg/dns/tsig_test.go +++ b/vendor/github.com/miekg/dns/tsig_test.go @@ -1,7 +1,6 @@ package dns import ( - "encoding/binary" "testing" "time" ) @@ -23,20 +22,6 @@ func TestTsig(t *testing.T) { if err != nil { t.Fatal(err) } - - // TSIG accounts for ID substitution. This means if the message ID is - // changed by a forwarder, we should still be able to verify the TSIG. - m = newTsig(HmacMD5) - buf, _, err = TsigGenerate(m, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) - if err != nil { - t.Fatal(err) - } - - binary.BigEndian.PutUint16(buf[0:2], uint16(42)) - err = TsigVerify(buf, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) - if err != nil { - t.Fatal(err) - } } func TestTsigCase(t *testing.T) { diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index a779ca8abc0e..e7370647900a 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -70,7 +70,6 @@ const ( TypeNSEC3 uint16 = 50 TypeNSEC3PARAM uint16 = 51 TypeTLSA uint16 = 52 - TypeSMIMEA uint16 = 53 TypeHIP uint16 = 55 TypeNINFO uint16 = 56 TypeRKEY uint16 = 57 @@ -78,7 +77,6 @@ const ( TypeCDS uint16 = 59 TypeCDNSKEY uint16 = 60 TypeOPENPGPKEY uint16 = 61 - TypeCSYNC uint16 = 62 TypeSPF uint16 = 99 TypeUINFO uint16 = 100 TypeUID uint16 = 101 @@ -92,7 +90,6 @@ const ( TypeEUI64 uint16 = 109 TypeURI uint16 = 256 TypeCAA uint16 = 257 - TypeAVC uint16 = 258 TypeTKEY uint16 = 249 TypeTSIG uint16 = 250 @@ -116,27 +113,27 @@ const ( ClassNONE = 254 ClassANY = 255 - // Message Response Codes, see https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml - RcodeSuccess = 0 // NoError - No Error [DNS] - RcodeFormatError = 1 // FormErr - Format Error [DNS] - RcodeServerFailure = 2 // ServFail - Server Failure [DNS] - RcodeNameError = 3 // NXDomain - Non-Existent Domain [DNS] - RcodeNotImplemented = 4 // NotImp - Not Implemented [DNS] - RcodeRefused = 5 // Refused - Query Refused [DNS] - RcodeYXDomain = 6 // YXDomain - Name Exists when it should not [DNS Update] - RcodeYXRrset = 7 // YXRRSet - RR Set Exists when it should not [DNS Update] - RcodeNXRrset = 8 // NXRRSet - RR Set that should exist does not [DNS Update] - RcodeNotAuth = 9 // NotAuth - Server Not Authoritative for zone [DNS Update] - RcodeNotZone = 10 // NotZone - Name not contained in zone [DNS Update/TSIG] - RcodeBadSig = 16 // BADSIG - TSIG Signature Failure [TSIG] - RcodeBadVers = 16 // BADVERS - Bad OPT Version [EDNS0] - RcodeBadKey = 17 // BADKEY - Key not recognized [TSIG] - RcodeBadTime = 18 // BADTIME - Signature out of time window [TSIG] - RcodeBadMode = 19 // BADMODE - Bad TKEY Mode [TKEY] - RcodeBadName = 20 // BADNAME - Duplicate key name [TKEY] - RcodeBadAlg = 21 // BADALG - Algorithm not supported [TKEY] - RcodeBadTrunc = 22 // BADTRUNC - Bad Truncation [TSIG] - RcodeBadCookie = 23 // BADCOOKIE - Bad/missing Server Cookie [DNS Cookies] + // Message Response Codes. 
+ RcodeSuccess = 0 + RcodeFormatError = 1 + RcodeServerFailure = 2 + RcodeNameError = 3 + RcodeNotImplemented = 4 + RcodeRefused = 5 + RcodeYXDomain = 6 + RcodeYXRrset = 7 + RcodeNXRrset = 8 + RcodeNotAuth = 9 + RcodeNotZone = 10 + RcodeBadSig = 16 // TSIG + RcodeBadVers = 16 // EDNS0 + RcodeBadKey = 17 + RcodeBadTime = 18 + RcodeBadMode = 19 // TKEY + RcodeBadName = 20 + RcodeBadAlg = 21 + RcodeBadTrunc = 22 // TSIG + RcodeBadCookie = 23 // DNS Cookies // Message Opcodes. There is no 3. OpcodeQuery = 0 @@ -146,7 +143,7 @@ const ( OpcodeUpdate = 5 ) -// Header is the wire format for the DNS packet header. +// Headers is the wire format for the DNS packet header. type Header struct { Id uint16 Bits uint16 @@ -165,15 +162,14 @@ const ( _Z = 1 << 6 // Z _AD = 1 << 5 // authticated data _CD = 1 << 4 // checking disabled -) -// Various constants used in the LOC RR, See RFC 1887. -const ( LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. - LOC_HOURS = 60 * 1000 - LOC_DEGREES = 60 * LOC_HOURS - LOC_ALTITUDEBASE = 100000 + + LOC_HOURS = 60 * 1000 + LOC_DEGREES = 60 * LOC_HOURS + + LOC_ALTITUDEBASE = 100000 ) // Different Certificate Types, see RFC 4398, Section 2.1 @@ -239,7 +235,6 @@ type ANY struct { func (rr *ANY) String() string { return rr.Hdr.String() } -// CNAME RR. See RFC 1034. type CNAME struct { Hdr RR_Header Target string `dns:"cdomain-name"` @@ -247,7 +242,6 @@ type CNAME struct { func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) } -// HINFO RR. See RFC 1034. type HINFO struct { Hdr RR_Header Cpu string @@ -258,7 +252,6 @@ func (rr *HINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os}) } -// MB RR. See RFC 1035. type MB struct { Hdr RR_Header Mb string `dns:"cdomain-name"` @@ -266,7 +259,6 @@ type MB struct { func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) } -// MG RR. See RFC 1035. type MG struct { Hdr RR_Header Mg string `dns:"cdomain-name"` @@ -274,7 +266,6 @@ type MG struct { func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) } -// MINFO RR. See RFC 1035. type MINFO struct { Hdr RR_Header Rmail string `dns:"cdomain-name"` @@ -285,7 +276,6 @@ func (rr *MINFO) String() string { return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email) } -// MR RR. See RFC 1035. type MR struct { Hdr RR_Header Mr string `dns:"cdomain-name"` @@ -295,7 +285,6 @@ func (rr *MR) String() string { return rr.Hdr.String() + sprintName(rr.Mr) } -// MF RR. See RFC 1035. type MF struct { Hdr RR_Header Mf string `dns:"cdomain-name"` @@ -305,7 +294,6 @@ func (rr *MF) String() string { return rr.Hdr.String() + sprintName(rr.Mf) } -// MD RR. See RFC 1035. type MD struct { Hdr RR_Header Md string `dns:"cdomain-name"` @@ -315,7 +303,6 @@ func (rr *MD) String() string { return rr.Hdr.String() + sprintName(rr.Md) } -// MX RR. See RFC 1035. type MX struct { Hdr RR_Header Preference uint16 @@ -326,7 +313,6 @@ func (rr *MX) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx) } -// AFSDB RR. See RFC 1183. type AFSDB struct { Hdr RR_Header Subtype uint16 @@ -337,7 +323,6 @@ func (rr *AFSDB) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname) } -// X25 RR. See RFC 1183, Section 3.1. type X25 struct { Hdr RR_Header PSDNAddress string @@ -347,7 +332,6 @@ func (rr *X25) String() string { return rr.Hdr.String() + rr.PSDNAddress } -// RT RR. 
See RFC 1183, Section 3.3. type RT struct { Hdr RR_Header Preference uint16 @@ -358,7 +342,6 @@ func (rr *RT) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host) } -// NS RR. See RFC 1035. type NS struct { Hdr RR_Header Ns string `dns:"cdomain-name"` @@ -368,7 +351,6 @@ func (rr *NS) String() string { return rr.Hdr.String() + sprintName(rr.Ns) } -// PTR RR. See RFC 1035. type PTR struct { Hdr RR_Header Ptr string `dns:"cdomain-name"` @@ -378,7 +360,6 @@ func (rr *PTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) } -// RP RR. See RFC 1138, Section 2.2. type RP struct { Hdr RR_Header Mbox string `dns:"domain-name"` @@ -389,7 +370,6 @@ func (rr *RP) String() string { return rr.Hdr.String() + rr.Mbox + " " + sprintTxt([]string{rr.Txt}) } -// SOA RR. See RFC 1035. type SOA struct { Hdr RR_Header Ns string `dns:"cdomain-name"` @@ -410,7 +390,6 @@ func (rr *SOA) String() string { " " + strconv.FormatInt(int64(rr.Minttl), 10) } -// TXT RR. See RFC 1035. type TXT struct { Hdr RR_Header Txt []string `dns:"txt"` @@ -500,6 +479,12 @@ func appendDomainNameByte(s []byte, b byte) []byte { func appendTXTStringByte(s []byte, b byte) []byte { switch b { + case '\t': + return append(s, '\\', 't') + case '\r': + return append(s, '\\', 'r') + case '\n': + return append(s, '\\', 'n') case '"', '\\': return append(s, '\\', b) } @@ -539,11 +524,19 @@ func nextByte(b []byte, offset int) (byte, int) { return dddToByte(b[offset+1:]), 4 } } - // not \ddd, just an RFC 1035 "quoted" character - return b[offset+1], 2 + // not \ddd, maybe a control char + switch b[offset+1] { + case 't': + return '\t', 2 + case 'r': + return '\r', 2 + case 'n': + return '\n', 2 + default: + return b[offset+1], 2 + } } -// SPF RR. See RFC 4408, Section 3.1.1. type SPF struct { Hdr RR_Header Txt []string `dns:"txt"` @@ -551,15 +544,6 @@ type SPF struct { func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } -// AVC RR. See https://www.iana.org/assignments/dns-parameters/AVC/avc-completed-template. -type AVC struct { - Hdr RR_Header - Txt []string `dns:"txt"` -} - -func (rr *AVC) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } - -// SRV RR. See RFC 2782. type SRV struct { Hdr RR_Header Priority uint16 @@ -575,7 +559,6 @@ func (rr *SRV) String() string { strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target) } -// NAPTR RR. See RFC 2915. type NAPTR struct { Hdr RR_Header Order uint16 @@ -596,7 +579,7 @@ func (rr *NAPTR) String() string { rr.Replacement } -// CERT RR. See RFC 4398. +// The CERT resource record, see RFC 4398. type CERT struct { Hdr RR_Header Type uint16 @@ -622,7 +605,7 @@ func (rr *CERT) String() string { " " + rr.Certificate } -// DNAME RR. See RFC 2672. +// The DNAME resource record, see RFC 2672. type DNAME struct { Hdr RR_Header Target string `dns:"domain-name"` @@ -632,7 +615,6 @@ func (rr *DNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) } -// A RR. See RFC 1035. type A struct { Hdr RR_Header A net.IP `dns:"a"` @@ -645,7 +627,6 @@ func (rr *A) String() string { return rr.Hdr.String() + rr.A.String() } -// AAAA RR. See RFC 3596. type AAAA struct { Hdr RR_Header AAAA net.IP `dns:"aaaa"` @@ -658,7 +639,6 @@ func (rr *AAAA) String() string { return rr.Hdr.String() + rr.AAAA.String() } -// PX RR. See RFC 2163. 
type PX struct { Hdr RR_Header Preference uint16 @@ -670,7 +650,6 @@ func (rr *PX) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400) } -// GPOS RR. See RFC 1712. type GPOS struct { Hdr RR_Header Longitude string @@ -682,7 +661,6 @@ func (rr *GPOS) String() string { return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude } -// LOC RR. See RFC RFC 1876. type LOC struct { Hdr RR_Header Version uint8 @@ -759,12 +737,11 @@ func (rr *LOC) String() string { return s } -// SIG RR. See RFC 2535. The SIG RR is identical to RRSIG and nowadays only used for SIG(0), See RFC 2931. +// SIG is identical to RRSIG and nowadays only used for SIG(0), RFC2931. type SIG struct { RRSIG } -// RRSIG RR. See RFC 4034 and RFC 3755. type RRSIG struct { Hdr RR_Header TypeCovered uint16 @@ -792,7 +769,6 @@ func (rr *RRSIG) String() string { return s } -// NSEC RR. See RFC 4034 and RFC 3755. type NSEC struct { Hdr RR_Header NextDomain string `dns:"domain-name"` @@ -820,13 +796,14 @@ func (rr *NSEC) len() int { return l } -// DLV RR. See RFC 4431. -type DLV struct{ DS } +type DLV struct { + DS +} -// CDS RR. See RFC 7344. -type CDS struct{ DS } +type CDS struct { + DS +} -// DS RR. See RFC 4034 and RFC 3658. type DS struct { Hdr RR_Header KeyTag uint16 @@ -842,7 +819,6 @@ func (rr *DS) String() string { " " + strings.ToUpper(rr.Digest) } -// KX RR. See RFC 2230. type KX struct { Hdr RR_Header Preference uint16 @@ -854,7 +830,6 @@ func (rr *KX) String() string { " " + sprintName(rr.Exchanger) } -// TA RR. See http://www.watson.org/~weiler/INI1999-19.pdf. type TA struct { Hdr RR_Header KeyTag uint16 @@ -870,7 +845,6 @@ func (rr *TA) String() string { " " + strings.ToUpper(rr.Digest) } -// TALINK RR. See https://www.iana.org/assignments/dns-parameters/TALINK/talink-completed-template. type TALINK struct { Hdr RR_Header PreviousName string `dns:"domain-name"` @@ -882,7 +856,6 @@ func (rr *TALINK) String() string { sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) } -// SSHFP RR. See RFC RFC 4255. type SSHFP struct { Hdr RR_Header Algorithm uint8 @@ -896,17 +869,14 @@ func (rr *SSHFP) String() string { " " + strings.ToUpper(rr.FingerPrint) } -// KEY RR. See RFC RFC 2535. type KEY struct { DNSKEY } -// CDNSKEY RR. See RFC 7344. type CDNSKEY struct { DNSKEY } -// DNSKEY RR. See RFC 4034 and RFC 3755. type DNSKEY struct { Hdr RR_Header Flags uint16 @@ -922,7 +892,6 @@ func (rr *DNSKEY) String() string { " " + rr.PublicKey } -// RKEY RR. See https://www.iana.org/assignments/dns-parameters/RKEY/rkey-completed-template. type RKEY struct { Hdr RR_Header Flags uint16 @@ -938,7 +907,6 @@ func (rr *RKEY) String() string { " " + rr.PublicKey } -// NSAPPTR RR. See RFC 1348. type NSAPPTR struct { Hdr RR_Header Ptr string `dns:"domain-name"` @@ -946,7 +914,6 @@ type NSAPPTR struct { func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) } -// NSEC3 RR. See RFC 5155. type NSEC3 struct { Hdr RR_Header Hash uint8 @@ -985,14 +952,13 @@ func (rr *NSEC3) len() int { return l } -// NSEC3PARAM RR. See RFC 5155. type NSEC3PARAM struct { Hdr RR_Header Hash uint8 Flags uint8 Iterations uint16 SaltLength uint8 - Salt string `dns:"size-hex:SaltLength"` + Salt string `dns:"hex"` } func (rr *NSEC3PARAM) String() string { @@ -1004,7 +970,6 @@ func (rr *NSEC3PARAM) String() string { return s } -// TKEY RR. See RFC 2930. 
type TKEY struct { Hdr RR_Header Algorithm string `dns:"domain-name"` @@ -1013,21 +978,17 @@ type TKEY struct { Mode uint16 Error uint16 KeySize uint16 - Key string `dns:"size-hex:KeySize"` + Key string OtherLen uint16 - OtherData string `dns:"size-hex:OtherLen"` + OtherData string } -// TKEY has no official presentation format, but this will suffice. func (rr *TKEY) String() string { - s := "\n;; TKEY PSEUDOSECTION:\n" - s += rr.Hdr.String() + " " + rr.Algorithm + " " + - strconv.Itoa(int(rr.KeySize)) + " " + rr.Key + " " + - strconv.Itoa(int(rr.OtherLen)) + " " + rr.OtherData - return s + // It has no presentation format + return "" } -// RFC3597 represents an unknown/generic RR. See RFC 3597. +// RFC3597 represents an unknown/generic RR. type RFC3597 struct { Hdr RR_Header Rdata string `dns:"hex"` @@ -1051,7 +1012,6 @@ func rfc3597Header(h RR_Header) string { return s } -// URI RR. See RFC 7553. type URI struct { Hdr RR_Header Priority uint16 @@ -1064,7 +1024,6 @@ func (rr *URI) String() string { " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target) } -// DHCID RR. See RFC 4701. type DHCID struct { Hdr RR_Header Digest string `dns:"base64"` @@ -1072,7 +1031,6 @@ type DHCID struct { func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest } -// TLSA RR. See RFC 6698. type TLSA struct { Hdr RR_Header Usage uint8 @@ -1089,30 +1047,6 @@ func (rr *TLSA) String() string { " " + rr.Certificate } -// SMIMEA RR. See RFC 8162. -type SMIMEA struct { - Hdr RR_Header - Usage uint8 - Selector uint8 - MatchingType uint8 - Certificate string `dns:"hex"` -} - -func (rr *SMIMEA) String() string { - s := rr.Hdr.String() + - strconv.Itoa(int(rr.Usage)) + - " " + strconv.Itoa(int(rr.Selector)) + - " " + strconv.Itoa(int(rr.MatchingType)) - - // Every Nth char needs a space on this output. If we output - // this as one giant line, we can't read it can in because in some cases - // the cert length overflows scan.maxTok (2048). - sx := splitN(rr.Certificate, 1024) // conservative value here - s += " " + strings.Join(sx, " ") - return s -} - -// HIP RR. See RFC 8005. type HIP struct { Hdr RR_Header HitLength uint8 @@ -1134,7 +1068,6 @@ func (rr *HIP) String() string { return s } -// NINFO RR. See https://www.iana.org/assignments/dns-parameters/NINFO/ninfo-completed-template. type NINFO struct { Hdr RR_Header ZSData []string `dns:"txt"` @@ -1142,7 +1075,6 @@ type NINFO struct { func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) } -// NID RR. See RFC RFC 6742. type NID struct { Hdr RR_Header Preference uint16 @@ -1156,7 +1088,6 @@ func (rr *NID) String() string { return s } -// L32 RR, See RFC 6742. type L32 struct { Hdr RR_Header Preference uint16 @@ -1171,7 +1102,6 @@ func (rr *L32) String() string { " " + rr.Locator32.String() } -// L64 RR, See RFC 6742. type L64 struct { Hdr RR_Header Preference uint16 @@ -1185,7 +1115,6 @@ func (rr *L64) String() string { return s } -// LP RR. See RFC 6742. type LP struct { Hdr RR_Header Preference uint16 @@ -1196,7 +1125,6 @@ func (rr *LP) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn) } -// EUI48 RR. See RFC 7043. type EUI48 struct { Hdr RR_Header Address uint64 `dns:"uint48"` @@ -1204,7 +1132,6 @@ type EUI48 struct { func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) } -// EUI64 RR. See RFC 7043. 
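The EUI48 hunk above prints a 48-bit address held in a uint64 via the unexported euiToString helper. A sketch of that style of formatting, assuming hyphen-separated lowercase hex octets, most significant byte first (euiString is an illustrative stand-in, not the library's helper):

```go
package main

import "fmt"

// euiString renders the low `bits` bits of addr as hex octets joined by
// hyphens, e.g. bits=48 yields "00-11-22-33-44-55"; bits=64 covers EUI64.
func euiString(addr uint64, bits int) string {
	s := ""
	for shift := bits - 8; shift >= 0; shift -= 8 {
		if s != "" {
			s += "-"
		}
		s += fmt.Sprintf("%02x", byte(addr>>uint(shift)))
	}
	return s
}

func main() {
	fmt.Println(euiString(0x001122334455, 48)) // 00-11-22-33-44-55
}
```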
type EUI64 struct { Hdr RR_Header Address uint64 @@ -1212,7 +1139,6 @@ type EUI64 struct { func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) } -// CAA RR. See RFC 6844. type CAA struct { Hdr RR_Header Flag uint8 @@ -1224,7 +1150,6 @@ func (rr *CAA) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value) } -// UID RR. Deprecated, IANA-Reserved. type UID struct { Hdr RR_Header Uid uint32 @@ -1232,7 +1157,6 @@ type UID struct { func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) } -// GID RR. Deprecated, IANA-Reserved. type GID struct { Hdr RR_Header Gid uint32 @@ -1240,7 +1164,6 @@ type GID struct { func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) } -// UINFO RR. Deprecated, IANA-Reserved. type UINFO struct { Hdr RR_Header Uinfo string @@ -1248,7 +1171,6 @@ type UINFO struct { func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) } -// EID RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt. type EID struct { Hdr RR_Header Endpoint string `dns:"hex"` @@ -1256,7 +1178,6 @@ type EID struct { func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) } -// NIMLOC RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt. type NIMLOC struct { Hdr RR_Header Locator string `dns:"hex"` @@ -1264,7 +1185,6 @@ type NIMLOC struct { func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) } -// OPENPGPKEY RR. See RFC 7929. type OPENPGPKEY struct { Hdr RR_Header PublicKey string `dns:"base64"` @@ -1272,36 +1192,6 @@ type OPENPGPKEY struct { func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey } -// CSYNC RR. See RFC 7477. -type CSYNC struct { - Hdr RR_Header - Serial uint32 - Flags uint16 - TypeBitMap []uint16 `dns:"nsec"` -} - -func (rr *CSYNC) String() string { - s := rr.Hdr.String() + strconv.FormatInt(int64(rr.Serial), 10) + " " + strconv.Itoa(int(rr.Flags)) - - for i := 0; i < len(rr.TypeBitMap); i++ { - s += " " + Type(rr.TypeBitMap[i]).String() - } - return s -} - -func (rr *CSYNC) len() int { - l := rr.Hdr.len() + 4 + 2 - lastwindow := uint32(2 ^ 32 + 1) - for _, t := range rr.TypeBitMap { - window := t / 256 - if uint32(window) != lastwindow { - l += 1 + 32 - } - lastwindow = uint32(window) - } - return l -} - // TimeToString translates the RRSIG's incep. and expir. times to the // string representation used when printing the record. // It takes serial arithmetic (RFC 1982) into account. @@ -1329,7 +1219,8 @@ func StringToTime(s string) (uint32, error) { return uint32(t.Unix() - (mod * year68)), nil } -// saltToString converts a NSECX salt to uppercase and returns "-" when it is empty. +// saltToString converts a NSECX salt to uppercase and +// returns "-" when it is empty func saltToString(s string) string { if len(s) == 0 { return "-" @@ -1357,25 +1248,3 @@ func copyIP(ip net.IP) net.IP { copy(p, ip) return p } - -// SplitN splits a string into N sized string chunks. -// This might become an exported function once. 
-func splitN(s string, n int) []string { - if len(s) < n { - return []string{s} - } - sx := []string{} - p, i := 0, n - for { - if i <= len(s) { - sx = append(sx, s[p:i]) - } else { - sx = append(sx, s[p:]) - break - - } - p, i = p+n, i+n - } - - return sx -} diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go index b8db4f361cdd..bf80da329c31 100644 --- a/vendor/github.com/miekg/dns/types_generate.go +++ b/vendor/github.com/miekg/dns/types_generate.go @@ -23,11 +23,11 @@ var skipLen = map[string]struct{}{ "NSEC": {}, "NSEC3": {}, "OPT": {}, - "CSYNC": {}, } var packageHdr = ` -// Code generated by "go run types_generate.go"; DO NOT EDIT. +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from type_generate.go package dns @@ -56,6 +56,7 @@ var TypeToString = map[uint16]string{ `)) var headerFunc = template.Must(template.New("headerFunc").Parse(` +// Header() functions {{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr } {{end}} @@ -181,8 +182,6 @@ func main() { fallthrough case st.Tag(i) == `dns:"base64"`: o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-hex:`): // this has an extra field where the length is stored - o("l += len(rr.%s)/2\n") case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): fallthrough case st.Tag(i) == `dns:"hex"`: @@ -198,7 +197,7 @@ func main() { case st.Tag(i) == "": switch st.Field(i).Type().(*types.Basic).Kind() { case types.Uint8: - o("l++ // %s\n") + o("l += 1 // %s\n") case types.Uint16: o("l += 2 // %s\n") case types.Uint32: @@ -226,7 +225,7 @@ func main() { continue } fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) - fields := []string{"rr.Hdr"} + fields := []string{"*rr.Hdr.copyHeader()"} for i := 1; i < st.NumFields(); i++ { f := st.Field(i).Name() if sl, ok := st.Field(i).Type().(*types.Slice); ok { diff --git a/vendor/github.com/miekg/dns/types_test.go b/vendor/github.com/miekg/dns/types_test.go index 3dbddee1e88d..118612946bce 100644 --- a/vendor/github.com/miekg/dns/types_test.go +++ b/vendor/github.com/miekg/dns/types_test.go @@ -40,35 +40,3 @@ func TestCmToM(t *testing.T) { t.Error("9, 9") } } - -func TestSplitN(t *testing.T) { - xs := splitN("abc", 5) - if len(xs) != 1 && xs[0] != "abc" { - t.Errorf("failure to split abc") - } - - s := "" - for i := 0; i < 255; i++ { - s += "a" - } - - xs = splitN(s, 255) - if len(xs) != 1 && xs[0] != s { - t.Errorf("failure to split 255 char long string") - } - - s += "b" - xs = splitN(s, 255) - if len(xs) != 2 || xs[1] != "b" { - t.Errorf("failure to split 256 char long string: %d", len(xs)) - } - - // Make s longer - for i := 0; i < 255; i++ { - s += "a" - } - xs = splitN(s, 255) - if len(xs) != 3 || xs[2] != "a" { - t.Errorf("failure to split 510 char long string: %d", len(xs)) - } -} diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go index a4826ee2ffd6..c79c6c88371b 100644 --- a/vendor/github.com/miekg/dns/udp.go +++ b/vendor/github.com/miekg/dns/udp.go @@ -1,30 +1,12 @@ -// +build !windows +// +build !windows,!plan9 package dns import ( "net" - - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" + "syscall" ) -// This is the required size of the OOB buffer to pass to ReadMsgUDP. -var udpOOBSize = func() int { - // We can't know whether we'll get an IPv4 control message or an - // IPv6 control message ahead of time. To get around this, we size - // the buffer equal to the largest of the two. 
- - oob4 := ipv4.NewControlMessage(ipv4.FlagDst | ipv4.FlagInterface) - oob6 := ipv6.NewControlMessage(ipv6.FlagDst | ipv6.FlagInterface) - - if len(oob4) > len(oob6) { - return len(oob4) - } - - return len(oob6) -}() - // SessionUDP holds the remote address and the associated // out-of-band data. type SessionUDP struct { @@ -35,10 +17,33 @@ type SessionUDP struct { // RemoteAddr returns the remote network address. func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } +// setUDPSocketOptions sets the UDP socket options. +// This function is implemented on a per platform basis. See udp_*.go for more details +func setUDPSocketOptions(conn *net.UDPConn) error { + sa, err := getUDPSocketName(conn) + if err != nil { + return err + } + switch sa.(type) { + case *syscall.SockaddrInet6: + v6only, err := getUDPSocketOptions6Only(conn) + if err != nil { + return err + } + setUDPSocketOptions6(conn) + if !v6only { + setUDPSocketOptions4(conn) + } + case *syscall.SockaddrInet4: + setUDPSocketOptions4(conn) + } + return nil +} + // ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a // net.UDPAddr. func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { - oob := make([]byte, udpOOBSize) + oob := make([]byte, 40) n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob) if err != nil { return n, nil, err @@ -46,57 +51,8 @@ func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { return n, &SessionUDP{raddr, oob[:oobn]}, err } -// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. +// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { - oob := correctSource(session.context) - n, _, err := conn.WriteMsgUDP(b, oob, session.raddr) + n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr) return n, err } - -func setUDPSocketOptions(conn *net.UDPConn) error { - // Try setting the flags for both families and ignore the errors unless they - // both error. - err6 := ipv6.NewPacketConn(conn).SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true) - err4 := ipv4.NewPacketConn(conn).SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true) - if err6 != nil && err4 != nil { - return err4 - } - return nil -} - -// parseDstFromOOB takes oob data and returns the destination IP. -func parseDstFromOOB(oob []byte) net.IP { - // Start with IPv6 and then fallback to IPv4 - // TODO(fastest963): Figure out a way to prefer one or the other. Looking at - // the lvl of the header for a 0 or 41 isn't cross-platform. - cm6 := new(ipv6.ControlMessage) - if cm6.Parse(oob) == nil && cm6.Dst != nil { - return cm6.Dst - } - cm4 := new(ipv4.ControlMessage) - if cm4.Parse(oob) == nil && cm4.Dst != nil { - return cm4.Dst - } - return nil -} - -// correctSource takes oob data and returns new oob data with the Src equal to the Dst -func correctSource(oob []byte) []byte { - dst := parseDstFromOOB(oob) - if dst == nil { - return nil - } - // If the dst is definitely an IPv6, then use ipv6's ControlMessage to - // respond otherwise use ipv4's because ipv6's marshal ignores ipv4 - // addresses.
- if dst.To4() == nil { - cm := new(ipv6.ControlMessage) - cm.Src = dst - oob = cm.Marshal() - } else { - cm := new(ipv4.ControlMessage) - cm.Src = dst - oob = cm.Marshal() - } - return oob -} diff --git a/vendor/github.com/miekg/dns/udp_linux.go b/vendor/github.com/miekg/dns/udp_linux.go new file mode 100644 index 000000000000..c62d21881b63 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_linux.go @@ -0,0 +1,73 @@ +// +build linux + +package dns + +// See: +// * http://stackoverflow.com/questions/3062205/setting-the-source-ip-for-a-udp-socket and +// * http://blog.powerdns.com/2012/10/08/on-binding-datagram-udp-sockets-to-the-any-addresses/ +// +// Why do we need this: when listening on 0.0.0.0 with UDP, the kernel decides which interface +// is used for the outgoing packet, and this might not always be the correct one. This code will make sure the egress +// packet's interface matches the ingress one. + +import ( + "net" + "syscall" +) + +// setUDPSocketOptions4 prepares the v4 socket for sessions. +func setUDPSocketOptions4(conn *net.UDPConn) error { + file, err := conn.File() + if err != nil { + return err + } + if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil { + return err + } + // Calling File() above results in the connection becoming blocking, we must fix that. + // See https://github.com/miekg/dns/issues/279 + err = syscall.SetNonblock(int(file.Fd()), true) + if err != nil { + return err + } + return nil +} + +// setUDPSocketOptions6 prepares the v6 socket for sessions. +func setUDPSocketOptions6(conn *net.UDPConn) error { + file, err := conn.File() + if err != nil { + return err + } + if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil { + return err + } + err = syscall.SetNonblock(int(file.Fd()), true) + if err != nil { + return err + } + return nil +} + +// getUDPSocketOptions6Only returns true if the socket is v6 only and false when it is v4/v6 combined +// (dualstack). +func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { + file, err := conn.File() + if err != nil { + return false, err + } + // dual stack. See http://stackoverflow.com/questions/1618240/how-to-support-both-ipv4-and-ipv6-connections + v6only, err := syscall.GetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY) + if err != nil { + return false, err + } + return v6only == 1, nil +} + +func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { + file, err := conn.File() + if err != nil { + return nil, err + } + return syscall.Getsockname(int(file.Fd())) +} diff --git a/vendor/github.com/miekg/dns/udp_other.go b/vendor/github.com/miekg/dns/udp_other.go new file mode 100644 index 000000000000..d40732441b0f --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_other.go @@ -0,0 +1,17 @@ +// +build !linux,!plan9 + +package dns + +import ( + "net" + "syscall" +) + +// These do nothing. See udp_linux.go for an example of how to implement this. + +// We tried to adhere to some kind of naming scheme.
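The udp*.go files in these hunks implement one SessionUDP abstraction per platform. A hedged usage sketch of the exported surface visible above (ReadFromSessionUDP, WriteToSessionUDP, RemoteAddr); the address and port are illustrative, and the read blocks until a datagram arrives:

```go
package main

import (
	"log"
	"net"

	"github.com/miekg/dns" // the vendored package these hunks modify
)

func main() {
	addr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:5353")
	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		log.Fatal(err)
	}

	// Read a datagram together with its out-of-band data.
	buf := make([]byte, 512)
	n, sess, err := dns.ReadFromSessionUDP(conn, buf)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d bytes from %s", n, sess.RemoteAddr())

	// Echo the payload back through the same session.
	if _, err := dns.WriteToSessionUDP(conn, buf[:n], sess); err != nil {
		log.Fatal(err)
	}
}
```

Replying through the session rather than a bare net.Addr is what lets the Linux implementation keep the reply's source pinned to the interface the query arrived on.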
+ +func setUDPSocketOptions4(conn *net.UDPConn) error { return nil } +func setUDPSocketOptions6(conn *net.UDPConn) error { return nil } +func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { return false, nil } +func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { return nil, nil } diff --git a/vendor/github.com/miekg/dns/udp_plan9.go b/vendor/github.com/miekg/dns/udp_plan9.go new file mode 100644 index 000000000000..b794deeba0ff --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_plan9.go @@ -0,0 +1,34 @@ +package dns + +import ( + "net" +) + +func setUDPSocketOptions(conn *net.UDPConn) error { return nil } + +// SessionUDP holds the remote address and the associated +// out-of-band data. +type SessionUDP struct { + raddr *net.UDPAddr + context []byte +} + +// RemoteAddr returns the remote network address. +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + oob := make([]byte, 40) + n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob) + if err != nil { + return n, nil, err + } + return n, &SessionUDP{raddr, oob[:oobn]}, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr) + return n, err +} diff --git a/vendor/github.com/miekg/dns/udp_test.go b/vendor/github.com/miekg/dns/udp_test.go deleted file mode 100644 index f27d79df7070..000000000000 --- a/vendor/github.com/miekg/dns/udp_test.go +++ /dev/null @@ -1,140 +0,0 @@ -// +build linux,!appengine - -package dns - -import ( - "bytes" - "net" - "runtime" - "strings" - "testing" - "time" - - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" -) - -func TestSetUDPSocketOptions(t *testing.T) { - // returns an error if we cannot resolve that address - testFamily := func(n, addr string) error { - a, err := net.ResolveUDPAddr(n, addr) - if err != nil { - return err - } - c, err := net.ListenUDP(n, a) - if err != nil { - return err - } - if err := setUDPSocketOptions(c); err != nil { - t.Fatalf("failed to set socket options: %v", err) - } - ch := make(chan *SessionUDP) - go func() { - // Set some deadline so this goroutine doesn't hang forever - c.SetReadDeadline(time.Now().Add(time.Minute)) - b := make([]byte, 1) - _, sess, err := ReadFromSessionUDP(c, b) - if err != nil { - t.Fatalf("failed to read from conn: %v", err) - } - ch <- sess - }() - - c2, err := net.Dial("udp", c.LocalAddr().String()) - if err != nil { - t.Fatalf("failed to dial udp: %v", err) - } - if _, err := c2.Write([]byte{1}); err != nil { - t.Fatalf("failed to write to conn: %v", err) - } - sess := <-ch - if len(sess.context) == 0 { - t.Fatalf("empty session context: %v", sess) - } - ip := parseDstFromOOB(sess.context) - if ip == nil { - t.Fatalf("failed to parse dst: %v", sess) - } - if !strings.Contains(c.LocalAddr().String(), ip.String()) { - t.Fatalf("dst was different than listen addr: %v != %v", ip.String(), c.LocalAddr().String()) - } - return nil - } - - // we require that ipv4 be supported - if err := testFamily("udp4", "127.0.0.1:0"); err != nil { - t.Fatalf("failed to test socket options on IPv4: %v", err) - } - // IPv6 might not be supported so these will just log - if err := testFamily("udp6", "[::1]:0"); err != nil
{ - t.Logf("failed to test socket options on IPv6-only: %v", err) - } - if err := testFamily("udp", "[::1]:0"); err != nil { - t.Logf("failed to test socket options on IPv6/IPv4: %v", err) - } -} - -func TestParseDstFromOOB(t *testing.T) { - if runtime.GOARCH != "amd64" { - // The cmsghdr struct differs in the width (32/64-bit) of - // lengths and the struct padding between architectures. - // The data below was only written with amd64 in mind, and - // thus the test must be skipped on other architectures. - t.Skip("skipping test on unsupported architecture") - } - - // dst is :ffff:100.100.100.100 - oob := []byte{36, 0, 0, 0, 0, 0, 0, 0, 41, 0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 100, 100, 100, 2, 0, 0, 0} - dst := parseDstFromOOB(oob) - dst4 := dst.To4() - if dst4 == nil { - t.Errorf("failed to parse IPv4 in IPv6: %v", dst) - } else if dst4.String() != "100.100.100.100" { - t.Errorf("unexpected IPv4: %v", dst4) - } - - // dst is 2001:db8::1 - oob = []byte{36, 0, 0, 0, 0, 0, 0, 0, 41, 0, 0, 0, 50, 0, 0, 0, 32, 1, 13, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0} - dst = parseDstFromOOB(oob) - dst6 := dst.To16() - if dst6 == nil { - t.Errorf("failed to parse IPv6: %v", dst) - } else if dst6.String() != "2001:db8::1" { - t.Errorf("unexpected IPv6: %v", dst4) - } - - // dst is 100.100.100.100 but was received on 10.10.10.10 - oob = []byte{28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 2, 0, 0, 0, 10, 10, 10, 10, 100, 100, 100, 100, 0, 0, 0, 0} - dst = parseDstFromOOB(oob) - dst4 = dst.To4() - if dst4 == nil { - t.Errorf("failed to parse IPv4: %v", dst) - } else if dst4.String() != "100.100.100.100" { - t.Errorf("unexpected IPv4: %v", dst4) - } -} - -func TestCorrectSource(t *testing.T) { - if runtime.GOARCH != "amd64" { - // See comment above in TestParseDstFromOOB. - t.Skip("skipping test on unsupported architecture") - } - - // dst is :ffff:100.100.100.100 which should be counted as IPv4 - oob := []byte{36, 0, 0, 0, 0, 0, 0, 0, 41, 0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 100, 100, 100, 2, 0, 0, 0} - soob := correctSource(oob) - cm4 := new(ipv4.ControlMessage) - cm4.Src = net.ParseIP("100.100.100.100") - if !bytes.Equal(soob, cm4.Marshal()) { - t.Errorf("unexpected oob for ipv4 address: %v", soob) - } - - // dst is 2001:db8::1 - oob = []byte{36, 0, 0, 0, 0, 0, 0, 0, 41, 0, 0, 0, 50, 0, 0, 0, 32, 1, 13, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0} - soob = correctSource(oob) - cm6 := new(ipv6.ControlMessage) - cm6.Src = net.ParseIP("2001:db8::1") - if !bytes.Equal(soob, cm6.Marshal()) { - t.Errorf("unexpected oob for IPv6 address: %v", soob) - } -} diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go index 6778c3c6cfe9..2ce4b3300287 100644 --- a/vendor/github.com/miekg/dns/udp_windows.go +++ b/vendor/github.com/miekg/dns/udp_windows.go @@ -4,17 +4,12 @@ package dns import "net" -// SessionUDP holds the remote address type SessionUDP struct { raddr *net.UDPAddr } -// RemoteAddr returns the remote network address. -func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } - // ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a // net.UDPAddr. -// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP. 
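The deleted tests above exercised parseDstFromOOB, the helper removed in the udp.go hunk further up. A condensed sketch of what it did, using the same golang.org/x/net calls as the removed code (dstFromOOB is a renamed stand-in):

```go
package main

import (
	"fmt"
	"net"

	"golang.org/x/net/ipv4"
	"golang.org/x/net/ipv6"
)

// dstFromOOB interprets the out-of-band data from ReadMsgUDP, trying IPv6
// control messages first and falling back to IPv4, and returns the
// destination address the datagram was sent to.
func dstFromOOB(oob []byte) net.IP {
	cm6 := new(ipv6.ControlMessage)
	if cm6.Parse(oob) == nil && cm6.Dst != nil {
		return cm6.Dst
	}
	cm4 := new(ipv4.ControlMessage)
	if cm4.Parse(oob) == nil && cm4.Dst != nil {
		return cm4.Dst
	}
	return nil
}

func main() {
	fmt.Println(dstFromOOB(nil)) // <nil> without real control-message bytes
}
```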
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { n, raddr, err := conn.ReadFrom(b) if err != nil { @@ -24,14 +19,16 @@ func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { return n, session, err } -// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. -// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP. +// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { n, err := conn.WriteTo(b, session.raddr) return n, err } -// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods -// use the standard method in udp.go for these. -func setUDPSocketOptions(*net.UDPConn) error { return nil } -func parseDstFromOOB([]byte, net.IP) net.IP { return nil } +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// setUDPSocketOptions sets the UDP socket options. +// This function is implemented on a per platform basis. See udp_*.go for more details +func setUDPSocketOptions(conn *net.UDPConn) error { + return nil +} diff --git a/vendor/github.com/miekg/dns/update_test.go b/vendor/github.com/miekg/dns/update_test.go index 5dba413a477a..56602dfe9f44 100644 --- a/vendor/github.com/miekg/dns/update_test.go +++ b/vendor/github.com/miekg/dns/update_test.go @@ -13,8 +13,11 @@ func TestDynamicUpdateParsing(t *testing.T) { typ == "Reserved" || typ == "None" || typ == "NXT" || typ == "MAILB" || typ == "MAILA" { continue } - if _, err := NewRR(prefix + typ); err != nil { + r, err := NewRR(prefix + typ) + if err != nil { t.Errorf("failure to parse: %s %s: %v", prefix, typ, err) + } else { + t.Logf("parsed: %s", r.String()) } } } @@ -53,7 +56,10 @@ func TestDynamicUpdateZeroRdataUnpack(t *testing.T) { func TestRemoveRRset(t *testing.T) { // Should add a zero data RR in Class ANY with a TTL of 0 // for each set mentioned in the RRs provided to it. - rr := testRR(". 100 IN A 127.0.0.1") + rr, err := NewRR(". 100 IN A 127.0.0.1") + if err != nil { + t.Fatalf("error constructing RR: %v", err) + } m := new(Msg) m.Ns = []RR{&RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY, Ttl: 0, Rdlength: 0}} expectstr := m.String() @@ -86,28 +92,28 @@ func TestPreReqAndRemovals(t *testing.T) { m.Id = 1234 // Use a full set of RRs each time, so we are sure the rdata is stripped. - rrName1 := testRR("name_used. 3600 IN A 127.0.0.1") - rrName2 := testRR("name_not_used. 3600 IN A 127.0.0.1") - rrRemove1 := testRR("remove1. 3600 IN A 127.0.0.1") - rrRemove2 := testRR("remove2. 3600 IN A 127.0.0.1") - rrRemove3 := testRR("remove3. 3600 IN A 127.0.0.1") - rrInsert := testRR("insert. 3600 IN A 127.0.0.1") - rrRrset1 := testRR("rrset_used1. 3600 IN A 127.0.0.1") - rrRrset2 := testRR("rrset_used2. 3600 IN A 127.0.0.1") - rrRrset3 := testRR("rrset_not_used. 3600 IN A 127.0.0.1") + rr_name1, _ := NewRR("name_used. 3600 IN A 127.0.0.1") + rr_name2, _ := NewRR("name_not_used. 3600 IN A 127.0.0.1") + rr_remove1, _ := NewRR("remove1. 3600 IN A 127.0.0.1") + rr_remove2, _ := NewRR("remove2. 3600 IN A 127.0.0.1") + rr_remove3, _ := NewRR("remove3. 3600 IN A 127.0.0.1") + rr_insert, _ := NewRR("insert. 3600 IN A 127.0.0.1") + rr_rrset1, _ := NewRR("rrset_used1. 3600 IN A 127.0.0.1") + rr_rrset2, _ := NewRR("rrset_used2. 3600 IN A 127.0.0.1") + rr_rrset3, _ := NewRR("rrset_not_used. 3600 IN A 127.0.0.1") // Handle the prereqs.
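The update_test.go hunks build RFC 2136 dynamic update messages: prerequisites land in the Answer section, updates in the Authority section. A short sketch of that flow using the package functions visible above (NewRR, NameUsed, Insert) plus Msg.SetUpdate, the package's update constructor; names and the zone are illustrative:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetUpdate("example.org.") // opcode UPDATE, zone in the Question section

	used, _ := dns.NewRR("name_used.example.org. 3600 IN A 127.0.0.1")
	ins, _ := dns.NewRR("insert.example.org. 3600 IN A 127.0.0.1")

	m.NameUsed([]dns.RR{used}) // prerequisite: this name must exist
	m.Insert([]dns.RR{ins})    // update: add this RR

	fmt.Println(m.String())
}
```

As the expected output in the test shows, the package strips rdata, TTL and class from the prerequisite RRs as the relevant operation requires.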
- m.NameUsed([]RR{rrName1}) - m.NameNotUsed([]RR{rrName2}) - m.RRsetUsed([]RR{rrRrset1}) - m.Used([]RR{rrRrset2}) - m.RRsetNotUsed([]RR{rrRrset3}) + m.NameUsed([]RR{rr_name1}) + m.NameNotUsed([]RR{rr_name2}) + m.RRsetUsed([]RR{rr_rrset1}) + m.Used([]RR{rr_rrset2}) + m.RRsetNotUsed([]RR{rr_rrset3}) // and now the updates. - m.RemoveName([]RR{rrRemove1}) - m.RemoveRRset([]RR{rrRemove2}) - m.Remove([]RR{rrRemove3}) - m.Insert([]RR{rrInsert}) + m.RemoveName([]RR{rr_remove1}) + m.RemoveRRset([]RR{rr_remove2}) + m.Remove([]RR{rr_remove3}) + m.Insert([]RR{rr_insert}) // This test function isn't a Example function because we print these RR with tabs at the // end and the Example function trim these, thus they never match. @@ -119,15 +125,15 @@ func TestPreReqAndRemovals(t *testing.T) { ;example.org. IN SOA ;; ANSWER SECTION: -name_used. 0 CLASS255 ANY +name_used. 0 ANY ANY name_not_used. 0 NONE ANY -rrset_used1. 0 CLASS255 A +rrset_used1. 0 ANY A rrset_used2. 3600 IN A 127.0.0.1 rrset_not_used. 0 NONE A ;; AUTHORITY SECTION: -remove1. 0 CLASS255 ANY -remove2. 0 CLASS255 A +remove1. 0 ANY ANY +remove2. 0 ANY A remove3. 0 NONE A 127.0.0.1 insert. 3600 IN A 127.0.0.1 ` diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go deleted file mode 100644 index dcc84e4a7dcc..000000000000 --- a/vendor/github.com/miekg/dns/version.go +++ /dev/null @@ -1,15 +0,0 @@ -package dns - -import "fmt" - -// Version is current version of this library. -var Version = V{1, 0, 8} - -// V holds the version of this library. -type V struct { - Major, Minor, Patch int -} - -func (v V) String() string { - return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) -} diff --git a/vendor/github.com/miekg/dns/version_test.go b/vendor/github.com/miekg/dns/version_test.go deleted file mode 100644 index 61c4048484be..000000000000 --- a/vendor/github.com/miekg/dns/version_test.go +++ /dev/null @@ -1,10 +0,0 @@ -package dns - -import "testing" - -func TestVersion(t *testing.T) { - v := V{1, 0, 0} - if x := v.String(); x != "1.0.0" { - t.Fatalf("Failed to convert version %v, got: %s", v, x) - } -} diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go index 5d0ff5c8a275..7346deffbb73 100644 --- a/vendor/github.com/miekg/dns/xfr.go +++ b/vendor/github.com/miekg/dns/xfr.go @@ -1,7 +1,6 @@ package dns import ( - "fmt" "time" ) @@ -17,7 +16,7 @@ type Transfer struct { DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - TsigSecret map[string]string // Secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + TsigSecret map[string]string // Secret(s) for Tsig map[], zonename must be fully qualified tsigTimersOnly bool } @@ -51,18 +50,18 @@ func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { env = make(chan *Envelope) go func() { if q.Question[0].Qtype == TypeAXFR { - go t.inAxfr(q, env) + go t.inAxfr(q.Id, env) return } if q.Question[0].Qtype == TypeIXFR { - go t.inIxfr(q, env) + go t.inIxfr(q.Id, env) return } }() return env, nil } -func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) { +func (t *Transfer) inAxfr(id uint16, c chan *Envelope) { first := true defer t.Close() defer close(c) @@ -77,15 +76,11 @@ func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) { c <- &Envelope{nil, err} return } - 
if q.Id != in.Id { + if id != in.Id { c <- &Envelope{in.Answer, ErrId} return } if first { - if in.Rcode != RcodeSuccess { - c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} - return - } if !isSOAFirst(in) { c <- &Envelope{in.Answer, ErrSoa} return @@ -110,11 +105,9 @@ func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) { } } -func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { +func (t *Transfer) inIxfr(id uint16, c chan *Envelope) { serial := uint32(0) // The first serial seen is the current server serial - axfr := true - n := 0 - qser := q.Ns[0].(*SOA).Serial + first := true defer t.Close() defer close(c) timeout := dnsTimeout @@ -128,15 +121,17 @@ func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { c <- &Envelope{nil, err} return } - if q.Id != in.Id { + if id != in.Id { c <- &Envelope{in.Answer, ErrId} return } - if in.Rcode != RcodeSuccess { - c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} - return - } - if n == 0 { + if first { + // A single SOA RR signals "no changes" + if len(in.Answer) == 1 && isSOAFirst(in) { + c <- &Envelope{in.Answer, nil} + return + } + // Check if the returned answer is ok if !isSOAFirst(in) { c <- &Envelope{in.Answer, ErrSoa} @@ -144,30 +139,21 @@ func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { } // This serial is important serial = in.Answer[0].(*SOA).Serial - // Check if there are no changes in zone - if qser >= serial { - c <- &Envelope{in.Answer, nil} - return - } + first = !first } + // Now we need to check each message for SOA records, to see what we need to do - t.tsigTimersOnly = true - for _, rr := range in.Answer { - if v, ok := rr.(*SOA); ok { + if !first { + t.tsigTimersOnly = true + // If the last record in the IXFR contains the servers' SOA, we should quit + if v, ok := in.Answer[len(in.Answer)-1].(*SOA); ok { if v.Serial == serial { - n++ - // quit if it's a full axfr or the the servers' SOA is repeated the third time - if axfr && n == 2 || n == 3 { - c <- &Envelope{in.Answer, nil} - return - } - } else if axfr { - // it's an ixfr - axfr = false + c <- &Envelope{in.Answer, nil} + return } } + c <- &Envelope{in.Answer, nil} } - c <- &Envelope{in.Answer, nil} } } @@ -256,5 +242,3 @@ func isSOALast(in *Msg) bool { } return false } - -const errXFR = "bad xfr rcode: %d" diff --git a/vendor/github.com/miekg/dns/xfr_test.go b/vendor/github.com/miekg/dns/xfr_test.go new file mode 100644 index 000000000000..1337eec65462 --- /dev/null +++ b/vendor/github.com/miekg/dns/xfr_test.go @@ -0,0 +1,161 @@ +// +build net + +package dns + +import ( + "net" + "testing" + "time" +) + +func getIP(s string) string { + a, err := net.LookupAddr(s) + if err != nil { + return "" + } + return a[0] +} + +// flaky, need to setup local server and test from +// that. +func TestAXFR_Miek(t *testing.T) { + // This test runs against a server maintained by Miek + if testing.Short() { + return + } + m := new(Msg) + m.SetAxfr("miek.nl.") + + server := getIP("linode.atoom.net") + + tr := new(Transfer) + + if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil { + t.Fatal("failed to setup axfr: ", err) + } else { + for ex := range a { + if ex.Error != nil { + t.Errorf("error %v", ex.Error) + break + } + for _, rr := range ex.RR { + t.Log(rr.String()) + } + } + } +} + +// fails. 
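The inIxfr rework above reverts to a simpler termination rule: remember the serial of the first SOA in the stream, then stop once a later envelope ends with an SOA carrying that same serial. A minimal sketch of that check (transferDone is an illustrative name, not a function in the package):

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

// transferDone reports whether an answer section closes the transfer:
// its last record is the server's SOA, repeating the serial seen first.
func transferDone(answers []dns.RR, serial uint32) bool {
	if len(answers) == 0 {
		return false
	}
	soa, ok := answers[len(answers)-1].(*dns.SOA)
	return ok && soa.Serial == serial
}

func main() {
	soa, _ := dns.NewRR("example.org. 3600 IN SOA ns1.example.org. hostmaster.example.org. 42 7200 3600 1209600 3600")
	fmt.Println(transferDone([]dns.RR{soa}, 42)) // true
}
```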
+func TestAXFR_NLNL_MultipleEnvelopes(t *testing.T) { + // This test runs against a server maintained by NLnet Labs + if testing.Short() { + return + } + m := new(Msg) + m.SetAxfr("nlnetlabs.nl.") + + server := getIP("open.nlnetlabs.nl.") + + tr := new(Transfer) + if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil { + t.Fatalf("failed to setup axfr %v for server: %v", err, server) + } else { + for ex := range a { + if ex.Error != nil { + t.Errorf("error %v", ex.Error) + break + } + } + } +} + +func TestAXFR_Miek_Tsig(t *testing.T) { + // This test runs against a server maintained by Miek + if testing.Short() { + return + } + m := new(Msg) + m.SetAxfr("example.nl.") + m.SetTsig("axfr.", HmacMD5, 300, time.Now().Unix()) + + tr := new(Transfer) + tr.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + + if a, err := tr.In(m, "176.58.119.54:53"); err != nil { + t.Fatal("failed to setup axfr: ", err) + } else { + for ex := range a { + if ex.Error != nil { + t.Errorf("error %v", ex.Error) + break + } + for _, rr := range ex.RR { + t.Log(rr.String()) + } + } + } +} + +func TestAXFR_SIDN_NSD3_NONE(t *testing.T) { testAXFRSIDN(t, "nsd", "") } +func TestAXFR_SIDN_NSD3_MD5(t *testing.T) { testAXFRSIDN(t, "nsd", HmacMD5) } +func TestAXFR_SIDN_NSD3_SHA1(t *testing.T) { testAXFRSIDN(t, "nsd", HmacSHA1) } +func TestAXFR_SIDN_NSD3_SHA256(t *testing.T) { testAXFRSIDN(t, "nsd", HmacSHA256) } + +func TestAXFR_SIDN_NSD4_NONE(t *testing.T) { testAXFRSIDN(t, "nsd4", "") } +func TestAXFR_SIDN_NSD4_MD5(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacMD5) } +func TestAXFR_SIDN_NSD4_SHA1(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacSHA1) } +func TestAXFR_SIDN_NSD4_SHA256(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacSHA256) } + +func TestAXFR_SIDN_BIND9_NONE(t *testing.T) { testAXFRSIDN(t, "bind9", "") } +func TestAXFR_SIDN_BIND9_MD5(t *testing.T) { testAXFRSIDN(t, "bind9", HmacMD5) } +func TestAXFR_SIDN_BIND9_SHA1(t *testing.T) { testAXFRSIDN(t, "bind9", HmacSHA1) } +func TestAXFR_SIDN_BIND9_SHA256(t *testing.T) { testAXFRSIDN(t, "bind9", HmacSHA256) } + +func TestAXFR_SIDN_KNOT_NONE(t *testing.T) { testAXFRSIDN(t, "knot", "") } +func TestAXFR_SIDN_KNOT_MD5(t *testing.T) { testAXFRSIDN(t, "knot", HmacMD5) } +func TestAXFR_SIDN_KNOT_SHA1(t *testing.T) { testAXFRSIDN(t, "knot", HmacSHA1) } +func TestAXFR_SIDN_KNOT_SHA256(t *testing.T) { testAXFRSIDN(t, "knot", HmacSHA256) } + +func TestAXFR_SIDN_POWERDNS_NONE(t *testing.T) { testAXFRSIDN(t, "powerdns", "") } +func TestAXFR_SIDN_POWERDNS_MD5(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacMD5) } +func TestAXFR_SIDN_POWERDNS_SHA1(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacSHA1) } +func TestAXFR_SIDN_POWERDNS_SHA256(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacSHA256) } + +func TestAXFR_SIDN_YADIFA_NONE(t *testing.T) { testAXFRSIDN(t, "yadifa", "") } +func TestAXFR_SIDN_YADIFA_MD5(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacMD5) } +func TestAXFR_SIDN_YADIFA_SHA1(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacSHA1) } +func TestAXFR_SIDN_YADIFA_SHA256(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacSHA256) } + +func testAXFRSIDN(t *testing.T, host, alg string) { + // This tests run against a server maintained by SIDN labs, see: + // https://workbench.sidnlabs.nl/ + if testing.Short() { + return + } + x := new(Transfer) + x.TsigSecret = map[string]string{ + "wb_md5.": "Wu/utSasZUkoeCNku152Zw==", + "wb_sha1_longkey.": "uhMpEhPq/RAD9Bt4mqhfmi+7ZdKmjLQb/lcrqYPXR4s/nnbsqw==", + "wb_sha256.": "npfrIJjt/MJOjGJoBNZtsjftKMhkSpIYMv2RzRZt1f8=", + } + 
keyname := map[string]string{ + HmacMD5: "wb_md5.", + HmacSHA1: "wb_sha1_longkey.", + HmacSHA256: "wb_sha256.", + }[alg] + + m := new(Msg) + m.SetAxfr("types.wb.sidnlabs.nl.") + if keyname != "" { + m.SetTsig(keyname, alg, 300, time.Now().Unix()) + } + c, err := x.In(m, host+".sidnlabs.nl:53") + if err != nil { + t.Fatal(err) + } + for e := range c { + if e.Error != nil { + t.Fatal(e.Error) + } + } +} diff --git a/vendor/github.com/miekg/dns/zcompress.go b/vendor/github.com/miekg/dns/zcompress.go deleted file mode 100644 index a2c09dd483b4..000000000000 --- a/vendor/github.com/miekg/dns/zcompress.go +++ /dev/null @@ -1,155 +0,0 @@ -// Code generated by "go run compress_generate.go"; DO NOT EDIT. - -package dns - -func compressionLenHelperType(c map[string]int, r RR, initLen int) int { - currentLen := initLen - switch x := r.(type) { - case *AFSDB: - currentLen -= len(x.Hostname) + 1 - currentLen += compressionLenHelper(c, x.Hostname, currentLen) - case *CNAME: - currentLen -= len(x.Target) + 1 - currentLen += compressionLenHelper(c, x.Target, currentLen) - case *DNAME: - currentLen -= len(x.Target) + 1 - currentLen += compressionLenHelper(c, x.Target, currentLen) - case *HIP: - for i := range x.RendezvousServers { - currentLen -= len(x.RendezvousServers[i]) + 1 - } - for i := range x.RendezvousServers { - currentLen += compressionLenHelper(c, x.RendezvousServers[i], currentLen) - } - case *KX: - currentLen -= len(x.Exchanger) + 1 - currentLen += compressionLenHelper(c, x.Exchanger, currentLen) - case *LP: - currentLen -= len(x.Fqdn) + 1 - currentLen += compressionLenHelper(c, x.Fqdn, currentLen) - case *MB: - currentLen -= len(x.Mb) + 1 - currentLen += compressionLenHelper(c, x.Mb, currentLen) - case *MD: - currentLen -= len(x.Md) + 1 - currentLen += compressionLenHelper(c, x.Md, currentLen) - case *MF: - currentLen -= len(x.Mf) + 1 - currentLen += compressionLenHelper(c, x.Mf, currentLen) - case *MG: - currentLen -= len(x.Mg) + 1 - currentLen += compressionLenHelper(c, x.Mg, currentLen) - case *MINFO: - currentLen -= len(x.Rmail) + 1 - currentLen += compressionLenHelper(c, x.Rmail, currentLen) - currentLen -= len(x.Email) + 1 - currentLen += compressionLenHelper(c, x.Email, currentLen) - case *MR: - currentLen -= len(x.Mr) + 1 - currentLen += compressionLenHelper(c, x.Mr, currentLen) - case *MX: - currentLen -= len(x.Mx) + 1 - currentLen += compressionLenHelper(c, x.Mx, currentLen) - case *NAPTR: - currentLen -= len(x.Replacement) + 1 - currentLen += compressionLenHelper(c, x.Replacement, currentLen) - case *NS: - currentLen -= len(x.Ns) + 1 - currentLen += compressionLenHelper(c, x.Ns, currentLen) - case *NSAPPTR: - currentLen -= len(x.Ptr) + 1 - currentLen += compressionLenHelper(c, x.Ptr, currentLen) - case *NSEC: - currentLen -= len(x.NextDomain) + 1 - currentLen += compressionLenHelper(c, x.NextDomain, currentLen) - case *PTR: - currentLen -= len(x.Ptr) + 1 - currentLen += compressionLenHelper(c, x.Ptr, currentLen) - case *PX: - currentLen -= len(x.Map822) + 1 - currentLen += compressionLenHelper(c, x.Map822, currentLen) - currentLen -= len(x.Mapx400) + 1 - currentLen += compressionLenHelper(c, x.Mapx400, currentLen) - case *RP: - currentLen -= len(x.Mbox) + 1 - currentLen += compressionLenHelper(c, x.Mbox, currentLen) - currentLen -= len(x.Txt) + 1 - currentLen += compressionLenHelper(c, x.Txt, currentLen) - case *RRSIG: - currentLen -= len(x.SignerName) + 1 - currentLen += compressionLenHelper(c, x.SignerName, currentLen) - case *RT: - currentLen -= len(x.Host) + 1 - currentLen += 
compressionLenHelper(c, x.Host, currentLen) - case *SIG: - currentLen -= len(x.SignerName) + 1 - currentLen += compressionLenHelper(c, x.SignerName, currentLen) - case *SOA: - currentLen -= len(x.Ns) + 1 - currentLen += compressionLenHelper(c, x.Ns, currentLen) - currentLen -= len(x.Mbox) + 1 - currentLen += compressionLenHelper(c, x.Mbox, currentLen) - case *SRV: - currentLen -= len(x.Target) + 1 - currentLen += compressionLenHelper(c, x.Target, currentLen) - case *TALINK: - currentLen -= len(x.PreviousName) + 1 - currentLen += compressionLenHelper(c, x.PreviousName, currentLen) - currentLen -= len(x.NextName) + 1 - currentLen += compressionLenHelper(c, x.NextName, currentLen) - case *TKEY: - currentLen -= len(x.Algorithm) + 1 - currentLen += compressionLenHelper(c, x.Algorithm, currentLen) - case *TSIG: - currentLen -= len(x.Algorithm) + 1 - currentLen += compressionLenHelper(c, x.Algorithm, currentLen) - } - return currentLen - initLen -} - -func compressionLenSearchType(c map[string]int, r RR) (int, bool, int) { - switch x := r.(type) { - case *AFSDB: - k1, ok1, sz1 := compressionLenSearch(c, x.Hostname) - return k1, ok1, sz1 - case *CNAME: - k1, ok1, sz1 := compressionLenSearch(c, x.Target) - return k1, ok1, sz1 - case *MB: - k1, ok1, sz1 := compressionLenSearch(c, x.Mb) - return k1, ok1, sz1 - case *MD: - k1, ok1, sz1 := compressionLenSearch(c, x.Md) - return k1, ok1, sz1 - case *MF: - k1, ok1, sz1 := compressionLenSearch(c, x.Mf) - return k1, ok1, sz1 - case *MG: - k1, ok1, sz1 := compressionLenSearch(c, x.Mg) - return k1, ok1, sz1 - case *MINFO: - k1, ok1, sz1 := compressionLenSearch(c, x.Rmail) - k2, ok2, sz2 := compressionLenSearch(c, x.Email) - return k1 + k2, ok1 && ok2, sz1 + sz2 - case *MR: - k1, ok1, sz1 := compressionLenSearch(c, x.Mr) - return k1, ok1, sz1 - case *MX: - k1, ok1, sz1 := compressionLenSearch(c, x.Mx) - return k1, ok1, sz1 - case *NS: - k1, ok1, sz1 := compressionLenSearch(c, x.Ns) - return k1, ok1, sz1 - case *PTR: - k1, ok1, sz1 := compressionLenSearch(c, x.Ptr) - return k1, ok1, sz1 - case *RT: - k1, ok1, sz1 := compressionLenSearch(c, x.Host) - return k1, ok1, sz1 - case *SOA: - k1, ok1, sz1 := compressionLenSearch(c, x.Ns) - k2, ok2, sz2 := compressionLenSearch(c, x.Mbox) - return k1 + k2, ok1 && ok2, sz1 + sz2 - } - return 0, false, 0 -} diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go index 0d1f6f4daadd..e5f3cf2974c4 100644 --- a/vendor/github.com/miekg/dns/zmsg.go +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -1,4 +1,5 @@ -// Code generated by "go run msg_generate.go"; DO NOT EDIT. 
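The deleted zcompress.go helpers above fed the message-size estimator: each compressible domain name in an RR costs either its full wire length or, when a suffix has already been written, the leading labels plus a two-byte pointer (RFC 1035, Section 4.1.4). A deliberately simplified model of that accounting, using presentation-form strings as a stand-in for the wire encoding:

```go
package main

import (
	"fmt"
	"strings"
)

// compressedLen returns the approximate wire length of name at offset off,
// recording every suffix it writes so later names can point back to it.
func compressedLen(seen map[string]int, name string, off int) int {
	for i := 0; i < len(name); {
		if _, ok := seen[name[i:]]; ok {
			return i + 2 // copied labels plus a compression pointer
		}
		seen[name[i:]] = off + i // remember this suffix for later names
		j := strings.IndexByte(name[i:], '.')
		if j < 0 {
			break
		}
		i += j + 1
	}
	return len(name) + 1 // length bytes, labels and the root label
}

func main() {
	c := map[string]int{}
	fmt.Println(compressedLen(c, "www.example.org.", 12)) // 17: first occurrence
	fmt.Println(compressedLen(c, "ftp.example.org.", 29)) // 6: "ftp" label + pointer
}
```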
+// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from msg_generate.go package dns @@ -60,20 +61,6 @@ func (rr *ANY) pack(msg []byte, off int, compression map[string]int, compress bo return off, nil } -func (rr *AVC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packStringTxt(rr.Txt, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - func (rr *CAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { @@ -188,28 +175,6 @@ func (rr *CNAME) pack(msg []byte, off int, compression map[string]int, compress return off, nil } -func (rr *CSYNC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint32(rr.Serial, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packDataNsec(rr.TypeBitMap, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - func (rr *DHCID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { @@ -256,7 +221,7 @@ func (rr *DNAME) pack(msg []byte, off int, compression map[string]int, compress return off, err } headerEnd := off - off, err = PackDomainName(rr.Target, msg, off, compression, false) + off, err = PackDomainName(rr.Target, msg, off, compression, compress) if err != nil { return off, err } @@ -482,7 +447,7 @@ func (rr *KX) pack(msg []byte, off int, compression map[string]int, compress boo if err != nil { return off, err } - off, err = PackDomainName(rr.Exchanger, msg, off, compression, false) + off, err = PackDomainName(rr.Exchanger, msg, off, compression, compress) if err != nil { return off, err } @@ -574,7 +539,7 @@ func (rr *LP) pack(msg []byte, off int, compression map[string]int, compress boo if err != nil { return off, err } - off, err = PackDomainName(rr.Fqdn, msg, off, compression, false) + off, err = PackDomainName(rr.Fqdn, msg, off, compression, compress) if err != nil { return off, err } @@ -714,7 +679,7 @@ func (rr *NAPTR) pack(msg []byte, off int, compression map[string]int, compress if err != nil { return off, err } - off, err = PackDomainName(rr.Replacement, msg, off, compression, false) + off, err = PackDomainName(rr.Replacement, msg, off, compression, compress) if err != nil { return off, err } @@ -788,7 +753,7 @@ func (rr *NSAPPTR) pack(msg []byte, off int, compression map[string]int, compres return off, err } headerEnd := off - off, err = PackDomainName(rr.Ptr, msg, off, compression, false) + off, err = PackDomainName(rr.Ptr, msg, off, compression, compress) if err != nil { return off, err } @@ -802,7 +767,7 @@ func (rr *NSEC) pack(msg []byte, off int, compression map[string]int, compress b return off, err } headerEnd := off - off, err = PackDomainName(rr.NextDomain, msg, off, compression, false) + off, err = PackDomainName(rr.NextDomain, msg, off, compression, compress) if err != nil { return off, err } @@ -836,12 +801,9 @@ func (rr *NSEC3) pack(msg []byte, off int, compression 
map[string]int, compress if err != nil { return off, err } - // Only pack salt if value is not "-", i.e. empty - if rr.Salt != "-" { - off, err = packStringHex(rr.Salt, msg, off) - if err != nil { - return off, err - } + off, err = packStringHex(rr.Salt, msg, off) + if err != nil { + return off, err } off, err = packUint8(rr.HashLength, msg, off) if err != nil { @@ -881,12 +843,9 @@ func (rr *NSEC3PARAM) pack(msg []byte, off int, compression map[string]int, comp if err != nil { return off, err } - // Only pack salt if value is not "-", i.e. empty - if rr.Salt != "-" { - off, err = packStringHex(rr.Salt, msg, off) - if err != nil { - return off, err - } + off, err = packStringHex(rr.Salt, msg, off) + if err != nil { + return off, err } rr.Header().Rdlength = uint16(off - headerEnd) return off, nil @@ -944,11 +903,11 @@ func (rr *PX) pack(msg []byte, off int, compression map[string]int, compress boo if err != nil { return off, err } - off, err = PackDomainName(rr.Map822, msg, off, compression, false) + off, err = PackDomainName(rr.Map822, msg, off, compression, compress) if err != nil { return off, err } - off, err = PackDomainName(rr.Mapx400, msg, off, compression, false) + off, err = PackDomainName(rr.Mapx400, msg, off, compression, compress) if err != nil { return off, err } @@ -1002,11 +961,11 @@ func (rr *RP) pack(msg []byte, off int, compression map[string]int, compress boo return off, err } headerEnd := off - off, err = PackDomainName(rr.Mbox, msg, off, compression, false) + off, err = PackDomainName(rr.Mbox, msg, off, compression, compress) if err != nil { return off, err } - off, err = PackDomainName(rr.Txt, msg, off, compression, false) + off, err = PackDomainName(rr.Txt, msg, off, compression, compress) if err != nil { return off, err } @@ -1048,7 +1007,7 @@ func (rr *RRSIG) pack(msg []byte, off int, compression map[string]int, compress if err != nil { return off, err } - off, err = PackDomainName(rr.SignerName, msg, off, compression, false) + off, err = PackDomainName(rr.SignerName, msg, off, compression, compress) if err != nil { return off, err } @@ -1112,7 +1071,7 @@ func (rr *SIG) pack(msg []byte, off int, compression map[string]int, compress bo if err != nil { return off, err } - off, err = PackDomainName(rr.SignerName, msg, off, compression, false) + off, err = PackDomainName(rr.SignerName, msg, off, compression, compress) if err != nil { return off, err } @@ -1124,32 +1083,6 @@ func (rr *SIG) pack(msg []byte, off int, compression map[string]int, compress bo return off, nil } -func (rr *SMIMEA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint8(rr.Usage, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Selector, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.MatchingType, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Certificate, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - func (rr *SOA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { off, err := rr.Hdr.pack(msg, off, compression, compress) if err != nil { @@ -1220,7 +1153,7 @@ func (rr *SRV) pack(msg []byte, off int, compression map[string]int, compress bo if err != nil { return off, err } - off, err = PackDomainName(rr.Target, msg, off, compression, 
false) + off, err = PackDomainName(rr.Target, msg, off, compression, compress) if err != nil { return off, err } @@ -1282,11 +1215,11 @@ func (rr *TALINK) pack(msg []byte, off int, compression map[string]int, compress return off, err } headerEnd := off - off, err = PackDomainName(rr.PreviousName, msg, off, compression, false) + off, err = PackDomainName(rr.PreviousName, msg, off, compression, compress) if err != nil { return off, err } - off, err = PackDomainName(rr.NextName, msg, off, compression, false) + off, err = PackDomainName(rr.NextName, msg, off, compression, compress) if err != nil { return off, err } @@ -1300,7 +1233,7 @@ func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress b return off, err } headerEnd := off - off, err = PackDomainName(rr.Algorithm, msg, off, compression, false) + off, err = PackDomainName(rr.Algorithm, msg, off, compression, compress) if err != nil { return off, err } @@ -1324,7 +1257,7 @@ func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress b if err != nil { return off, err } - off, err = packStringHex(rr.Key, msg, off) + off, err = packString(rr.Key, msg, off) if err != nil { return off, err } @@ -1332,7 +1265,7 @@ func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress b if err != nil { return off, err } - off, err = packStringHex(rr.OtherData, msg, off) + off, err = packString(rr.OtherData, msg, off) if err != nil { return off, err } @@ -1372,7 +1305,7 @@ func (rr *TSIG) pack(msg []byte, off int, compression map[string]int, compress b return off, err } headerEnd := off - off, err = PackDomainName(rr.Algorithm, msg, off, compression, false) + off, err = PackDomainName(rr.Algorithm, msg, off, compression, compress) if err != nil { return off, err } @@ -1563,23 +1496,6 @@ func unpackANY(h RR_Header, msg []byte, off int) (RR, int, error) { return rr, off, err } -func unpackAVC(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(AVC) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Txt, off, err = unpackStringTxt(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - func unpackCAA(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(CAA) rr.Hdr = h @@ -1742,37 +1658,6 @@ func unpackCNAME(h RR_Header, msg []byte, off int) (RR, int, error) { return rr, off, err } -func unpackCSYNC(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CSYNC) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Serial, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Flags, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.TypeBitMap, off, err = unpackDataNsec(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - func unpackDHCID(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(DHCID) rr.Hdr = h @@ -2682,7 +2567,7 @@ func unpackNSEC3PARAM(h RR_Header, msg []byte, off int) (RR, int, error) { if off == len(msg) { return rr, off, nil } - rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) + rr.Salt, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { return rr, off, err } @@ -3020,44 +2905,6 @@ func unpackSIG(h RR_Header, msg []byte, off int) (RR, int, error) { return rr, off, err } 
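Several hunks above flip the last argument of PackDomainName between a hard-coded false and the caller's compress flag, that is, between never compressing a particular name and honoring the message-wide setting. A sketch of what the flag changes, using the exported PackDomainName with a shared compression map; the byte counts in the comments assume "example.org.":

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	msg := make([]byte, 64)
	comp := map[string]int{} // shared compression map across both calls

	off, _ := dns.PackDomainName("example.org.", msg, 0, comp, true)
	fmt.Println("first name uses", off, "bytes") // 13: full encoding

	off2, _ := dns.PackDomainName("example.org.", msg, off, comp, true)
	fmt.Println("repeat uses", off2-off, "bytes") // 2: pointer to the first
}
```

With the flag false, the second call would also emit all 13 bytes, which is the safe choice for RR types whose names receivers may not know how to decompress.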
-func unpackSMIMEA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SMIMEA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Usage, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Selector, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.MatchingType, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - func unpackSOA(h RR_Header, msg []byte, off int) (RR, int, error) { rr := new(SOA) rr.Hdr = h @@ -3317,10 +3164,13 @@ func unpackTKEY(h RR_Header, msg []byte, off int) (RR, int, error) { if off == len(msg) { return rr, off, nil } - rr.Key, off, err = unpackStringHex(msg, off, off+int(rr.KeySize)) + rr.Key, off, err = unpackString(msg, off) if err != nil { return rr, off, err } + if off == len(msg) { + return rr, off, nil + } rr.OtherLen, off, err = unpackUint16(msg, off) if err != nil { return rr, off, err @@ -3328,7 +3178,7 @@ func unpackTKEY(h RR_Header, msg []byte, off int) (RR, int, error) { if off == len(msg) { return rr, off, nil } - rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) + rr.OtherData, off, err = unpackString(msg, off) if err != nil { return rr, off, err } @@ -3547,13 +3397,11 @@ var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){ TypeAAAA: unpackAAAA, TypeAFSDB: unpackAFSDB, TypeANY: unpackANY, - TypeAVC: unpackAVC, TypeCAA: unpackCAA, TypeCDNSKEY: unpackCDNSKEY, TypeCDS: unpackCDS, TypeCERT: unpackCERT, TypeCNAME: unpackCNAME, - TypeCSYNC: unpackCSYNC, TypeDHCID: unpackDHCID, TypeDLV: unpackDLV, TypeDNAME: unpackDNAME, @@ -3597,7 +3445,6 @@ var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){ TypeRRSIG: unpackRRSIG, TypeRT: unpackRT, TypeSIG: unpackSIG, - TypeSMIMEA: unpackSMIMEA, TypeSOA: unpackSOA, TypeSPF: unpackSPF, TypeSRV: unpackSRV, diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go index 965753b11b27..a4ecbb0cc0fa 100644 --- a/vendor/github.com/miekg/dns/ztypes.go +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -1,4 +1,5 @@ -// Code generated by "go run types_generate.go"; DO NOT EDIT. 
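These z*.go files (zmsg.go above, ztypes.go below) are outputs of the generators whose diffs appear earlier, so in-place edits get overwritten on the next regeneration. A sketch of the go:generate wiring that rebuilds them; the placement of the directives in a single file is assumed for illustration:

```go
// Regenerate the z*.go files after changing types.go or the generators:
//
//	go generate ./...
//
//go:generate go run types_generate.go
//go:generate go run msg_generate.go

package dns
```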
+// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from type_generate.go package dns @@ -13,13 +14,11 @@ var TypeToRR = map[uint16]func() RR{ TypeAAAA: func() RR { return new(AAAA) }, TypeAFSDB: func() RR { return new(AFSDB) }, TypeANY: func() RR { return new(ANY) }, - TypeAVC: func() RR { return new(AVC) }, TypeCAA: func() RR { return new(CAA) }, TypeCDNSKEY: func() RR { return new(CDNSKEY) }, TypeCDS: func() RR { return new(CDS) }, TypeCERT: func() RR { return new(CERT) }, TypeCNAME: func() RR { return new(CNAME) }, - TypeCSYNC: func() RR { return new(CSYNC) }, TypeDHCID: func() RR { return new(DHCID) }, TypeDLV: func() RR { return new(DLV) }, TypeDNAME: func() RR { return new(DNAME) }, @@ -63,7 +62,6 @@ var TypeToRR = map[uint16]func() RR{ TypeRRSIG: func() RR { return new(RRSIG) }, TypeRT: func() RR { return new(RT) }, TypeSIG: func() RR { return new(SIG) }, - TypeSMIMEA: func() RR { return new(SMIMEA) }, TypeSOA: func() RR { return new(SOA) }, TypeSPF: func() RR { return new(SPF) }, TypeSRV: func() RR { return new(SRV) }, @@ -87,14 +85,12 @@ var TypeToString = map[uint16]string{ TypeAFSDB: "AFSDB", TypeANY: "ANY", TypeATMA: "ATMA", - TypeAVC: "AVC", TypeAXFR: "AXFR", TypeCAA: "CAA", TypeCDNSKEY: "CDNSKEY", TypeCDS: "CDS", TypeCERT: "CERT", TypeCNAME: "CNAME", - TypeCSYNC: "CSYNC", TypeDHCID: "DHCID", TypeDLV: "DLV", TypeDNAME: "DNAME", @@ -145,7 +141,6 @@ var TypeToString = map[uint16]string{ TypeRT: "RT", TypeReserved: "Reserved", TypeSIG: "SIG", - TypeSMIMEA: "SMIMEA", TypeSOA: "SOA", TypeSPF: "SPF", TypeSRV: "SRV", @@ -164,17 +159,16 @@ var TypeToString = map[uint16]string{ TypeNSAPPTR: "NSAP-PTR", } +// Header() functions func (rr *A) Header() *RR_Header { return &rr.Hdr } func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } func (rr *ANY) Header() *RR_Header { return &rr.Hdr } -func (rr *AVC) Header() *RR_Header { return &rr.Hdr } func (rr *CAA) Header() *RR_Header { return &rr.Hdr } func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr } func (rr *CDS) Header() *RR_Header { return &rr.Hdr } func (rr *CERT) Header() *RR_Header { return &rr.Hdr } func (rr *CNAME) Header() *RR_Header { return &rr.Hdr } -func (rr *CSYNC) Header() *RR_Header { return &rr.Hdr } func (rr *DHCID) Header() *RR_Header { return &rr.Hdr } func (rr *DLV) Header() *RR_Header { return &rr.Hdr } func (rr *DNAME) Header() *RR_Header { return &rr.Hdr } @@ -219,7 +213,6 @@ func (rr *RP) Header() *RR_Header { return &rr.Hdr } func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr } func (rr *RT) Header() *RR_Header { return &rr.Hdr } func (rr *SIG) Header() *RR_Header { return &rr.Hdr } -func (rr *SMIMEA) Header() *RR_Header { return &rr.Hdr } func (rr *SOA) Header() *RR_Header { return &rr.Hdr } func (rr *SPF) Header() *RR_Header { return &rr.Hdr } func (rr *SRV) Header() *RR_Header { return &rr.Hdr } @@ -256,16 +249,9 @@ func (rr *ANY) len() int { l := rr.Hdr.len() return l } -func (rr *AVC) len() int { - l := rr.Hdr.len() - for _, x := range rr.Txt { - l += len(x) + 1 - } - return l -} func (rr *CAA) len() int { l := rr.Hdr.len() - l++ // Flag + l += 1 // Flag l += len(rr.Tag) + 1 l += len(rr.Value) return l @@ -274,7 +260,7 @@ func (rr *CERT) len() int { l := rr.Hdr.len() l += 2 // Type l += 2 // KeyTag - l++ // Algorithm + l += 1 // Algorithm l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) return l } @@ -296,16 +282,16 @@ func (rr *DNAME) len() int { func (rr *DNSKEY) len() int { l := rr.Hdr.len() l += 2 // Flags - l++ // Protocol - l++ 
// Algorithm + l += 1 // Protocol + l += 1 // Algorithm l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) return l } func (rr *DS) len() int { l := rr.Hdr.len() l += 2 // KeyTag - l++ // Algorithm - l++ // DigestType + l += 1 // Algorithm + l += 1 // DigestType l += len(rr.Digest)/2 + 1 return l } @@ -344,10 +330,10 @@ func (rr *HINFO) len() int { } func (rr *HIP) len() int { l := rr.Hdr.len() - l++ // HitLength - l++ // PublicKeyAlgorithm + l += 1 // HitLength + l += 1 // PublicKeyAlgorithm l += 2 // PublicKeyLength - l += len(rr.Hit) / 2 + l += len(rr.Hit)/2 + 1 l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) for _, x := range rr.RendezvousServers { l += len(x) + 1 @@ -374,10 +360,10 @@ func (rr *L64) len() int { } func (rr *LOC) len() int { l := rr.Hdr.len() - l++ // Version - l++ // Size - l++ // HorizPre - l++ // VertPre + l += 1 // Version + l += 1 // Size + l += 1 // HorizPre + l += 1 // VertPre l += 4 // Latitude l += 4 // Longitude l += 4 // Altitude @@ -466,11 +452,11 @@ func (rr *NSAPPTR) len() int { } func (rr *NSEC3PARAM) len() int { l := rr.Hdr.len() - l++ // Hash - l++ // Flags + l += 1 // Hash + l += 1 // Flags l += 2 // Iterations - l++ // SaltLength - l += len(rr.Salt) / 2 + l += 1 // SaltLength + l += len(rr.Salt)/2 + 1 return l } func (rr *OPENPGPKEY) len() int { @@ -498,8 +484,8 @@ func (rr *RFC3597) len() int { func (rr *RKEY) len() int { l := rr.Hdr.len() l += 2 // Flags - l++ // Protocol - l++ // Algorithm + l += 1 // Protocol + l += 1 // Algorithm l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) return l } @@ -512,8 +498,8 @@ func (rr *RP) len() int { func (rr *RRSIG) len() int { l := rr.Hdr.len() l += 2 // TypeCovered - l++ // Algorithm - l++ // Labels + l += 1 // Algorithm + l += 1 // Labels l += 4 // OrigTtl l += 4 // Expiration l += 4 // Inception @@ -528,14 +514,6 @@ func (rr *RT) len() int { l += len(rr.Host) + 1 return l } -func (rr *SMIMEA) len() int { - l := rr.Hdr.len() - l++ // Usage - l++ // Selector - l++ // MatchingType - l += len(rr.Certificate)/2 + 1 - return l -} func (rr *SOA) len() int { l := rr.Hdr.len() l += len(rr.Ns) + 1 @@ -564,16 +542,16 @@ func (rr *SRV) len() int { } func (rr *SSHFP) len() int { l := rr.Hdr.len() - l++ // Algorithm - l++ // Type + l += 1 // Algorithm + l += 1 // Type l += len(rr.FingerPrint)/2 + 1 return l } func (rr *TA) len() int { l := rr.Hdr.len() l += 2 // KeyTag - l++ // Algorithm - l++ // DigestType + l += 1 // Algorithm + l += 1 // DigestType l += len(rr.Digest)/2 + 1 return l } @@ -591,16 +569,16 @@ func (rr *TKEY) len() int { l += 2 // Mode l += 2 // Error l += 2 // KeySize - l += len(rr.Key) / 2 + l += len(rr.Key) + 1 l += 2 // OtherLen - l += len(rr.OtherData) / 2 + l += len(rr.OtherData) + 1 return l } func (rr *TLSA) len() int { l := rr.Hdr.len() - l++ // Usage - l++ // Selector - l++ // MatchingType + l += 1 // Usage + l += 1 // Selector + l += 1 // MatchingType l += len(rr.Certificate)/2 + 1 return l } @@ -610,11 +588,11 @@ func (rr *TSIG) len() int { l += 6 // TimeSigned l += 2 // Fudge l += 2 // MACSize - l += len(rr.MAC) / 2 + l += len(rr.MAC)/2 + 1 l += 2 // OrigId l += 2 // Error l += 2 // OtherLen - l += len(rr.OtherData) / 2 + l += len(rr.OtherData)/2 + 1 return l } func (rr *TXT) len() int { @@ -649,215 +627,202 @@ func (rr *X25) len() int { // copy() functions func (rr *A) copy() RR { - return &A{rr.Hdr, copyIP(rr.A)} + return &A{*rr.Hdr.copyHeader(), copyIP(rr.A)} } func (rr *AAAA) copy() RR { - return &AAAA{rr.Hdr, copyIP(rr.AAAA)} + return &AAAA{*rr.Hdr.copyHeader(), 
copyIP(rr.AAAA)} } func (rr *AFSDB) copy() RR { - return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname} + return &AFSDB{*rr.Hdr.copyHeader(), rr.Subtype, rr.Hostname} } func (rr *ANY) copy() RR { - return &ANY{rr.Hdr} -} -func (rr *AVC) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &AVC{rr.Hdr, Txt} + return &ANY{*rr.Hdr.copyHeader()} } func (rr *CAA) copy() RR { - return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value} + return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value} } func (rr *CERT) copy() RR { - return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} + return &CERT{*rr.Hdr.copyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} } func (rr *CNAME) copy() RR { - return &CNAME{rr.Hdr, rr.Target} -} -func (rr *CSYNC) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &CSYNC{rr.Hdr, rr.Serial, rr.Flags, TypeBitMap} + return &CNAME{*rr.Hdr.copyHeader(), rr.Target} } func (rr *DHCID) copy() RR { - return &DHCID{rr.Hdr, rr.Digest} + return &DHCID{*rr.Hdr.copyHeader(), rr.Digest} } func (rr *DNAME) copy() RR { - return &DNAME{rr.Hdr, rr.Target} + return &DNAME{*rr.Hdr.copyHeader(), rr.Target} } func (rr *DNSKEY) copy() RR { - return &DNSKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} + return &DNSKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} } func (rr *DS) copy() RR { - return &DS{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} + return &DS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} } func (rr *EID) copy() RR { - return &EID{rr.Hdr, rr.Endpoint} + return &EID{*rr.Hdr.copyHeader(), rr.Endpoint} } func (rr *EUI48) copy() RR { - return &EUI48{rr.Hdr, rr.Address} + return &EUI48{*rr.Hdr.copyHeader(), rr.Address} } func (rr *EUI64) copy() RR { - return &EUI64{rr.Hdr, rr.Address} + return &EUI64{*rr.Hdr.copyHeader(), rr.Address} } func (rr *GID) copy() RR { - return &GID{rr.Hdr, rr.Gid} + return &GID{*rr.Hdr.copyHeader(), rr.Gid} } func (rr *GPOS) copy() RR { - return &GPOS{rr.Hdr, rr.Longitude, rr.Latitude, rr.Altitude} + return &GPOS{*rr.Hdr.copyHeader(), rr.Longitude, rr.Latitude, rr.Altitude} } func (rr *HINFO) copy() RR { - return &HINFO{rr.Hdr, rr.Cpu, rr.Os} + return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os} } func (rr *HIP) copy() RR { RendezvousServers := make([]string, len(rr.RendezvousServers)) copy(RendezvousServers, rr.RendezvousServers) - return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} + return &HIP{*rr.Hdr.copyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} } func (rr *KX) copy() RR { - return &KX{rr.Hdr, rr.Preference, rr.Exchanger} + return &KX{*rr.Hdr.copyHeader(), rr.Preference, rr.Exchanger} } func (rr *L32) copy() RR { - return &L32{rr.Hdr, rr.Preference, copyIP(rr.Locator32)} + return &L32{*rr.Hdr.copyHeader(), rr.Preference, copyIP(rr.Locator32)} } func (rr *L64) copy() RR { - return &L64{rr.Hdr, rr.Preference, rr.Locator64} + return &L64{*rr.Hdr.copyHeader(), rr.Preference, rr.Locator64} } func (rr *LOC) copy() RR { - return &LOC{rr.Hdr, rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} + return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} } func (rr *LP) copy() RR { - return &LP{rr.Hdr, rr.Preference, rr.Fqdn} + return &LP{*rr.Hdr.copyHeader(), 
rr.Preference, rr.Fqdn} } func (rr *MB) copy() RR { - return &MB{rr.Hdr, rr.Mb} + return &MB{*rr.Hdr.copyHeader(), rr.Mb} } func (rr *MD) copy() RR { - return &MD{rr.Hdr, rr.Md} + return &MD{*rr.Hdr.copyHeader(), rr.Md} } func (rr *MF) copy() RR { - return &MF{rr.Hdr, rr.Mf} + return &MF{*rr.Hdr.copyHeader(), rr.Mf} } func (rr *MG) copy() RR { - return &MG{rr.Hdr, rr.Mg} + return &MG{*rr.Hdr.copyHeader(), rr.Mg} } func (rr *MINFO) copy() RR { - return &MINFO{rr.Hdr, rr.Rmail, rr.Email} + return &MINFO{*rr.Hdr.copyHeader(), rr.Rmail, rr.Email} } func (rr *MR) copy() RR { - return &MR{rr.Hdr, rr.Mr} + return &MR{*rr.Hdr.copyHeader(), rr.Mr} } func (rr *MX) copy() RR { - return &MX{rr.Hdr, rr.Preference, rr.Mx} + return &MX{*rr.Hdr.copyHeader(), rr.Preference, rr.Mx} } func (rr *NAPTR) copy() RR { - return &NAPTR{rr.Hdr, rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} + return &NAPTR{*rr.Hdr.copyHeader(), rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} } func (rr *NID) copy() RR { - return &NID{rr.Hdr, rr.Preference, rr.NodeID} + return &NID{*rr.Hdr.copyHeader(), rr.Preference, rr.NodeID} } func (rr *NIMLOC) copy() RR { - return &NIMLOC{rr.Hdr, rr.Locator} + return &NIMLOC{*rr.Hdr.copyHeader(), rr.Locator} } func (rr *NINFO) copy() RR { ZSData := make([]string, len(rr.ZSData)) copy(ZSData, rr.ZSData) - return &NINFO{rr.Hdr, ZSData} + return &NINFO{*rr.Hdr.copyHeader(), ZSData} } func (rr *NS) copy() RR { - return &NS{rr.Hdr, rr.Ns} + return &NS{*rr.Hdr.copyHeader(), rr.Ns} } func (rr *NSAPPTR) copy() RR { - return &NSAPPTR{rr.Hdr, rr.Ptr} + return &NSAPPTR{*rr.Hdr.copyHeader(), rr.Ptr} } func (rr *NSEC) copy() RR { TypeBitMap := make([]uint16, len(rr.TypeBitMap)) copy(TypeBitMap, rr.TypeBitMap) - return &NSEC{rr.Hdr, rr.NextDomain, TypeBitMap} + return &NSEC{*rr.Hdr.copyHeader(), rr.NextDomain, TypeBitMap} } func (rr *NSEC3) copy() RR { TypeBitMap := make([]uint16, len(rr.TypeBitMap)) copy(TypeBitMap, rr.TypeBitMap) - return &NSEC3{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} + return &NSEC3{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} } func (rr *NSEC3PARAM) copy() RR { - return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} + return &NSEC3PARAM{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} } func (rr *OPENPGPKEY) copy() RR { - return &OPENPGPKEY{rr.Hdr, rr.PublicKey} + return &OPENPGPKEY{*rr.Hdr.copyHeader(), rr.PublicKey} } func (rr *OPT) copy() RR { Option := make([]EDNS0, len(rr.Option)) copy(Option, rr.Option) - return &OPT{rr.Hdr, Option} + return &OPT{*rr.Hdr.copyHeader(), Option} } func (rr *PTR) copy() RR { - return &PTR{rr.Hdr, rr.Ptr} + return &PTR{*rr.Hdr.copyHeader(), rr.Ptr} } func (rr *PX) copy() RR { - return &PX{rr.Hdr, rr.Preference, rr.Map822, rr.Mapx400} + return &PX{*rr.Hdr.copyHeader(), rr.Preference, rr.Map822, rr.Mapx400} } func (rr *RFC3597) copy() RR { - return &RFC3597{rr.Hdr, rr.Rdata} + return &RFC3597{*rr.Hdr.copyHeader(), rr.Rdata} } func (rr *RKEY) copy() RR { - return &RKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} + return &RKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} } func (rr *RP) copy() RR { - return &RP{rr.Hdr, rr.Mbox, rr.Txt} + return &RP{*rr.Hdr.copyHeader(), rr.Mbox, rr.Txt} } func (rr *RRSIG) copy() RR { - return &RRSIG{rr.Hdr, rr.TypeCovered, 
rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} + return &RRSIG{*rr.Hdr.copyHeader(), rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} } func (rr *RT) copy() RR { - return &RT{rr.Hdr, rr.Preference, rr.Host} -} -func (rr *SMIMEA) copy() RR { - return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} + return &RT{*rr.Hdr.copyHeader(), rr.Preference, rr.Host} } func (rr *SOA) copy() RR { - return &SOA{rr.Hdr, rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} + return &SOA{*rr.Hdr.copyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} } func (rr *SPF) copy() RR { Txt := make([]string, len(rr.Txt)) copy(Txt, rr.Txt) - return &SPF{rr.Hdr, Txt} + return &SPF{*rr.Hdr.copyHeader(), Txt} } func (rr *SRV) copy() RR { - return &SRV{rr.Hdr, rr.Priority, rr.Weight, rr.Port, rr.Target} + return &SRV{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target} } func (rr *SSHFP) copy() RR { - return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint} + return &SSHFP{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint} } func (rr *TA) copy() RR { - return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} + return &TA{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} } func (rr *TALINK) copy() RR { - return &TALINK{rr.Hdr, rr.PreviousName, rr.NextName} + return &TALINK{*rr.Hdr.copyHeader(), rr.PreviousName, rr.NextName} } func (rr *TKEY) copy() RR { - return &TKEY{rr.Hdr, rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} + return &TKEY{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} } func (rr *TLSA) copy() RR { - return &TLSA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} + return &TLSA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} } func (rr *TSIG) copy() RR { - return &TSIG{rr.Hdr, rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} + return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} } func (rr *TXT) copy() RR { Txt := make([]string, len(rr.Txt)) copy(Txt, rr.Txt) - return &TXT{rr.Hdr, Txt} + return &TXT{*rr.Hdr.copyHeader(), Txt} } func (rr *UID) copy() RR { - return &UID{rr.Hdr, rr.Uid} + return &UID{*rr.Hdr.copyHeader(), rr.Uid} } func (rr *UINFO) copy() RR { - return &UINFO{rr.Hdr, rr.Uinfo} + return &UINFO{*rr.Hdr.copyHeader(), rr.Uinfo} } func (rr *URI) copy() RR { - return &URI{rr.Hdr, rr.Priority, rr.Weight, rr.Target} + return &URI{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Target} } func (rr *X25) copy() RR { - return &X25{rr.Hdr, rr.PSDNAddress} + return &X25{*rr.Hdr.copyHeader(), rr.PSDNAddress} } diff --git a/vendor/github.com/moby/buildkit/.dockerignore b/vendor/github.com/moby/buildkit/.dockerignore deleted file mode 100644 index a4ad067c4d76..000000000000 --- a/vendor/github.com/moby/buildkit/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -bin -.tmp diff --git a/vendor/github.com/moby/buildkit/.gitignore b/vendor/github.com/moby/buildkit/.gitignore deleted file mode 100644 index 204e2cec1d89..000000000000 --- a/vendor/github.com/moby/buildkit/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -bin -.tmp 
-release-out \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/.travis.yml b/vendor/github.com/moby/buildkit/.travis.yml deleted file mode 100644 index b4f8ddb0beee..000000000000 --- a/vendor/github.com/moby/buildkit/.travis.yml +++ /dev/null @@ -1,66 +0,0 @@ -dist: trusty -sudo: required - -install: - - docker run --name buildkit --rm -d --privileged -p 1234:1234 $REPO_SLUG_ORIGIN --addr tcp://0.0.0.0:1234 - - sudo docker cp buildkit:/usr/bin/buildctl /usr/bin/ - - export BUILDKIT_HOST=tcp://0.0.0.0:1234 - -env: - global: - - PLATFORMS="linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64,linux/s390x,linux/ppc64le" - - PREFER_BUILDCTL="1" - -script: - - make binaries validate-all && ./hack/cross - -before_deploy: - - echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin - -deploy: - - provider: script - script: ./hack/release master $REPO_SLUG_TARGET push - on: - repo: moby/buildkit - branch: master - condition: $TRAVIS_EVENT_TYPE != "cron" - - provider: script - script: ./hack/release $TRAVIS_TAG $REPO_SLUG_TARGET push && PLATFORMS="${PLATFORMS},darwin/amd64,windows/amd64" ./hack/release-tar $TRAVIS_TAG release-out - on: - repo: moby/buildkit - tags: true - condition: $TRAVIS_TAG =~ ^v[0-9] - - provider: releases - api_key: - secure: "hA0L2F6O1MLEJEbUDzxokpO6F6QrAIkltmVG3g0tTAoVj1xtCOXSmH3cAnVbFYyOz9q8pa/85tbpyEEIHVlqvWk2a5/QS16QaBW6XxH+FiZ3oQ44JbtpsjpmBFxdhfeFs8Ca6Nj29AOtDx21HHWsZKlBZFvC4Ubc05AM1rgZpJyZVDvYsjZIunc8/CPCbvAAp6RLnLHxAYXF+TQ7mAZP2SewsW/61nPjPIp2P4d93CduA9kUSxtC/1ewmU2T9Ak2X1Nw2ecPTonGjO51xNa6Ebo1hsbsRt5Krd1IR5rSkgXqLrhQO+19J3sUrQr2p8su6hCTKXR5TQz9L5C9VG8T3yOLbA7/FKBndWgBCm7EB7SezhFkm91e3Phkd/Hi5PF4ZKUSKyOYORHpoeg7ggBXaQF5r0OolqvNjxe7EhE+zlUIqnk5eprVrXT8H1QDF0Jg7pfdqVV9AIZO6i+e+1wOVDaP6K6tiWGdkRFH0wahcucZ/8xVoa8JVNZKke2mMCuLGsNWcN4DeLhkxa6giw3tkqbnY+eTYcW/PyVFMAVsZ8rOjQu4u4mm82FYBI7UywWQJTReD1LO2ibxHk74nwtyauX7KsCPFh2CA27DKlsQ1/xkjaCpE6vduzKzPj2DSHp6tKjxn2edPWRI+/4JxLD6KUFX1f1KqD0pKy/qVsZhEPI=" - file: release-out/**/* - skip_cleanup: true - file_glob: true - on: - repo: moby/buildkit - tags: true - condition: $TRAVIS_TAG =~ ^v[0-9] - - provider: script - script: ./frontend/dockerfile/cmd/dockerfile-frontend/hack/release master mainline $DF_REPO_SLUG_TARGET push - on: - repo: moby/buildkit - branch: master - condition: $TRAVIS_EVENT_TYPE != "cron" - - provider: script - script: ./frontend/dockerfile/cmd/dockerfile-frontend/hack/release master experimental $DF_REPO_SLUG_TARGET push - on: - repo: moby/buildkit - branch: master - condition: $TRAVIS_EVENT_TYPE != "cron" - - provider: script - script: ./frontend/dockerfile/cmd/dockerfile-frontend/hack/release tag $TRAVIS_TAG $DF_REPO_SLUG_TARGET push - on: - repo: moby/buildkit - tags: true - condition: $TRAVIS_TAG =~ ^dockerfile/[0-9] - - provider: script - script: ./frontend/dockerfile/cmd/dockerfile-frontend/hack/release daily _ $DF_REPO_SLUG_TARGET push - on: - repo: moby/buildkit - branch: master - condition: $TRAVIS_EVENT_TYPE == "cron" \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/LICENSE b/vendor/github.com/moby/buildkit/LICENSE deleted file mode 100644 index 261eeb9e9f8b..000000000000 --- a/vendor/github.com/moby/buildkit/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
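For context on the miekg/dns hunks above: the regenerated `len()` methods follow a simple size-accounting convention — fixed-width fields add their byte width, domain names add `len+1` for the length octet, hex-encoded rdata packs two characters per wire byte (`len/2`, plus one byte of slack), and base64 fields contribute `base64.StdEncoding.DecodedLen`. Below is a minimal, self-contained sketch of that convention for the DS record; `estimateDSLen` and its inputs are illustrative only, not part of the vendored package.

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// estimateDSLen mirrors the shape of (*DS).len() from the hunks above:
// KeyTag is a uint16, Algorithm and DigestType are single octets, and
// the Digest is stored as a hex string, packing two characters per
// wire byte.
func estimateDSLen(hdrLen int, digestHex string) int {
	l := hdrLen
	l += 2                    // KeyTag
	l += 1                    // Algorithm
	l += 1                    // DigestType
	l += len(digestHex)/2 + 1 // hex rdata: len/2, plus one byte of slack as in the generated code
	return l
}

func main() {
	fmt.Println(estimateDSLen(20, "0123456789abcdef")) // 20+2+1+1+9 = 33
	// Base64 fields (e.g. DNSKEY.PublicKey) are sized via DecodedLen instead:
	fmt.Println(base64.StdEncoding.DecodedLen(len("AQPJ////4Q==")))
}
```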
diff --git a/vendor/github.com/moby/buildkit/Makefile b/vendor/github.com/moby/buildkit/Makefile deleted file mode 100644 index 4af96689d4cd..000000000000 --- a/vendor/github.com/moby/buildkit/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -DESTDIR=/usr/local - -binaries: FORCE - hack/binaries - -install: FORCE - mkdir -p $(DESTDIR)/bin - install bin/* $(DESTDIR)/bin - -clean: FORCE - rm -rf ./bin - -test: - ./hack/test integration gateway dockerfile - -lint: - ./hack/lint - -validate-vendor: - ./hack/validate-vendor - -validate-generated-files: - ./hack/validate-generated-files - -validate-all: test lint validate-vendor validate-generated-files - -vendor: - ./hack/update-vendor - -generated-files: - ./hack/update-generated-files - -.PHONY: vendor generated-files test binaries install clean lint validate-all validate-vendor validate-generated-files -FORCE: diff --git a/vendor/github.com/moby/buildkit/README.md b/vendor/github.com/moby/buildkit/README.md deleted file mode 100644 index 649835a9ebd2..000000000000 --- a/vendor/github.com/moby/buildkit/README.md +++ /dev/null @@ -1,294 +0,0 @@ -[![asciicinema example](https://asciinema.org/a/gPEIEo1NzmDTUu2bEPsUboqmU.png)](https://asciinema.org/a/gPEIEo1NzmDTUu2bEPsUboqmU) - - -## BuildKit - -[![GoDoc](https://godoc.org/github.com/moby/buildkit?status.svg)](https://godoc.org/github.com/moby/buildkit/client/llb) -[![Build Status](https://travis-ci.org/moby/buildkit.svg?branch=master)](https://travis-ci.org/moby/buildkit) -[![Go Report Card](https://goreportcard.com/badge/github.com/moby/buildkit)](https://goreportcard.com/report/github.com/moby/buildkit) - - -BuildKit is a toolkit for converting source code to build artifacts in an efficient, expressive and repeatable manner. - -Key features: -- Automatic garbage collection -- Extendable frontend formats -- Concurrent dependency resolution -- Efficient instruction caching -- Build cache import/export -- Nested build job invocations -- Distributable workers -- Multiple output formats -- Pluggable architecture -- Execution without root privileges - - -Read the proposal from https://github.com/moby/moby/issues/32925 - -Introductory blog post https://blog.mobyproject.org/introducing-buildkit-17e056cc5317 - -:information_source: If you are visiting this repo for the usage of experimental Dockerfile features like `RUN --mount=type=(bind|cache|tmpfs|secret|ssh)`, please refer to [`frontend/dockerfile/docs/experimental.md`](frontend/dockerfile/docs/experimental.md). - -### Used by - -[Moby & Docker](https://github.com/moby/moby/pull/37151) - -[img](https://github.com/genuinetools/img) - -[OpenFaaS Cloud](https://github.com/openfaas/openfaas-cloud) - -[container build interface](https://github.com/containerbuilding/cbi) - -[Knative Build Templates](https://github.com/knative/build-templates) - -[boss](https://github.com/crosbymichael/boss) - -[Rio](https://github.com/rancher/rio) (on roadmap) - -### Quick start - -Dependencies: -- [runc](https://github.com/opencontainers/runc) -- [containerd](https://github.com/containerd/containerd) (if you want to use containerd worker) - - -The following command installs `buildkitd` and `buildctl` to `/usr/local/bin`: - -```bash -$ make && sudo make install -``` - -You can also use `make binaries-all` to prepare `buildkitd.containerd_only` and `buildkitd.oci_only`. - -#### Starting the buildkitd daemon: - -``` -buildkitd --debug --root /var/lib/buildkit -``` - -The buildkitd daemon supports two worker backends: OCI (runc) and containerd. 
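Regardless of the worker backend, clients reach the daemon over the same gRPC Control API — the generated `moby_buildkit_v1` bindings whose vendored copy is deleted further down in this diff. A hedged sketch of driving that API directly is below, assuming a daemon started with `--addr tcp://0.0.0.0:1234` as in the surrounding examples; the `controlapi` import alias is illustrative, and the method signatures match the generated client shown later in the patch.

```go
package main

import (
	"context"
	"fmt"
	"log"

	controlapi "github.com/moby/buildkit/api/services/control"
	"google.golang.org/grpc"
)

func main() {
	// Dial the buildkitd gRPC endpoint.
	cc, err := grpc.Dial("127.0.0.1:1234", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()

	c := controlapi.NewControlClient(cc)

	// DiskUsage is a plain unary RPC (the call behind `buildctl du`);
	// Filter is optional and left empty here.
	resp, err := c.DiskUsage(context.Background(), &controlapi.DiskUsageRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range resp.Record {
		fmt.Println(r.ID, r.Size_, r.InUse)
	}
}
```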
- -By default, the OCI (runc) worker is used. -You can set `--oci-worker=false --containerd-worker=true` to use the containerd worker. - -We are open to adding more backends. - -#### Exploring LLB - -BuildKit builds are based on a binary intermediate format called LLB that is used for defining the dependency graph for processes running part of your build. tl;dr: LLB is to Dockerfile what LLVM IR is to C. - -- Marshaled as Protobuf messages -- Concurrently executable -- Efficiently cacheable -- Vendor-neutral (i.e. non-Dockerfile languages can be easily implemented) - -See [`solver/pb/ops.proto`](./solver/pb/ops.proto) for the format definition. - -Currently, following high-level languages has been implemented for LLB: - -- Dockerfile (See [Exploring Dockerfiles](#exploring-dockerfiles)) -- [Buildpacks](https://github.com/tonistiigi/buildkit-pack) -- (open a PR to add your own language) - -For understanding the basics of LLB, `examples/buildkit*` directory contains scripts that define how to build different configurations of BuildKit itself and its dependencies using the `client` package. Running one of these scripts generates a protobuf definition of a build graph. Note that the script itself does not execute any steps of the build. - -You can use `buildctl debug dump-llb` to see what data is in this definition. Add `--dot` to generate dot layout. - -```bash -go run examples/buildkit0/buildkit.go | buildctl debug dump-llb | jq . -``` - -To start building use `buildctl build` command. The example script accepts `--with-containerd` flag to choose if containerd binaries and support should be included in the end result as well. - -```bash -go run examples/buildkit0/buildkit.go | buildctl build -``` - -`buildctl build` will show interactive progress bar by default while the build job is running. It will also show you the path to the trace file that contains all information about the timing of the individual steps and logs. - -Different versions of the example scripts show different ways of describing the build definition for this project to show the capabilities of the library. New versions have been added when new features have become available. - -- `./examples/buildkit0` - uses only exec operations, defines a full stage per component. -- `./examples/buildkit1` - cloning git repositories has been separated for extra concurrency. -- `./examples/buildkit2` - uses git sources directly instead of running `git clone`, allowing better performance and much safer caching. -- `./examples/buildkit3` - allows using local source files for separate components eg. `./buildkit3 --runc=local | buildctl build --local runc-src=some/local/path` -- `./examples/dockerfile2llb` - can be used to convert a Dockerfile to LLB for debugging purposes -- `./examples/gobuild` - shows how to use nested invocation to generate LLB for Go package internal dependencies - - -#### Exploring Dockerfiles - -Frontends are components that run inside BuildKit and convert any build definition to LLB. There is a special frontend called gateway (gateway.v0) that allows using any image as a frontend. - -During development, Dockerfile frontend (dockerfile.v0) is also part of the BuildKit repo. In the future, this will be moved out, and Dockerfiles can be built using an external image. - -##### Building a Dockerfile with `buildctl` - -``` -buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=. -buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=. 
--frontend-opt target=foo --frontend-opt build-arg:foo=bar -``` - -`--local` exposes local source files from client to the builder. `context` and `dockerfile` are the names Dockerfile frontend looks for build context and Dockerfile location. - -##### build-using-dockerfile utility - -For people familiar with `docker build` command, there is an example wrapper utility in `./examples/build-using-dockerfile` that allows building Dockerfiles with BuildKit using a syntax similar to `docker build`. - -``` -go build ./examples/build-using-dockerfile && sudo install build-using-dockerfile /usr/local/bin - -build-using-dockerfile -t myimage . -build-using-dockerfile -t mybuildkit -f ./hack/dockerfiles/test.Dockerfile . - -# build-using-dockerfile will automatically load the resulting image to Docker -docker inspect myimage -``` - -##### Building a Dockerfile using [external frontend](https://hub.docker.com/r/tonistiigi/dockerfile/tags/): - -During development, an external version of the Dockerfile frontend is pushed to https://hub.docker.com/r/tonistiigi/dockerfile that can be used with the gateway frontend. The source for the external frontend is currently located in `./frontend/dockerfile/cmd/dockerfile-frontend` but will move out of this repository in the future ([#163](https://github.com/moby/buildkit/issues/163)). For automatic build from master branch of this repository `tonistiigi/dockerfile:master` image can be used. - -``` -buildctl build --frontend=gateway.v0 --frontend-opt=source=tonistiigi/dockerfile --local context=. --local dockerfile=. -buildctl build --frontend gateway.v0 --frontend-opt=source=tonistiigi/dockerfile --frontend-opt=context=git://github.com/moby/moby --frontend-opt build-arg:APT_MIRROR=cdn-fastly.deb.debian.org -```` - -##### Building a Dockerfile with experimental features like `RUN --mount=type=(bind|cache|tmpfs|secret|ssh)` - -See [`frontend/dockerfile/docs/experimental.md`](frontend/dockerfile/docs/experimental.md). - -### Exporters - -By default, the build result and intermediate cache will only remain internally in BuildKit. Exporter needs to be specified to retrieve the result. - -##### Exporting resulting image to containerd - -The containerd worker needs to be used - -``` -buildctl build ... --exporter=image --exporter-opt name=docker.io/username/image -ctr --namespace=buildkit images ls -``` - -##### Push resulting image to registry - -``` -buildctl build ... --exporter=image --exporter-opt name=docker.io/username/image --exporter-opt push=true -``` - -If credentials are required, `buildctl` will attempt to read Docker configuration file. - - -##### Exporting build result back to client - -The local client will copy the files directly to the client. This is useful if BuildKit is being used for building something else than container images. - -``` -buildctl build ... --exporter=local --exporter-opt output=path/to/output-dir -``` - -##### Exporting built image to Docker - -``` -# exported tarball is also compatible with OCI spec -buildctl build ... --exporter=docker --exporter-opt name=myimage | docker load -``` - -##### Exporting [OCI Image Format](https://github.com/opencontainers/image-spec) tarball to client - -``` -buildctl build ... --exporter=oci --exporter-opt output=path/to/output.tar -buildctl build ... 
--exporter=oci > output.tar -``` - -### Other - -#### View build cache - -``` -buildctl du -v -``` - -#### Show enabled workers - -``` -buildctl debug workers -v -``` - -### Running containerized buildkit - -BuildKit can also be used by running the `buildkitd` daemon inside a Docker container and accessing it remotely. The client tool `buildctl` is also available for Mac and Windows. - -We provide `buildkitd` container images as [`moby/buildkit`](https://hub.docker.com/r/moby/buildkit/tags/): - -* `moby/buildkit:latest`: built from the latest regular [release](https://github.com/moby/buildkit/releases) -* `moby/buildkit:rootless`: same as `latest` but runs as an unprivileged user, see [`docs/rootless.md`](docs/rootless.md) -* `moby/buildkit:master`: built from the master branch -* `moby/buildkit:master-rootless`: same as master but runs as an unprivileged user, see [`docs/rootless.md`](docs/rootless.md) - -To run daemon in a container: - -``` -docker run -d --privileged -p 1234:1234 moby/buildkit:latest --addr tcp://0.0.0.0:1234 -export BUILDKIT_HOST=tcp://0.0.0.0:1234 -buildctl build --help -``` - -The images can be also built locally using `./hack/dockerfiles/test.Dockerfile` (or `./hack/dockerfiles/test.buildkit.Dockerfile` if you already have BuildKit). - -### Opentracing support - -BuildKit supports opentracing for buildkitd gRPC API and buildctl commands. To capture the trace to [Jaeger](https://github.com/jaegertracing/jaeger), set `JAEGER_TRACE` environment variable to the collection address. - - -``` -docker run -d -p6831:6831/udp -p16686:16686 jaegertracing/all-in-one:latest -export JAEGER_TRACE=0.0.0.0:6831 -# restart buildkitd and buildctl so they know JAEGER_TRACE -# any buildctl command should be traced to http://127.0.0.1:16686/ -``` - - -### Supported runc version - -During development, BuildKit is tested with the version of runc that is being used by the containerd repository. Please refer to [runc.md](https://github.com/containerd/containerd/blob/v1.2.0-rc.1/RUNC.md) for more information. - -### Running BuildKit without root privileges - -Please refer to [`docs/rootless.md`](docs/rootless.md). - -### Contributing - -Running tests: - -```bash -make test -``` - -This runs all unit and integration tests in a containerized environment. Locally, every package can be tested separately with standard Go tools, but integration tests are skipped if local user doesn't have enough permissions or worker binaries are not installed. - -``` -# test a specific package only -make test TESTPKGS=./client - -# run a specific test with all worker combinations -make test TESTPKGS=./client TESTFLAGS="--run /TestCallDiskUsage -v" - -# run all integration tests with a specific worker -# supported workers: oci, oci-rootless, containerd, containerd-1.0 -make test TESTPKGS=./client TESTFLAGS="--run //worker=containerd -v" -``` - -Updating vendored dependencies: - -```bash -# update vendor.conf -make vendor -``` - -Validating your updates before submission: - -```bash -make validate-all -``` diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go deleted file mode 100644 index a5bfa63dce3c..000000000000 --- a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go +++ /dev/null @@ -1,4915 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: control.proto - -/* - Package moby_buildkit_v1 is a generated protocol buffer package. 
- - It is generated from these files: - control.proto - - It has these top-level messages: - PruneRequest - DiskUsageRequest - DiskUsageResponse - UsageRecord - SolveRequest - CacheOptions - SolveResponse - StatusRequest - StatusResponse - Vertex - VertexStatus - VertexLog - BytesMessage - ListWorkersRequest - ListWorkersResponse -*/ -package moby_buildkit_v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" -import _ "github.com/golang/protobuf/ptypes/timestamp" -import pb "github.com/moby/buildkit/solver/pb" -import moby_buildkit_v1_types "github.com/moby/buildkit/api/types" - -import time "time" -import github_com_moby_buildkit_util_entitlements "github.com/moby/buildkit/util/entitlements" -import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" - -import context "golang.org/x/net/context" -import grpc "google.golang.org/grpc" - -import types "github.com/gogo/protobuf/types" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type PruneRequest struct { - Filter []string `protobuf:"bytes,1,rep,name=filter" json:"filter,omitempty"` - All bool `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` - KeepDuration int64 `protobuf:"varint,3,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"` - KeepBytes int64 `protobuf:"varint,4,opt,name=keepBytes,proto3" json:"keepBytes,omitempty"` -} - -func (m *PruneRequest) Reset() { *m = PruneRequest{} } -func (m *PruneRequest) String() string { return proto.CompactTextString(m) } -func (*PruneRequest) ProtoMessage() {} -func (*PruneRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{0} } - -func (m *PruneRequest) GetFilter() []string { - if m != nil { - return m.Filter - } - return nil -} - -func (m *PruneRequest) GetAll() bool { - if m != nil { - return m.All - } - return false -} - -func (m *PruneRequest) GetKeepDuration() int64 { - if m != nil { - return m.KeepDuration - } - return 0 -} - -func (m *PruneRequest) GetKeepBytes() int64 { - if m != nil { - return m.KeepBytes - } - return 0 -} - -type DiskUsageRequest struct { - Filter []string `protobuf:"bytes,1,rep,name=filter" json:"filter,omitempty"` -} - -func (m *DiskUsageRequest) Reset() { *m = DiskUsageRequest{} } -func (m *DiskUsageRequest) String() string { return proto.CompactTextString(m) } -func (*DiskUsageRequest) ProtoMessage() {} -func (*DiskUsageRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{1} } - -func (m *DiskUsageRequest) GetFilter() []string { - if m != nil { - return m.Filter - } - return nil -} - -type DiskUsageResponse struct { - Record []*UsageRecord `protobuf:"bytes,1,rep,name=record" json:"record,omitempty"` -} - -func (m *DiskUsageResponse) Reset() { *m = DiskUsageResponse{} } -func (m *DiskUsageResponse) String() string { return proto.CompactTextString(m) } -func (*DiskUsageResponse) ProtoMessage() {} -func (*DiskUsageResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{2} } - -func (m *DiskUsageResponse) GetRecord() 
[]*UsageRecord { - if m != nil { - return m.Record - } - return nil -} - -type UsageRecord struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Mutable bool `protobuf:"varint,2,opt,name=Mutable,proto3" json:"Mutable,omitempty"` - InUse bool `protobuf:"varint,3,opt,name=InUse,proto3" json:"InUse,omitempty"` - Size_ int64 `protobuf:"varint,4,opt,name=Size,proto3" json:"Size,omitempty"` - Parent string `protobuf:"bytes,5,opt,name=Parent,proto3" json:"Parent,omitempty"` - CreatedAt time.Time `protobuf:"bytes,6,opt,name=CreatedAt,stdtime" json:"CreatedAt"` - LastUsedAt *time.Time `protobuf:"bytes,7,opt,name=LastUsedAt,stdtime" json:"LastUsedAt,omitempty"` - UsageCount int64 `protobuf:"varint,8,opt,name=UsageCount,proto3" json:"UsageCount,omitempty"` - Description string `protobuf:"bytes,9,opt,name=Description,proto3" json:"Description,omitempty"` - RecordType string `protobuf:"bytes,10,opt,name=RecordType,proto3" json:"RecordType,omitempty"` - Shared bool `protobuf:"varint,11,opt,name=Shared,proto3" json:"Shared,omitempty"` -} - -func (m *UsageRecord) Reset() { *m = UsageRecord{} } -func (m *UsageRecord) String() string { return proto.CompactTextString(m) } -func (*UsageRecord) ProtoMessage() {} -func (*UsageRecord) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{3} } - -func (m *UsageRecord) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *UsageRecord) GetMutable() bool { - if m != nil { - return m.Mutable - } - return false -} - -func (m *UsageRecord) GetInUse() bool { - if m != nil { - return m.InUse - } - return false -} - -func (m *UsageRecord) GetSize_() int64 { - if m != nil { - return m.Size_ - } - return 0 -} - -func (m *UsageRecord) GetParent() string { - if m != nil { - return m.Parent - } - return "" -} - -func (m *UsageRecord) GetCreatedAt() time.Time { - if m != nil { - return m.CreatedAt - } - return time.Time{} -} - -func (m *UsageRecord) GetLastUsedAt() *time.Time { - if m != nil { - return m.LastUsedAt - } - return nil -} - -func (m *UsageRecord) GetUsageCount() int64 { - if m != nil { - return m.UsageCount - } - return 0 -} - -func (m *UsageRecord) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *UsageRecord) GetRecordType() string { - if m != nil { - return m.RecordType - } - return "" -} - -func (m *UsageRecord) GetShared() bool { - if m != nil { - return m.Shared - } - return false -} - -type SolveRequest struct { - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - Definition *pb.Definition `protobuf:"bytes,2,opt,name=Definition" json:"Definition,omitempty"` - Exporter string `protobuf:"bytes,3,opt,name=Exporter,proto3" json:"Exporter,omitempty"` - ExporterAttrs map[string]string `protobuf:"bytes,4,rep,name=ExporterAttrs" json:"ExporterAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Session string `protobuf:"bytes,5,opt,name=Session,proto3" json:"Session,omitempty"` - Frontend string `protobuf:"bytes,6,opt,name=Frontend,proto3" json:"Frontend,omitempty"` - FrontendAttrs map[string]string `protobuf:"bytes,7,rep,name=FrontendAttrs" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Cache CacheOptions `protobuf:"bytes,8,opt,name=Cache" json:"Cache"` - Entitlements []github_com_moby_buildkit_util_entitlements.Entitlement 
`protobuf:"bytes,9,rep,name=Entitlements,customtype=github.com/moby/buildkit/util/entitlements.Entitlement" json:"Entitlements,omitempty"` -} - -func (m *SolveRequest) Reset() { *m = SolveRequest{} } -func (m *SolveRequest) String() string { return proto.CompactTextString(m) } -func (*SolveRequest) ProtoMessage() {} -func (*SolveRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{4} } - -func (m *SolveRequest) GetRef() string { - if m != nil { - return m.Ref - } - return "" -} - -func (m *SolveRequest) GetDefinition() *pb.Definition { - if m != nil { - return m.Definition - } - return nil -} - -func (m *SolveRequest) GetExporter() string { - if m != nil { - return m.Exporter - } - return "" -} - -func (m *SolveRequest) GetExporterAttrs() map[string]string { - if m != nil { - return m.ExporterAttrs - } - return nil -} - -func (m *SolveRequest) GetSession() string { - if m != nil { - return m.Session - } - return "" -} - -func (m *SolveRequest) GetFrontend() string { - if m != nil { - return m.Frontend - } - return "" -} - -func (m *SolveRequest) GetFrontendAttrs() map[string]string { - if m != nil { - return m.FrontendAttrs - } - return nil -} - -func (m *SolveRequest) GetCache() CacheOptions { - if m != nil { - return m.Cache - } - return CacheOptions{} -} - -type CacheOptions struct { - ExportRef string `protobuf:"bytes,1,opt,name=ExportRef,proto3" json:"ExportRef,omitempty"` - ImportRefs []string `protobuf:"bytes,2,rep,name=ImportRefs" json:"ImportRefs,omitempty"` - ExportAttrs map[string]string `protobuf:"bytes,3,rep,name=ExportAttrs" json:"ExportAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *CacheOptions) Reset() { *m = CacheOptions{} } -func (m *CacheOptions) String() string { return proto.CompactTextString(m) } -func (*CacheOptions) ProtoMessage() {} -func (*CacheOptions) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{5} } - -func (m *CacheOptions) GetExportRef() string { - if m != nil { - return m.ExportRef - } - return "" -} - -func (m *CacheOptions) GetImportRefs() []string { - if m != nil { - return m.ImportRefs - } - return nil -} - -func (m *CacheOptions) GetExportAttrs() map[string]string { - if m != nil { - return m.ExportAttrs - } - return nil -} - -type SolveResponse struct { - ExporterResponse map[string]string `protobuf:"bytes,1,rep,name=ExporterResponse" json:"ExporterResponse,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *SolveResponse) Reset() { *m = SolveResponse{} } -func (m *SolveResponse) String() string { return proto.CompactTextString(m) } -func (*SolveResponse) ProtoMessage() {} -func (*SolveResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{6} } - -func (m *SolveResponse) GetExporterResponse() map[string]string { - if m != nil { - return m.ExporterResponse - } - return nil -} - -type StatusRequest struct { - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` -} - -func (m *StatusRequest) Reset() { *m = StatusRequest{} } -func (m *StatusRequest) String() string { return proto.CompactTextString(m) } -func (*StatusRequest) ProtoMessage() {} -func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{7} } - -func (m *StatusRequest) GetRef() string { - if m != nil { - return m.Ref - } - return "" -} - -type StatusResponse struct { - Vertexes []*Vertex `protobuf:"bytes,1,rep,name=vertexes" 
json:"vertexes,omitempty"` - Statuses []*VertexStatus `protobuf:"bytes,2,rep,name=statuses" json:"statuses,omitempty"` - Logs []*VertexLog `protobuf:"bytes,3,rep,name=logs" json:"logs,omitempty"` -} - -func (m *StatusResponse) Reset() { *m = StatusResponse{} } -func (m *StatusResponse) String() string { return proto.CompactTextString(m) } -func (*StatusResponse) ProtoMessage() {} -func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{8} } - -func (m *StatusResponse) GetVertexes() []*Vertex { - if m != nil { - return m.Vertexes - } - return nil -} - -func (m *StatusResponse) GetStatuses() []*VertexStatus { - if m != nil { - return m.Statuses - } - return nil -} - -func (m *StatusResponse) GetLogs() []*VertexLog { - if m != nil { - return m.Logs - } - return nil -} - -type Vertex struct { - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - Inputs []github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,rep,name=inputs,customtype=github.com/opencontainers/go-digest.Digest" json:"inputs"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - Cached bool `protobuf:"varint,4,opt,name=cached,proto3" json:"cached,omitempty"` - Started *time.Time `protobuf:"bytes,5,opt,name=started,stdtime" json:"started,omitempty"` - Completed *time.Time `protobuf:"bytes,6,opt,name=completed,stdtime" json:"completed,omitempty"` - Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` -} - -func (m *Vertex) Reset() { *m = Vertex{} } -func (m *Vertex) String() string { return proto.CompactTextString(m) } -func (*Vertex) ProtoMessage() {} -func (*Vertex) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{9} } - -func (m *Vertex) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Vertex) GetCached() bool { - if m != nil { - return m.Cached - } - return false -} - -func (m *Vertex) GetStarted() *time.Time { - if m != nil { - return m.Started - } - return nil -} - -func (m *Vertex) GetCompleted() *time.Time { - if m != nil { - return m.Completed - } - return nil -} - -func (m *Vertex) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -type VertexStatus struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Vertex github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=vertex,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"vertex"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - Current int64 `protobuf:"varint,4,opt,name=current,proto3" json:"current,omitempty"` - Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` - // TODO: add started, completed - Timestamp time.Time `protobuf:"bytes,6,opt,name=timestamp,stdtime" json:"timestamp"` - Started *time.Time `protobuf:"bytes,7,opt,name=started,stdtime" json:"started,omitempty"` - Completed *time.Time `protobuf:"bytes,8,opt,name=completed,stdtime" json:"completed,omitempty"` -} - -func (m *VertexStatus) Reset() { *m = VertexStatus{} } -func (m *VertexStatus) String() string { return proto.CompactTextString(m) } -func (*VertexStatus) ProtoMessage() {} -func (*VertexStatus) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{10} } - -func (m *VertexStatus) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *VertexStatus) GetName() string { - if 
m != nil { - return m.Name - } - return "" -} - -func (m *VertexStatus) GetCurrent() int64 { - if m != nil { - return m.Current - } - return 0 -} - -func (m *VertexStatus) GetTotal() int64 { - if m != nil { - return m.Total - } - return 0 -} - -func (m *VertexStatus) GetTimestamp() time.Time { - if m != nil { - return m.Timestamp - } - return time.Time{} -} - -func (m *VertexStatus) GetStarted() *time.Time { - if m != nil { - return m.Started - } - return nil -} - -func (m *VertexStatus) GetCompleted() *time.Time { - if m != nil { - return m.Completed - } - return nil -} - -type VertexLog struct { - Vertex github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=vertex,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"vertex"` - Timestamp time.Time `protobuf:"bytes,2,opt,name=timestamp,stdtime" json:"timestamp"` - Stream int64 `protobuf:"varint,3,opt,name=stream,proto3" json:"stream,omitempty"` - Msg []byte `protobuf:"bytes,4,opt,name=msg,proto3" json:"msg,omitempty"` -} - -func (m *VertexLog) Reset() { *m = VertexLog{} } -func (m *VertexLog) String() string { return proto.CompactTextString(m) } -func (*VertexLog) ProtoMessage() {} -func (*VertexLog) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{11} } - -func (m *VertexLog) GetTimestamp() time.Time { - if m != nil { - return m.Timestamp - } - return time.Time{} -} - -func (m *VertexLog) GetStream() int64 { - if m != nil { - return m.Stream - } - return 0 -} - -func (m *VertexLog) GetMsg() []byte { - if m != nil { - return m.Msg - } - return nil -} - -type BytesMessage struct { - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *BytesMessage) Reset() { *m = BytesMessage{} } -func (m *BytesMessage) String() string { return proto.CompactTextString(m) } -func (*BytesMessage) ProtoMessage() {} -func (*BytesMessage) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{12} } - -func (m *BytesMessage) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -type ListWorkersRequest struct { - Filter []string `protobuf:"bytes,1,rep,name=filter" json:"filter,omitempty"` -} - -func (m *ListWorkersRequest) Reset() { *m = ListWorkersRequest{} } -func (m *ListWorkersRequest) String() string { return proto.CompactTextString(m) } -func (*ListWorkersRequest) ProtoMessage() {} -func (*ListWorkersRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{13} } - -func (m *ListWorkersRequest) GetFilter() []string { - if m != nil { - return m.Filter - } - return nil -} - -type ListWorkersResponse struct { - Record []*moby_buildkit_v1_types.WorkerRecord `protobuf:"bytes,1,rep,name=record" json:"record,omitempty"` -} - -func (m *ListWorkersResponse) Reset() { *m = ListWorkersResponse{} } -func (m *ListWorkersResponse) String() string { return proto.CompactTextString(m) } -func (*ListWorkersResponse) ProtoMessage() {} -func (*ListWorkersResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{14} } - -func (m *ListWorkersResponse) GetRecord() []*moby_buildkit_v1_types.WorkerRecord { - if m != nil { - return m.Record - } - return nil -} - -func init() { - proto.RegisterType((*PruneRequest)(nil), "moby.buildkit.v1.PruneRequest") - proto.RegisterType((*DiskUsageRequest)(nil), "moby.buildkit.v1.DiskUsageRequest") - proto.RegisterType((*DiskUsageResponse)(nil), "moby.buildkit.v1.DiskUsageResponse") - proto.RegisterType((*UsageRecord)(nil), "moby.buildkit.v1.UsageRecord") - 
-func init() {
-	proto.RegisterType((*PruneRequest)(nil), "moby.buildkit.v1.PruneRequest")
-	proto.RegisterType((*DiskUsageRequest)(nil), "moby.buildkit.v1.DiskUsageRequest")
-	proto.RegisterType((*DiskUsageResponse)(nil), "moby.buildkit.v1.DiskUsageResponse")
-	proto.RegisterType((*UsageRecord)(nil), "moby.buildkit.v1.UsageRecord")
-	proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.SolveRequest")
-	proto.RegisterType((*CacheOptions)(nil), "moby.buildkit.v1.CacheOptions")
-	proto.RegisterType((*SolveResponse)(nil), "moby.buildkit.v1.SolveResponse")
-	proto.RegisterType((*StatusRequest)(nil), "moby.buildkit.v1.StatusRequest")
-	proto.RegisterType((*StatusResponse)(nil), "moby.buildkit.v1.StatusResponse")
-	proto.RegisterType((*Vertex)(nil), "moby.buildkit.v1.Vertex")
-	proto.RegisterType((*VertexStatus)(nil), "moby.buildkit.v1.VertexStatus")
-	proto.RegisterType((*VertexLog)(nil), "moby.buildkit.v1.VertexLog")
-	proto.RegisterType((*BytesMessage)(nil), "moby.buildkit.v1.BytesMessage")
-	proto.RegisterType((*ListWorkersRequest)(nil), "moby.buildkit.v1.ListWorkersRequest")
-	proto.RegisterType((*ListWorkersResponse)(nil), "moby.buildkit.v1.ListWorkersResponse")
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// Client API for Control service
-
-type ControlClient interface {
-	DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error)
-	Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error)
-	Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error)
-	Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error)
-	Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error)
-	ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error)
-}
-
-type controlClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewControlClient(cc *grpc.ClientConn) ControlClient {
-	return &controlClient{cc}
-}
-
-func (c *controlClient) DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error) {
-	out := new(DiskUsageResponse)
-	err := grpc.Invoke(ctx, "/moby.buildkit.v1.Control/DiskUsage", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *controlClient) Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error) {
-	stream, err := grpc.NewClientStream(ctx, &_Control_serviceDesc.Streams[0], c.cc, "/moby.buildkit.v1.Control/Prune", opts...)
-	if err != nil {
-		return nil, err
-	}
-	x := &controlPruneClient{stream}
-	if err := x.ClientStream.SendMsg(in); err != nil {
-		return nil, err
-	}
-	if err := x.ClientStream.CloseSend(); err != nil {
-		return nil, err
-	}
-	return x, nil
-}
-
-type Control_PruneClient interface {
-	Recv() (*UsageRecord, error)
-	grpc.ClientStream
-}
-
-type controlPruneClient struct {
-	grpc.ClientStream
-}
-
-func (x *controlPruneClient) Recv() (*UsageRecord, error) {
-	m := new(UsageRecord)
-	if err := x.ClientStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-func (c *controlClient) Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) {
-	out := new(SolveResponse)
-	err := grpc.Invoke(ctx, "/moby.buildkit.v1.Control/Solve", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *controlClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error) {
-	stream, err := grpc.NewClientStream(ctx, &_Control_serviceDesc.Streams[1], c.cc, "/moby.buildkit.v1.Control/Status", opts...)
-	if err != nil {
-		return nil, err
-	}
-	x := &controlStatusClient{stream}
-	if err := x.ClientStream.SendMsg(in); err != nil {
-		return nil, err
-	}
-	if err := x.ClientStream.CloseSend(); err != nil {
-		return nil, err
-	}
-	return x, nil
-}
-
-type Control_StatusClient interface {
-	Recv() (*StatusResponse, error)
-	grpc.ClientStream
-}
-
-type controlStatusClient struct {
-	grpc.ClientStream
-}
-
-func (x *controlStatusClient) Recv() (*StatusResponse, error) {
-	m := new(StatusResponse)
-	if err := x.ClientStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-func (c *controlClient) Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) {
-	stream, err := grpc.NewClientStream(ctx, &_Control_serviceDesc.Streams[2], c.cc, "/moby.buildkit.v1.Control/Session", opts...)
-	if err != nil {
-		return nil, err
-	}
-	x := &controlSessionClient{stream}
-	return x, nil
-}
-
-type Control_SessionClient interface {
-	Send(*BytesMessage) error
-	Recv() (*BytesMessage, error)
-	grpc.ClientStream
-}
-
-type controlSessionClient struct {
-	grpc.ClientStream
-}
-
-func (x *controlSessionClient) Send(m *BytesMessage) error {
-	return x.ClientStream.SendMsg(m)
-}
-
-func (x *controlSessionClient) Recv() (*BytesMessage, error) {
-	m := new(BytesMessage)
-	if err := x.ClientStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
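The client wrappers above follow the three standard gRPC call shapes: unary (DiskUsage, Solve, ListWorkers), server-streaming (Prune, Status), and bidirectional (Session). A condensed sketch of driving the server-streaming Status call, assuming an already-dialed *grpc.ClientConn (watchStatus and its error handling are illustrative only):

    func watchStatus(ctx context.Context, conn *grpc.ClientConn, ref string) error {
        c := NewControlClient(conn)
        stream, err := c.Status(ctx, &StatusRequest{Ref: ref})
        if err != nil {
            return err
        }
        for {
            resp, err := stream.Recv()
            if err == io.EOF {
                return nil // server closed the stream normally
            }
            if err != nil {
                return err
            }
            _ = resp.GetVertexes() // consume vertexes, statuses, logs here
        }
    }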
-func (c *controlClient) ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) {
-	out := new(ListWorkersResponse)
-	err := grpc.Invoke(ctx, "/moby.buildkit.v1.Control/ListWorkers", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// Server API for Control service
-
-type ControlServer interface {
-	DiskUsage(context.Context, *DiskUsageRequest) (*DiskUsageResponse, error)
-	Prune(*PruneRequest, Control_PruneServer) error
-	Solve(context.Context, *SolveRequest) (*SolveResponse, error)
-	Status(*StatusRequest, Control_StatusServer) error
-	Session(Control_SessionServer) error
-	ListWorkers(context.Context, *ListWorkersRequest) (*ListWorkersResponse, error)
-}
-
-func RegisterControlServer(s *grpc.Server, srv ControlServer) {
-	s.RegisterService(&_Control_serviceDesc, srv)
-}
-
-func _Control_DiskUsage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(DiskUsageRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ControlServer).DiskUsage(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server: srv,
-		FullMethod: "/moby.buildkit.v1.Control/DiskUsage",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ControlServer).DiskUsage(ctx, req.(*DiskUsageRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Control_Prune_Handler(srv interface{}, stream grpc.ServerStream) error {
-	m := new(PruneRequest)
-	if err := stream.RecvMsg(m); err != nil {
-		return err
-	}
-	return srv.(ControlServer).Prune(m, &controlPruneServer{stream})
-}
-
-type Control_PruneServer interface {
-	Send(*UsageRecord) error
-	grpc.ServerStream
-}
-
-type controlPruneServer struct {
-	grpc.ServerStream
-}
-
-func (x *controlPruneServer) Send(m *UsageRecord) error {
-	return x.ServerStream.SendMsg(m)
-}
-
-func _Control_Solve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(SolveRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ControlServer).Solve(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server: srv,
-		FullMethod: "/moby.buildkit.v1.Control/Solve",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ControlServer).Solve(ctx, req.(*SolveRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Control_Status_Handler(srv interface{}, stream grpc.ServerStream) error {
-	m := new(StatusRequest)
-	if err := stream.RecvMsg(m); err != nil {
-		return err
-	}
-	return srv.(ControlServer).Status(m, &controlStatusServer{stream})
-}
-
-type Control_StatusServer interface {
-	Send(*StatusResponse) error
-	grpc.ServerStream
-}
-
-type controlStatusServer struct {
-	grpc.ServerStream
-}
-
-func (x *controlStatusServer) Send(m *StatusResponse) error {
-	return x.ServerStream.SendMsg(m)
-}
-
-func _Control_Session_Handler(srv interface{}, stream grpc.ServerStream) error {
-	return srv.(ControlServer).Session(&controlSessionServer{stream})
-}
-
-type Control_SessionServer interface {
-	Send(*BytesMessage) error
-	Recv() (*BytesMessage, error)
-	grpc.ServerStream
-}
-
-type controlSessionServer struct {
-	grpc.ServerStream
-}
-
-func (x *controlSessionServer) Send(m *BytesMessage) error {
-	return x.ServerStream.SendMsg(m)
-}
-
-func (x *controlSessionServer) Recv() (*BytesMessage, error) {
-	m := new(BytesMessage)
-	if err := x.ServerStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-func _Control_ListWorkers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(ListWorkersRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ControlServer).ListWorkers(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server: srv,
-		FullMethod: "/moby.buildkit.v1.Control/ListWorkers",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ControlServer).ListWorkers(ctx, req.(*ListWorkersRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-var _Control_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "moby.buildkit.v1.Control",
-	HandlerType: (*ControlServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "DiskUsage",
-			Handler: _Control_DiskUsage_Handler,
-		},
-		{
-			MethodName: "Solve",
-			Handler: _Control_Solve_Handler,
-		},
-		{
-			MethodName: "ListWorkers",
-			Handler: _Control_ListWorkers_Handler,
-		},
-	},
-	Streams: []grpc.StreamDesc{
-		{
-			StreamName: "Prune",
-			Handler: _Control_Prune_Handler,
-			ServerStreams: true,
-		},
-		{
-			StreamName: "Status",
-			Handler: _Control_Status_Handler,
-			ServerStreams: true,
-		},
-		{
-			StreamName: "Session",
-			Handler: _Control_Session_Handler,
-			ServerStreams: true,
-			ClientStreams: true,
-		},
-	},
-	Metadata: "control.proto",
-}
-
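On the server side, an implementation of ControlServer is installed with RegisterControlServer, which hands _Control_serviceDesc to gRPC so incoming calls are routed through the _Control_*_Handler shims above. A minimal sketch (serveControl, impl, and lis are assumed, not part of the vendored file):

    func serveControl(lis net.Listener, impl ControlServer) error {
        s := grpc.NewServer()
        RegisterControlServer(s, impl) // registers _Control_serviceDesc
        return s.Serve(lis)            // dispatches via the handler shims
    }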
-func (m *PruneRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *PruneRequest) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.Filter) > 0 {
-		for _, s := range m.Filter {
-			dAtA[i] = 0xa
-			i++
-			l = len(s)
-			for l >= 1<<7 {
-				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
-				l >>= 7
-				i++
-			}
-			dAtA[i] = uint8(l)
-			i++
-			i += copy(dAtA[i:], s)
-		}
-	}
-	if m.All {
-		dAtA[i] = 0x10
-		i++
-		if m.All {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i++
-	}
-	if m.KeepDuration != 0 {
-		dAtA[i] = 0x18
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(m.KeepDuration))
-	}
-	if m.KeepBytes != 0 {
-		dAtA[i] = 0x20
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(m.KeepBytes))
-	}
-	return i, nil
-}
-
-func (m *DiskUsageRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *DiskUsageRequest) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.Filter) > 0 {
-		for _, s := range m.Filter {
-			dAtA[i] = 0xa
-			i++
-			l = len(s)
-			for l >= 1<<7 {
-				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
-				l >>= 7
-				i++
-			}
-			dAtA[i] = uint8(l)
-			i++
-			i += copy(dAtA[i:], s)
-		}
-	}
-	return i, nil
-}
-
-func (m *DiskUsageResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *DiskUsageResponse) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.Record) > 0 {
-		for _, msg := range m.Record {
-			dAtA[i] = 0xa
-			i++
-			i = encodeVarintControl(dAtA, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(dAtA[i:])
-			if err != nil {
-				return 0, err
-			}
-			i += n
-		}
-	}
-	return i, nil
-}
-
-func (m *UsageRecord) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *UsageRecord) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.ID) > 0 {
-		dAtA[i] = 0xa
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.ID)))
-		i += copy(dAtA[i:], m.ID)
-	}
-	if m.Mutable {
-		dAtA[i] = 0x10
-		i++
-		if m.Mutable {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i++
-	}
-	if m.InUse {
-		dAtA[i] = 0x18
-		i++
-		if m.InUse {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i++
-	}
-	if m.Size_ != 0 {
-		dAtA[i] = 0x20
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(m.Size_))
-	}
-	if len(m.Parent) > 0 {
-		dAtA[i] = 0x2a
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.Parent)))
-		i += copy(dAtA[i:], m.Parent)
-	}
-	dAtA[i] = 0x32
-	i++
-	i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(m.CreatedAt)))
-	n1, err := types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
-	if err != nil {
-		return 0, err
-	}
-	i += n1
-	if m.LastUsedAt != nil {
-		dAtA[i] = 0x3a
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(*m.LastUsedAt)))
-		n2, err := types.StdTimeMarshalTo(*m.LastUsedAt, dAtA[i:])
-		if err != nil {
-			return 0, err
-		}
-		i += n2
-	}
-	if m.UsageCount != 0 {
-		dAtA[i] = 0x40
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(m.UsageCount))
-	}
-	if len(m.Description) > 0 {
-		dAtA[i] = 0x4a
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.Description)))
-		i += copy(dAtA[i:], m.Description)
-	}
-	if len(m.RecordType) > 0 {
-		dAtA[i] = 0x52
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.RecordType)))
-		i += copy(dAtA[i:], m.RecordType)
-	}
-	if m.Shared {
-		dAtA[i] = 0x58
-		i++
-		if m.Shared {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i++
-	}
-	return i, nil
-}
-
-func (m *SolveRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.Ref) > 0 {
-		dAtA[i] = 0xa
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.Ref)))
-		i += copy(dAtA[i:], m.Ref)
-	}
-	if m.Definition != nil {
-		dAtA[i] = 0x12
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(m.Definition.Size()))
-		n3, err := m.Definition.MarshalTo(dAtA[i:])
-		if err != nil {
-			return 0, err
-		}
-		i += n3
-	}
-	if len(m.Exporter) > 0 {
-		dAtA[i] = 0x1a
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.Exporter)))
-		i += copy(dAtA[i:], m.Exporter)
-	}
-	if len(m.ExporterAttrs) > 0 {
-		for k, _ := range m.ExporterAttrs {
-			dAtA[i] = 0x22
-			i++
-			v := m.ExporterAttrs[k]
-			mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
-			i = encodeVarintControl(dAtA, i, uint64(mapSize))
-			dAtA[i] = 0xa
-			i++
-			i = encodeVarintControl(dAtA, i, uint64(len(k)))
-			i += copy(dAtA[i:], k)
-			dAtA[i] = 0x12
-			i++
-			i = encodeVarintControl(dAtA, i, uint64(len(v)))
-			i += copy(dAtA[i:], v)
-		}
-	}
-	if len(m.Session) > 0 {
-		dAtA[i] = 0x2a
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.Session)))
-		i += copy(dAtA[i:], m.Session)
-	}
-	if len(m.Frontend) > 0 {
-		dAtA[i] = 0x32
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.Frontend)))
-		i += copy(dAtA[i:], m.Frontend)
-	}
-	if len(m.FrontendAttrs) > 0 {
-		for k, _ := range m.FrontendAttrs {
-			dAtA[i] = 0x3a
-			i++
-			v := m.FrontendAttrs[k]
-			mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
-			i = encodeVarintControl(dAtA, i, uint64(mapSize))
-			dAtA[i] = 0xa
-			i++
-			i = encodeVarintControl(dAtA, i, uint64(len(k)))
-			i += copy(dAtA[i:], k)
-			dAtA[i] = 0x12
-			i++
-			i = encodeVarintControl(dAtA, i, uint64(len(v)))
-			i += copy(dAtA[i:], v)
-		}
-	}
-	dAtA[i] = 0x42
-	i++
-	i = encodeVarintControl(dAtA, i, uint64(m.Cache.Size()))
-	n4, err := m.Cache.MarshalTo(dAtA[i:])
-	if err != nil {
-		return 0, err
-	}
-	i += n4
-	if len(m.Entitlements) > 0 {
-		for _, s := range m.Entitlements {
-			dAtA[i] = 0x4a
-			i++
-			l = len(s)
-			for l >= 1<<7 {
-				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
-				l >>= 7
-				i++
-			}
-			dAtA[i] = uint8(l)
-			i++
-			i += copy(dAtA[i:], s)
-		}
-	}
-	return i, nil
-}
-
-func (m *CacheOptions) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *CacheOptions) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.ExportRef) > 0 {
-		dAtA[i] = 0xa
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.ExportRef)))
-		i += copy(dAtA[i:], m.ExportRef)
-	}
-	if len(m.ImportRefs) > 0 {
-		for _, s := range m.ImportRefs {
-			dAtA[i] = 0x12
-			i++
-			l = len(s)
-			for l >= 1<<7 {
-				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
-				l >>= 7
-				i++
-			}
-			dAtA[i] = uint8(l)
-			i++
-			i += copy(dAtA[i:], s)
-		}
-	}
-	if len(m.ExportAttrs) > 0 {
-		for k, _ := range m.ExportAttrs {
-			dAtA[i] = 0x1a
-			i++
-			v := m.ExportAttrs[k]
-			mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
-			i = encodeVarintControl(dAtA, i, uint64(mapSize))
-			dAtA[i] = 0xa
-			i++
-			i = encodeVarintControl(dAtA, i, uint64(len(k)))
-			i += copy(dAtA[i:], k)
-			dAtA[i] = 0x12
-			i++
-			i = encodeVarintControl(dAtA, i, uint64(len(v)))
-			i += copy(dAtA[i:], v)
-		}
-	}
-	return i, nil
-}
-
-func (m *SolveResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.ExporterResponse) > 0 {
-		for k, _ := range m.ExporterResponse {
-			dAtA[i] = 0xa
-			i++
-			v := m.ExporterResponse[k]
-			mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
-			i = encodeVarintControl(dAtA, i, uint64(mapSize))
-			dAtA[i] = 0xa
-			i++
-			i = encodeVarintControl(dAtA, i, uint64(len(k)))
-			i += copy(dAtA[i:], k)
-			dAtA[i] = 0x12
-			i++
-			i = encodeVarintControl(dAtA, i, uint64(len(v)))
-			i += copy(dAtA[i:], v)
-		}
-	}
-	return i, nil
-}
-
-func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.Ref) > 0 {
-		dAtA[i] = 0xa
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.Ref)))
-		i += copy(dAtA[i:], m.Ref)
-	}
-	return i, nil
-}
-
-func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.Vertexes) > 0 {
-		for _, msg := range m.Vertexes {
-			dAtA[i] = 0xa
-			i++
-			i = encodeVarintControl(dAtA, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(dAtA[i:])
-			if err != nil {
-				return 0, err
-			}
-			i += n
-		}
-	}
-	if len(m.Statuses) > 0 {
-		for _, msg := range m.Statuses {
-			dAtA[i] = 0x12
-			i++
-			i = encodeVarintControl(dAtA, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(dAtA[i:])
-			if err != nil {
-				return 0, err
-			}
-			i += n
-		}
-	}
-	if len(m.Logs) > 0 {
-		for _, msg := range m.Logs {
-			dAtA[i] = 0x1a
-			i++
-			i = encodeVarintControl(dAtA, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(dAtA[i:])
-			if err != nil {
-				return 0, err
-			}
-			i += n
-		}
-	}
-	return i, nil
-}
-
-func (m *Vertex) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Vertex) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.Digest) > 0 {
-		dAtA[i] = 0xa
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.Digest)))
-		i += copy(dAtA[i:], m.Digest)
-	}
-	if len(m.Inputs) > 0 {
-		for _, s := range m.Inputs {
-			dAtA[i] = 0x12
-			i++
-			l = len(s)
-			for l >= 1<<7 {
-				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
-				l >>= 7
-				i++
-			}
-			dAtA[i] = uint8(l)
-			i++
-			i += copy(dAtA[i:], s)
-		}
-	}
-	if len(m.Name) > 0 {
-		dAtA[i] = 0x1a
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.Name)))
-		i += copy(dAtA[i:], m.Name)
-	}
-	if m.Cached {
-		dAtA[i] = 0x20
-		i++
-		if m.Cached {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i++
-	}
-	if m.Started != nil {
-		dAtA[i] = 0x2a
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(*m.Started)))
-		n5, err := types.StdTimeMarshalTo(*m.Started, dAtA[i:])
-		if err != nil {
-			return 0, err
-		}
-		i += n5
-	}
-	if m.Completed != nil {
-		dAtA[i] = 0x32
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(*m.Completed)))
-		n6, err := types.StdTimeMarshalTo(*m.Completed, dAtA[i:])
-		if err != nil {
-			return 0, err
-		}
-		i += n6
-	}
-	if len(m.Error) > 0 {
-		dAtA[i] = 0x3a
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.Error)))
-		i += copy(dAtA[i:], m.Error)
-	}
-	return i, nil
-}
-
-func (m *VertexStatus) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *VertexStatus) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.ID) > 0 {
-		dAtA[i] = 0xa
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.ID)))
-		i += copy(dAtA[i:], m.ID)
-	}
-	if len(m.Vertex) > 0 {
-		dAtA[i] = 0x12
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex)))
-		i += copy(dAtA[i:], m.Vertex)
-	}
-	if len(m.Name) > 0 {
-		dAtA[i] = 0x1a
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.Name)))
-		i += copy(dAtA[i:], m.Name)
-	}
-	if m.Current != 0 {
-		dAtA[i] = 0x20
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(m.Current))
-	}
-	if m.Total != 0 {
-		dAtA[i] = 0x28
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(m.Total))
-	}
-	dAtA[i] = 0x32
-	i++
-	i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(m.Timestamp)))
-	n7, err := types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
-	if err != nil {
-		return 0, err
-	}
-	i += n7
-	if m.Started != nil {
-		dAtA[i] = 0x3a
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(*m.Started)))
-		n8, err := types.StdTimeMarshalTo(*m.Started, dAtA[i:])
-		if err != nil {
-			return 0, err
-		}
-		i += n8
-	}
-	if m.Completed != nil {
-		dAtA[i] = 0x42
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(*m.Completed)))
-		n9, err := types.StdTimeMarshalTo(*m.Completed, dAtA[i:])
-		if err != nil {
-			return 0, err
-		}
-		i += n9
-	}
-	return i, nil
-}
-
-func (m *VertexLog) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *VertexLog) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.Vertex) > 0 {
-		dAtA[i] = 0xa
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex)))
-		i += copy(dAtA[i:], m.Vertex)
-	}
-	dAtA[i] = 0x12
-	i++
-	i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(m.Timestamp)))
-	n10, err := types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
-	if err != nil {
-		return 0, err
-	}
-	i += n10
-	if m.Stream != 0 {
-		dAtA[i] = 0x18
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(m.Stream))
-	}
-	if len(m.Msg) > 0 {
-		dAtA[i] = 0x22
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.Msg)))
-		i += copy(dAtA[i:], m.Msg)
-	}
-	return i, nil
-}
-
-func (m *BytesMessage) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.Data) > 0 {
-		dAtA[i] = 0xa
-		i++
-		i = encodeVarintControl(dAtA, i, uint64(len(m.Data)))
-		i += copy(dAtA[i:], m.Data)
-	}
-	return i, nil
-}
-
-func (m *ListWorkersRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ListWorkersRequest) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.Filter) > 0 {
-		for _, s := range m.Filter {
-			dAtA[i] = 0xa
-			i++
-			l = len(s)
-			for l >= 1<<7 {
-				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
-				l >>= 7
-				i++
-			}
-			dAtA[i] = uint8(l)
-			i++
-			i += copy(dAtA[i:], s)
-		}
-	}
-	return i, nil
-}
-
-func (m *ListWorkersResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ListWorkersResponse) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.Record) > 0 {
-		for _, msg := range m.Record {
-			dAtA[i] = 0xa
-			i++
-			i = encodeVarintControl(dAtA, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(dAtA[i:])
-			if err != nil {
-				return 0, err
-			}
-			i += n
-		}
-	}
-	return i, nil
-}
-
-func encodeVarintControl(dAtA []byte, offset int, v uint64) int {
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return offset + 1
-}
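The hand-written marshalers above emit standard protobuf wire format: each field begins with a key byte computed as fieldNumber<<3 | wireType, followed by a varint length (written by encodeVarintControl) for length-delimited fields. As an illustrative cross-check, the literals used in PruneRequest.MarshalTo decode as follows (these constants are not part of the vendored file):

    // Wire types: 0 = varint, 2 = length-delimited.
    const (
        keyFilter       = 1<<3 | 2 // 0x0a: field 1, length-delimited
        keyAll          = 2<<3 | 0 // 0x10: field 2, varint (bool)
        keyKeepDuration = 3<<3 | 0 // 0x18: field 3, varint
        keyKeepBytes    = 4<<3 | 0 // 0x20: field 4, varint
    )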
-func (m *PruneRequest) Size() (n int) {
-	var l int
-	_ = l
-	if len(m.Filter) > 0 {
-		for _, s := range m.Filter {
-			l = len(s)
-			n += 1 + l + sovControl(uint64(l))
-		}
-	}
-	if m.All {
-		n += 2
-	}
-	if m.KeepDuration != 0 {
-		n += 1 + sovControl(uint64(m.KeepDuration))
-	}
-	if m.KeepBytes != 0 {
-		n += 1 + sovControl(uint64(m.KeepBytes))
-	}
-	return n
-}
-
-func (m *DiskUsageRequest) Size() (n int) {
-	var l int
-	_ = l
-	if len(m.Filter) > 0 {
-		for _, s := range m.Filter {
-			l = len(s)
-			n += 1 + l + sovControl(uint64(l))
-		}
-	}
-	return n
-}
-
-func (m *DiskUsageResponse) Size() (n int) {
-	var l int
-	_ = l
-	if len(m.Record) > 0 {
-		for _, e := range m.Record {
-			l = e.Size()
-			n += 1 + l + sovControl(uint64(l))
-		}
-	}
-	return n
-}
-
-func (m *UsageRecord) Size() (n int) {
-	var l int
-	_ = l
-	l = len(m.ID)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	if m.Mutable {
-		n += 2
-	}
-	if m.InUse {
-		n += 2
-	}
-	if m.Size_ != 0 {
-		n += 1 + sovControl(uint64(m.Size_))
-	}
-	l = len(m.Parent)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	l = types.SizeOfStdTime(m.CreatedAt)
-	n += 1 + l + sovControl(uint64(l))
-	if m.LastUsedAt != nil {
-		l = types.SizeOfStdTime(*m.LastUsedAt)
-		n += 1 + l + sovControl(uint64(l))
-	}
-	if m.UsageCount != 0 {
-		n += 1 + sovControl(uint64(m.UsageCount))
-	}
-	l = len(m.Description)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	l = len(m.RecordType)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	if m.Shared {
-		n += 2
-	}
-	return n
-}
-
-func (m *SolveRequest) Size() (n int) {
-	var l int
-	_ = l
-	l = len(m.Ref)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	if m.Definition != nil {
-		l = m.Definition.Size()
-		n += 1 + l + sovControl(uint64(l))
-	}
-	l = len(m.Exporter)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	if len(m.ExporterAttrs) > 0 {
-		for k, v := range m.ExporterAttrs {
-			_ = k
-			_ = v
-			mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
-			n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
-		}
-	}
-	l = len(m.Session)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	l = len(m.Frontend)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	if len(m.FrontendAttrs) > 0 {
-		for k, v := range m.FrontendAttrs {
-			_ = k
-			_ = v
-			mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
-			n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
-		}
-	}
-	l = m.Cache.Size()
-	n += 1 + l + sovControl(uint64(l))
-	if len(m.Entitlements) > 0 {
-		for _, s := range m.Entitlements {
-			l = len(s)
-			n += 1 + l + sovControl(uint64(l))
-		}
-	}
-	return n
-}
-
-func (m *CacheOptions) Size() (n int) {
-	var l int
-	_ = l
-	l = len(m.ExportRef)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	if len(m.ImportRefs) > 0 {
-		for _, s := range m.ImportRefs {
-			l = len(s)
-			n += 1 + l + sovControl(uint64(l))
-		}
-	}
-	if len(m.ExportAttrs) > 0 {
-		for k, v := range m.ExportAttrs {
-			_ = k
-			_ = v
-			mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
-			n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
-		}
-	}
-	return n
-}
-
-func (m *SolveResponse) Size() (n int) {
-	var l int
-	_ = l
-	if len(m.ExporterResponse) > 0 {
-		for k, v := range m.ExporterResponse {
-			_ = k
-			_ = v
-			mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
-			n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
-		}
-	}
-	return n
-}
-
-func (m *StatusRequest) Size() (n int) {
-	var l int
-	_ = l
-	l = len(m.Ref)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	return n
-}
-
-func (m *StatusResponse) Size() (n int) {
-	var l int
-	_ = l
-	if len(m.Vertexes) > 0 {
-		for _, e := range m.Vertexes {
-			l = e.Size()
-			n += 1 + l + sovControl(uint64(l))
-		}
-	}
-	if len(m.Statuses) > 0 {
-		for _, e := range m.Statuses {
-			l = e.Size()
-			n += 1 + l + sovControl(uint64(l))
-		}
-	}
-	if len(m.Logs) > 0 {
-		for _, e := range m.Logs {
-			l = e.Size()
-			n += 1 + l + sovControl(uint64(l))
-		}
-	}
-	return n
-}
-
-func (m *Vertex) Size() (n int) {
-	var l int
-	_ = l
-	l = len(m.Digest)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	if len(m.Inputs) > 0 {
-		for _, s := range m.Inputs {
-			l = len(s)
-			n += 1 + l + sovControl(uint64(l))
-		}
-	}
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	if m.Cached {
-		n += 2
-	}
-	if m.Started != nil {
-		l = types.SizeOfStdTime(*m.Started)
-		n += 1 + l + sovControl(uint64(l))
-	}
-	if m.Completed != nil {
-		l = types.SizeOfStdTime(*m.Completed)
-		n += 1 + l + sovControl(uint64(l))
-	}
-	l = len(m.Error)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	return n
-}
-
-func (m *VertexStatus) Size() (n int) {
-	var l int
-	_ = l
-	l = len(m.ID)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	l = len(m.Vertex)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	if m.Current != 0 {
-		n += 1 + sovControl(uint64(m.Current))
-	}
-	if m.Total != 0 {
-		n += 1 + sovControl(uint64(m.Total))
-	}
-	l = types.SizeOfStdTime(m.Timestamp)
-	n += 1 + l + sovControl(uint64(l))
-	if m.Started != nil {
-		l = types.SizeOfStdTime(*m.Started)
-		n += 1 + l + sovControl(uint64(l))
-	}
-	if m.Completed != nil {
-		l = types.SizeOfStdTime(*m.Completed)
-		n += 1 + l + sovControl(uint64(l))
-	}
-	return n
-}
-
-func (m *VertexLog) Size() (n int) {
-	var l int
-	_ = l
-	l = len(m.Vertex)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	l = types.SizeOfStdTime(m.Timestamp)
-	n += 1 + l + sovControl(uint64(l))
-	if m.Stream != 0 {
-		n += 1 + sovControl(uint64(m.Stream))
-	}
-	l = len(m.Msg)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	return n
-}
-
-func (m *BytesMessage) Size() (n int) {
-	var l int
-	_ = l
-	l = len(m.Data)
-	if l > 0 {
-		n += 1 + l + sovControl(uint64(l))
-	}
-	return n
-}
-
-func (m *ListWorkersRequest) Size() (n int) {
-	var l int
-	_ = l
-	if len(m.Filter) > 0 {
-		for _, s := range m.Filter {
-			l = len(s)
-			n += 1 + l + sovControl(uint64(l))
-		}
-	}
-	return n
-}
-
-func (m *ListWorkersResponse) Size() (n int) {
-	var l int
-	_ = l
-	if len(m.Record) > 0 {
-		for _, e := range m.Record {
-			l = e.Size()
-			n += 1 + l + sovControl(uint64(l))
-		}
-	}
-	return n
-}
-
-func sovControl(x uint64) (n int) {
-	for {
-		n++
-		x >>= 7
-		if x == 0 {
-			break
-		}
-	}
-	return n
-}
-func sozControl(x uint64) (n int) {
-	return sovControl(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
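sovControl ("size of varint") counts how many 7-bit groups a value occupies, which is how the Size methods above let Marshal allocate an exactly-sized buffer up front. sozControl first zig-zag encodes the value: (x<<1) ^ (x>>63) maps 0, -1, 1, -2 to 0, 1, 2, 3, so small negative numbers stay small on the wire. A quick illustration (a hypothetical test, not vendored code):

    func TestVarintSizes(t *testing.T) {
        if sovControl(127) != 1 || sovControl(128) != 2 {
            t.Fatal("127 fits in one 7-bit group; 128 needs two")
        }
        if sozControl(uint64(int64(-1))) != 1 {
            t.Fatal("zig-zag maps -1 to 1, a one-byte varint")
        }
    }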
-func (m *PruneRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowControl
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: PruneRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: PruneRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthControl
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field All", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.All = bool(v != 0)
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field KeepDuration", wireType)
-			}
-			m.KeepDuration = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.KeepDuration |= (int64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field KeepBytes", wireType)
-			}
-			m.KeepBytes = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.KeepBytes |= (int64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipControl(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthControl
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowControl
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: DiskUsageRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: DiskUsageRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthControl
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipControl(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthControl
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *DiskUsageResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowControl
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: DiskUsageResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: DiskUsageResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthControl
-			}
-			postIndex := iNdEx + msglen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Record = append(m.Record, &UsageRecord{})
-			if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipControl(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthControl
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *UsageRecord) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowControl
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: UsageRecord: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: UsageRecord: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthControl
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.ID = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Mutable", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Mutable = bool(v != 0)
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field InUse", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.InUse = bool(v != 0)
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType)
-			}
-			m.Size_ = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Size_ |= (int64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 5:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthControl
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Parent = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 6:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthControl
-			}
-			postIndex := iNdEx + msglen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 7:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field LastUsedAt", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthControl
-			}
-			postIndex := iNdEx + msglen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.LastUsedAt == nil {
-				m.LastUsedAt = new(time.Time)
-			}
-			if err := types.StdTimeUnmarshal(m.LastUsedAt, dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 8:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field UsageCount", wireType)
-			}
-			m.UsageCount = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.UsageCount |= (int64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 9:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthControl
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Description = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 10:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RecordType", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthControl
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.RecordType = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 11:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Shared", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowControl
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Shared = bool(v != 0)
-		default:
-			iNdEx = preIndex
-			skippy, err := skipControl(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthControl
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
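Every Unmarshal above runs the same loop: decode a varint key, split it into a field number and wire type, dispatch on the field number, and route unknown fields through skipControl so older decoders tolerate newer fields. A standalone sketch of just the key-decoding step (decodeKey is illustrative, not vendored code):

    // decodeKey reads one protobuf field key from b, returning the field
    // number, wire type, and number of bytes consumed.
    func decodeKey(b []byte) (fieldNum int32, wireType int, n int, err error) {
        var wire uint64
        for shift := uint(0); ; shift += 7 {
            if n >= len(b) {
                return 0, 0, 0, io.ErrUnexpectedEOF
            }
            c := b[n]
            n++
            wire |= uint64(c&0x7f) << shift
            if c < 0x80 {
                break // high bit clear marks the last varint byte
            }
        }
        return int32(wire >> 3), int(wire & 0x7), n, nil
    }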
return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Exporter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttrs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ExporterAttrs == nil { - m.ExporterAttrs = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.ExporterAttrs[mapkey] = mapvalue - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Session", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Session = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Frontend", wireType) - } - var stringLen uint64 - for shift := uint(0); 
; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Frontend = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FrontendAttrs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FrontendAttrs == nil { - m.FrontendAttrs = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.FrontendAttrs[mapkey] = mapvalue - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
if err := m.Cache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Entitlements", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Entitlements = append(m.Entitlements, github_com_moby_buildkit_util_entitlements.Entitlement(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CacheOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CacheOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CacheOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExportRef", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExportRef = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImportRefs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImportRefs = append(m.ImportRefs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExportAttrs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex 
:= iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ExportAttrs == nil { - m.ExportAttrs = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.ExportAttrs[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SolveResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SolveResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SolveResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExporterResponse", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl 
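Every one of these generated decoders leans on a single primitive: the base-128 varint, read with the `shift += 7` loop that repeats throughout this file. The following is a condensed sketch of that loop in isolation — the package and names are illustrative stand-ins, not part of the vendored API:

package sketch

import (
	"errors"
	"io"
)

// decodeVarint reads one protobuf varint starting at offset idx.
// Each byte contributes its low 7 bits, least-significant group first;
// a clear high bit (b < 0x80) marks the final byte of the value.
func decodeVarint(data []byte, idx int) (v uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow") // cf. ErrIntOverflowControl
		}
		if idx >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[idx]
		idx++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, idx, nil
		}
	}
}

A field's tag is itself a varint; the decoders split it as fieldNum := wire >> 3 and wireType := wire & 0x7 before dispatching on the field number, which is exactly the preamble of every case block in the surrounding code.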
- } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ExporterResponse == nil { - m.ExporterResponse = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.ExporterResponse[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vertexes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Vertexes = append(m.Vertexes, &Vertex{}) - if err := m.Vertexes[len(m.Vertexes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Statuses = append(m.Statuses, &VertexStatus{}) - if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Logs = append(m.Logs, &VertexLog{}) - if err := m.Logs[len(m.Logs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *Vertex) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Vertex: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Vertex: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Inputs = append(m.Inputs, github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Cached", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Cached = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if m.Started == nil { - m.Started = new(time.Time) - } - if err := types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Completed == nil { - m.Completed = new(time.Time) - } - if err := types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VertexStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VertexStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VertexStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - m.Current = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Current |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) - } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Started == nil { - m.Started = new(time.Time) - } - if err := types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Completed == nil { - m.Completed = new(time.Time) - } - if err := 
types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VertexLog) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VertexLog: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VertexLog: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) - } - m.Stream = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Stream |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Msg = append(m.Msg[:0], dAtA[iNdEx:postIndex]...) 
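The Msg case just above shows the pattern for length-delimited fields (wire type 2): a varint byte count followed by that many raw bytes, copied out with append(m.Msg[:0], ...) so the decoded message never aliases the caller's buffer and can reuse Msg's old backing array across decodes. A condensed sketch of the same pattern, with hypothetical names:

package sketch

import "io"

// readBytesField copies a length-delimited payload out of data.
// Passing dst[:0] to append preserves dst's capacity, so decoding
// into a reused message avoids a fresh allocation when possible.
func readBytesField(dst, data []byte, idx, byteLen int) ([]byte, int, error) {
	post := idx + byteLen
	if byteLen < 0 || post > len(data) {
		return nil, 0, io.ErrUnexpectedEOF
	}
	dst = append(dst[:0], data[idx:post]...) // copy, never alias data
	if dst == nil {
		dst = []byte{} // keep "set to empty" distinct from "never set"
	}
	return dst, post, nil
}

The trailing nil check mirrors the generated `if m.Msg == nil { m.Msg = []byte{} }` that follows the copy.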
- if m.Msg == nil { - m.Msg = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BytesMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListWorkersRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListWorkersRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListWorkersRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - 
return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListWorkersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListWorkersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListWorkersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Record = append(m.Record, &moby_buildkit_v1_types.WorkerRecord{}) - if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipControl(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowControl - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowControl - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowControl - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthControl - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowControl - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipControl(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: 
illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthControl = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowControl = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("control.proto", fileDescriptorControl) } - -var fileDescriptorControl = []byte{ - // 1279 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xef, 0xda, 0x89, 0xed, 0x7d, 0x76, 0xaa, 0x30, 0x40, 0xb5, 0x5a, 0x20, 0x31, 0x0b, 0x48, - 0x56, 0xd5, 0xee, 0xb6, 0x81, 0x22, 0x14, 0xa1, 0xaa, 0x75, 0x5c, 0x44, 0xaa, 0x46, 0x94, 0x49, - 0x4b, 0x25, 0x0e, 0x48, 0x6b, 0x7b, 0xe2, 0xae, 0xb2, 0xde, 0x59, 0x66, 0x66, 0x43, 0xcd, 0x07, - 0xe0, 0xcc, 0x77, 0xe1, 0xc0, 0x27, 0x40, 0xea, 0x91, 0x73, 0x0f, 0x29, 0xea, 0x1d, 0x4e, 0x5c, - 0xb8, 0xa1, 0xf9, 0xb3, 0xce, 0x38, 0x76, 0xea, 0xa6, 0x3d, 0x65, 0xde, 0xe4, 0xf7, 0x7e, 0xfb, - 0xfe, 0xcd, 0x7b, 0xcf, 0xb0, 0x36, 0xa0, 0x99, 0x60, 0x34, 0x0d, 0x73, 0x46, 0x05, 0x45, 0xeb, - 0x63, 0xda, 0x9f, 0x84, 0xfd, 0x22, 0x49, 0x87, 0x87, 0x89, 0x08, 0x8f, 0xae, 0xfb, 0x57, 0x47, - 0x89, 0x78, 0x5c, 0xf4, 0xc3, 0x01, 0x1d, 0x47, 0x23, 0x3a, 0xa2, 0x91, 0x02, 0xf6, 0x8b, 0x03, - 0x25, 0x29, 0x41, 0x9d, 0x34, 0x81, 0xbf, 0x39, 0xa2, 0x74, 0x94, 0x92, 0x13, 0x94, 0x48, 0xc6, - 0x84, 0x8b, 0x78, 0x9c, 0x1b, 0xc0, 0x15, 0x8b, 0x4f, 0x7e, 0x2c, 0x2a, 0x3f, 0x16, 0x71, 0x9a, - 0x1e, 0x11, 0x16, 0xe5, 0xfd, 0x88, 0xe6, 0xdc, 0xa0, 0xa3, 0x33, 0xd1, 0x71, 0x9e, 0x44, 0x62, - 0x92, 0x13, 0x1e, 0xfd, 0x44, 0xd9, 0x21, 0x61, 0x5a, 0x21, 0xf8, 0xc5, 0x81, 0xd6, 0x7d, 0x56, - 0x64, 0x04, 0x93, 0x1f, 0x0b, 0xc2, 0x05, 0xba, 0x04, 0xb5, 0x83, 0x24, 0x15, 0x84, 0x79, 0x4e, - 0xbb, 0xda, 0x71, 0xb1, 0x91, 0xd0, 0x3a, 0x54, 0xe3, 0x34, 0xf5, 0x2a, 0x6d, 0xa7, 0xd3, 0xc0, - 0xf2, 0x88, 0x3a, 0xd0, 0x3a, 0x24, 0x24, 0xef, 0x15, 0x2c, 0x16, 0x09, 0xcd, 0xbc, 0x6a, 0xdb, - 0xe9, 0x54, 0xbb, 0x2b, 0x4f, 0x8f, 0x37, 0x1d, 0x3c, 0xf3, 0x1f, 0x14, 0x80, 0x2b, 0xe5, 0xee, - 0x44, 0x10, 0xee, 0xad, 0x58, 0xb0, 0x93, 0xeb, 0xe0, 0x32, 0xac, 0xf7, 0x12, 0x7e, 0xf8, 0x90, - 0xc7, 0xa3, 0x65, 0xb6, 0x04, 0x77, 0xe1, 0x2d, 0x0b, 0xcb, 0x73, 0x9a, 0x71, 0x82, 0x6e, 0x40, - 0x8d, 0x91, 0x01, 0x65, 0x43, 0x05, 0x6e, 0x6e, 0x7d, 0x10, 0x9e, 0xce, 0x4d, 0x68, 0x14, 0x24, - 0x08, 0x1b, 0x70, 0xf0, 0x5f, 0x05, 0x9a, 0xd6, 0x3d, 0xba, 0x08, 0x95, 0xdd, 0x9e, 0xe7, 0xb4, - 0x9d, 0x8e, 0x8b, 0x2b, 0xbb, 0x3d, 0xe4, 0x41, 0x7d, 0xaf, 0x10, 0x71, 0x3f, 0x25, 0xc6, 0xf7, - 0x52, 0x44, 0xef, 0xc0, 0xea, 0x6e, 0xf6, 0x90, 0x13, 0xe5, 0x78, 0x03, 0x6b, 0x01, 0x21, 0x58, - 0xd9, 0x4f, 0x7e, 0x26, 0xda, 0x4d, 0xac, 0xce, 0xd2, 0x8f, 0xfb, 0x31, 0x23, 0x99, 0xf0, 0x56, - 0x15, 0xaf, 0x91, 0x50, 0x17, 0xdc, 0x1d, 0x46, 0x62, 0x41, 0x86, 0xb7, 0x85, 0x57, 0x6b, 0x3b, - 0x9d, 0xe6, 0x96, 0x1f, 0xea, 0x82, 0x08, 0xcb, 0x82, 0x08, 0x1f, 0x94, 0x05, 0xd1, 0x6d, 0x3c, - 0x3d, 0xde, 0xbc, 0xf0, 0xeb, 0x73, 0x19, 0xb7, 0xa9, 0x1a, 0xba, 0x05, 0x70, 0x2f, 0xe6, 0xe2, - 0x21, 0x57, 0x24, 0xf5, 0xa5, 0x24, 0x2b, 0x8a, 0xc0, 0xd2, 0x41, 0x1b, 0x00, 0x2a, 0x00, 0x3b, - 0xb4, 0xc8, 0x84, 0xd7, 0x50, 0x76, 0x5b, 0x37, 0xa8, 0x0d, 0xcd, 0x1e, 0xe1, 0x03, 0x96, 0xe4, - 0x2a, 0xcd, 0xae, 0x72, 0xc1, 0xbe, 0x92, 0x0c, 0x3a, 0x7a, 0x0f, 0x26, 0x39, 0xf1, 0x40, 0x01, - 0xac, 0x1b, 0xe9, 0xff, 0xfe, 0xe3, 0x98, 0x91, 0xa1, 0xd7, 0x54, 0xa1, 0x32, 0x52, 0xf0, 0xef, - 0x0a, 0xb4, 0xf6, 0x65, 0x15, 0x97, 0x09, 0x5f, 0x87, 0x2a, 0x26, 0x07, 0x26, 0xfa, 0xf2, 0x88, - 0x42, 0x80, 0x1e, 
0x39, 0x48, 0xb2, 0x44, 0x7d, 0xbb, 0xa2, 0xdc, 0xbb, 0x18, 0xe6, 0xfd, 0xf0, - 0xe4, 0x16, 0x5b, 0x08, 0xe4, 0x43, 0xe3, 0xce, 0x93, 0x9c, 0x32, 0x59, 0x34, 0x55, 0x45, 0x33, - 0x95, 0xd1, 0x23, 0x58, 0x2b, 0xcf, 0xb7, 0x85, 0x60, 0xb2, 0x14, 0x65, 0xa1, 0x5c, 0x9f, 0x2f, - 0x14, 0xdb, 0xa8, 0x70, 0x46, 0xe7, 0x4e, 0x26, 0xd8, 0x04, 0xcf, 0xf2, 0xc8, 0x1a, 0xd9, 0x27, - 0x9c, 0x4b, 0x0b, 0x75, 0x82, 0x4b, 0x51, 0x9a, 0xf3, 0x15, 0xa3, 0x99, 0x20, 0xd9, 0x50, 0x25, - 0xd8, 0xc5, 0x53, 0x59, 0x9a, 0x53, 0x9e, 0xb5, 0x39, 0xf5, 0x57, 0x32, 0x67, 0x46, 0xc7, 0x98, - 0x33, 0x73, 0x87, 0xb6, 0x61, 0x75, 0x27, 0x1e, 0x3c, 0x26, 0x2a, 0x97, 0xcd, 0xad, 0x8d, 0x79, - 0x42, 0xf5, 0xef, 0x6f, 0x54, 0xf2, 0xb8, 0x7a, 0x8a, 0x17, 0xb0, 0x56, 0x41, 0x3f, 0x40, 0xeb, - 0x4e, 0x26, 0x12, 0x91, 0x92, 0x31, 0xc9, 0x04, 0xf7, 0x5c, 0xf9, 0xf0, 0xba, 0xdb, 0xcf, 0x8e, - 0x37, 0x3f, 0x3f, 0xb3, 0xb5, 0x14, 0x22, 0x49, 0x23, 0x62, 0x69, 0x85, 0x16, 0x05, 0x9e, 0xe1, - 0xf3, 0x6f, 0x01, 0x9a, 0x8f, 0xa7, 0xcc, 0xfb, 0x21, 0x99, 0x94, 0x79, 0x3f, 0x24, 0x13, 0xf9, - 0xb8, 0x8e, 0xe2, 0xb4, 0xd0, 0x8f, 0xce, 0xc5, 0x5a, 0xd8, 0xae, 0x7c, 0xe1, 0x48, 0x86, 0xf9, - 0x10, 0x9c, 0x87, 0x21, 0x78, 0xee, 0x40, 0xcb, 0x8e, 0x00, 0x7a, 0x1f, 0x5c, 0x6d, 0xd4, 0x49, - 0xf1, 0x9d, 0x5c, 0xc8, 0xea, 0xde, 0x1d, 0x1b, 0x81, 0x7b, 0x15, 0xd5, 0x89, 0xac, 0x1b, 0xf4, - 0x2d, 0x34, 0x35, 0x58, 0x67, 0xb1, 0xaa, 0xb2, 0x18, 0xbd, 0x3c, 0xe8, 0xa1, 0xa5, 0xa1, 0x73, - 0x68, 0x73, 0xf8, 0x37, 0x61, 0xfd, 0x34, 0xe0, 0x5c, 0x1e, 0xfe, 0xee, 0xc0, 0x9a, 0x29, 0x1a, - 0xd3, 0x1d, 0xe3, 0x92, 0x91, 0xb0, 0xf2, 0xce, 0xf4, 0xc9, 0x1b, 0x67, 0xd6, 0x9b, 0x86, 0x85, - 0xa7, 0xf5, 0xb4, 0xbd, 0x73, 0x74, 0xfe, 0x0e, 0xbc, 0xbb, 0x10, 0x7a, 0x2e, 0xcb, 0x3f, 0x84, - 0xb5, 0x7d, 0x11, 0x8b, 0x82, 0x9f, 0xd9, 0x12, 0x82, 0xdf, 0x1c, 0xb8, 0x58, 0x62, 0x8c, 0x77, - 0x9f, 0x41, 0xe3, 0x88, 0x30, 0x41, 0x9e, 0x10, 0x6e, 0xbc, 0xf2, 0xe6, 0xbd, 0xfa, 0x4e, 0x21, - 0xf0, 0x14, 0x89, 0xb6, 0xa1, 0xc1, 0x15, 0x0f, 0xd1, 0x69, 0x5d, 0xf8, 0x54, 0xb4, 0x96, 0xf9, - 0xde, 0x14, 0x8f, 0x22, 0x58, 0x49, 0xe9, 0xa8, 0xcc, 0xf6, 0x7b, 0x67, 0xe9, 0xdd, 0xa3, 0x23, - 0xac, 0x80, 0xc1, 0x71, 0x05, 0x6a, 0xfa, 0x0e, 0xdd, 0x85, 0xda, 0x30, 0x19, 0x11, 0x2e, 0xb4, - 0x57, 0xdd, 0x2d, 0xf9, 0x00, 0x9f, 0x1d, 0x6f, 0x5e, 0xb6, 0x5e, 0x18, 0xcd, 0x49, 0x26, 0x57, - 0x8d, 0x38, 0xc9, 0x08, 0xe3, 0xd1, 0x88, 0x5e, 0xd5, 0x2a, 0x61, 0x4f, 0xfd, 0xc1, 0x86, 0x41, - 0x72, 0x25, 0x59, 0x5e, 0x08, 0x53, 0x98, 0xaf, 0xc7, 0xa5, 0x19, 0xe4, 0xe8, 0xca, 0xe2, 0x31, - 0x31, 0x7d, 0x53, 0x9d, 0x65, 0xeb, 0x1e, 0xc8, 0xba, 0x1d, 0xaa, 0x81, 0xd6, 0xc0, 0x46, 0x42, - 0xdb, 0x50, 0xe7, 0x22, 0x66, 0x82, 0x0c, 0x55, 0xcb, 0x7b, 0x95, 0x99, 0x53, 0x2a, 0xa0, 0x9b, - 0xe0, 0x0e, 0xe8, 0x38, 0x4f, 0x89, 0xd4, 0xae, 0xbd, 0xa2, 0xf6, 0x89, 0x8a, 0xac, 0x1e, 0xc2, - 0x18, 0x65, 0x6a, 0xda, 0xb9, 0x58, 0x0b, 0xc1, 0x3f, 0x15, 0x68, 0xd9, 0xc9, 0x9a, 0x9b, 0xe4, - 0x77, 0xa1, 0xa6, 0x53, 0xaf, 0xab, 0xee, 0xf5, 0x42, 0xa5, 0x19, 0x16, 0x86, 0xca, 0x83, 0xfa, - 0xa0, 0x60, 0x6a, 0xcc, 0xeb, 0xe1, 0x5f, 0x8a, 0xd2, 0x60, 0x41, 0x45, 0x9c, 0xaa, 0x50, 0x55, - 0xb1, 0x16, 0xe4, 0xf4, 0x9f, 0x2e, 0x7b, 0xe7, 0x9b, 0xfe, 0x53, 0x35, 0x3b, 0x0d, 0xf5, 0x37, - 0x4a, 0x43, 0xe3, 0xdc, 0x69, 0x08, 0xfe, 0x70, 0xc0, 0x9d, 0x56, 0xb9, 0x15, 0x5d, 0xe7, 0x8d, - 0xa3, 0x3b, 0x13, 0x99, 0xca, 0xeb, 0x45, 0xe6, 0x12, 0xd4, 0xb8, 0x60, 0x24, 0x1e, 0xeb, 0xbd, - 0x14, 0x1b, 0x49, 0xf6, 0x93, 0x31, 0x1f, 0xa9, 0x0c, 0xb5, 0xb0, 0x3c, 0x06, 0x01, 0xb4, 0xd4, - 0x0a, 0xba, 0x47, 0xb8, 0x5c, 0x7a, 0x64, 
0x6e, 0x87, 0xb1, 0x88, 0x95, 0x1f, 0x2d, 0xac, 0xce,
-	0xc1, 0x15, 0x40, 0xf7, 0x12, 0x2e, 0x1e, 0xa9, 0xd5, 0x99, 0x2f, 0xdb, 0x4f, 0xf7, 0xe1, 0xed,
-	0x19, 0xb4, 0xe9, 0x52, 0x5f, 0x9e, 0xda, 0x50, 0x3f, 0x9e, 0xef, 0x1a, 0x6a, 0x43, 0x0f, 0xb5,
-	0xe2, 0xec, 0xa2, 0xba, 0xf5, 0x77, 0x15, 0xea, 0x3b, 0xfa, 0xc7, 0x07, 0x7a, 0x00, 0xee, 0x74,
-	0x01, 0x46, 0xc1, 0x3c, 0xcd, 0xe9, 0x4d, 0xda, 0xff, 0xe8, 0xa5, 0x18, 0x63, 0xdf, 0xd7, 0xb0,
-	0xaa, 0x7e, 0x0a, 0xa0, 0x05, 0x6d, 0xd0, 0xfe, 0x8d, 0xe0, 0xbf, 0x7c, 0xb5, 0xbe, 0xe6, 0x48,
-	0x26, 0x35, 0x43, 0x16, 0x31, 0xd9, 0xcb, 0x8c, 0xbf, 0xb9, 0x64, 0xf8, 0xa0, 0x3d, 0xa8, 0x99,
-	0xe7, 0xbc, 0x08, 0x6a, 0x4f, 0x0a, 0xbf, 0x7d, 0x36, 0x40, 0x93, 0x5d, 0x73, 0xd0, 0xde, 0x74,
-	0x53, 0x5b, 0x64, 0x9a, 0x5d, 0x06, 0xfe, 0x92, 0xff, 0x77, 0x9c, 0x6b, 0x0e, 0xfa, 0x1e, 0x9a,
-	0x56, 0xa2, 0xd1, 0x82, 0x84, 0xce, 0x57, 0x8d, 0xff, 0xc9, 0x12, 0x94, 0x36, 0xb6, 0xdb, 0x7a,
-	0xfa, 0x62, 0xc3, 0xf9, 0xf3, 0xc5, 0x86, 0xf3, 0xd7, 0x8b, 0x0d, 0xa7, 0x5f, 0x53, 0x75, 0xff,
-	0xe9, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xfe, 0x98, 0x98, 0x82, 0x80, 0x0e, 0x00, 0x00,
-}
diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.proto b/vendor/github.com/moby/buildkit/api/services/control/control.proto
deleted file mode 100644
index 7ac4095b7a58..000000000000
--- a/vendor/github.com/moby/buildkit/api/services/control/control.proto
+++ /dev/null
@@ -1,127 +0,0 @@
-syntax = "proto3";
-
-package moby.buildkit.v1;
-
-// The control API is currently considered experimental and may break in a backwards
-// incompatible way.
-
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-import "google/protobuf/timestamp.proto";
-import "github.com/moby/buildkit/solver/pb/ops.proto";
-import "github.com/moby/buildkit/api/types/worker.proto";
-
-option (gogoproto.sizer_all) = true;
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-
-service Control {
-	rpc DiskUsage(DiskUsageRequest) returns (DiskUsageResponse);
-	rpc Prune(PruneRequest) returns (stream UsageRecord);
-	rpc Solve(SolveRequest) returns (SolveResponse);
-	rpc Status(StatusRequest) returns (stream StatusResponse);
-	rpc Session(stream BytesMessage) returns (stream BytesMessage);
-	rpc ListWorkers(ListWorkersRequest) returns (ListWorkersResponse);
-	// rpc Info(InfoRequest) returns (InfoResponse);
-}
-
-message PruneRequest {
-	repeated string filter = 1;
-	bool all = 2;
-	int64 keepDuration = 3 [(gogoproto.nullable) = true];
-	int64 keepBytes = 4 [(gogoproto.nullable) = true];
-}
-
-message DiskUsageRequest {
-	repeated string filter = 1;
-}
-
-message DiskUsageResponse {
-	repeated UsageRecord record = 1;
-}
-
-message UsageRecord {
-	string ID = 1;
-	bool Mutable = 2;
-	bool InUse = 3;
-	int64 Size = 4;
-	string Parent = 5;
-	google.protobuf.Timestamp CreatedAt = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-	google.protobuf.Timestamp LastUsedAt = 7 [(gogoproto.stdtime) = true];
-	int64 UsageCount = 8;
-	string Description = 9;
-	string RecordType = 10;
-	bool Shared = 11;
-}
-
-message SolveRequest {
-	string Ref = 1;
-	pb.Definition Definition = 2;
-	string Exporter = 3;
-	map<string, string> ExporterAttrs = 4;
-	string Session = 5;
-	string Frontend = 6;
-	map<string, string> FrontendAttrs = 7;
-	CacheOptions Cache = 8 [(gogoproto.nullable) = false];
-	repeated string Entitlements = 9 [(gogoproto.customtype) = "github.com/moby/buildkit/util/entitlements.Entitlement" ];
-}
-
-message CacheOptions {
-	string ExportRef = 1;
-	repeated string ImportRefs = 2;
-	map<string, string> ExportAttrs = 3;
-}
-
-message SolveResponse {
-	map<string, string> ExporterResponse = 1;
-}
-
-message StatusRequest {
-	string Ref = 1;
-}
-
-message StatusResponse {
-	repeated Vertex vertexes = 1;
-	repeated VertexStatus statuses = 2;
-	repeated VertexLog logs = 3;
-}
-
-message Vertex {
-	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-	repeated string inputs = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-	string name = 3;
-	bool cached = 4;
-	google.protobuf.Timestamp started = 5 [(gogoproto.stdtime) = true ];
-	google.protobuf.Timestamp completed = 6 [(gogoproto.stdtime) = true ];
-	string error = 7; // typed errors?
-}
-
-message VertexStatus {
-	string ID = 1;
-	string vertex = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-	string name = 3;
-	int64 current = 4;
-	int64 total = 5;
-	// TODO: add started, completed
-	google.protobuf.Timestamp timestamp = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-	google.protobuf.Timestamp started = 7 [(gogoproto.stdtime) = true ];
-	google.protobuf.Timestamp completed = 8 [(gogoproto.stdtime) = true ];
-}
-
-message VertexLog {
-	string vertex = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-	google.protobuf.Timestamp timestamp = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-	int64 stream = 3;
-	bytes msg = 4;
-}
-
-message BytesMessage {
-	bytes data = 1;
-}
-
-message ListWorkersRequest {
-	repeated string filter = 1; // containerd style
-}
-
-message ListWorkersResponse {
-	repeated moby.buildkit.v1.types.WorkerRecord record = 1;
-}
\ No newline at end of file
diff --git a/vendor/github.com/moby/buildkit/api/services/control/generate.go b/vendor/github.com/moby/buildkit/api/services/control/generate.go
deleted file mode 100644
index 1c161155f502..000000000000
--- a/vendor/github.com/moby/buildkit/api/services/control/generate.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package moby_buildkit_v1
-
-//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. control.proto
diff --git a/vendor/github.com/moby/buildkit/api/types/generate.go b/vendor/github.com/moby/buildkit/api/types/generate.go
deleted file mode 100644
index 84007df1d9b8..000000000000
--- a/vendor/github.com/moby/buildkit/api/types/generate.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package moby_buildkit_v1_types
-
-//go:generate protoc -I=. -I=../../vendor/ -I=../../../../../ --gogo_out=plugins=grpc:. worker.proto
diff --git a/vendor/github.com/moby/buildkit/api/types/worker.pb.go b/vendor/github.com/moby/buildkit/api/types/worker.pb.go
deleted file mode 100644
index 0344d523e7f1..000000000000
--- a/vendor/github.com/moby/buildkit/api/types/worker.pb.go
+++ /dev/null
@@ -1,838 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: worker.proto
-
-/*
-	Package moby_buildkit_v1_types is a generated protocol buffer package.
-
-	It is generated from these files:
-		worker.proto
-
-	It has these top-level messages:
-		WorkerRecord
-		GCPolicy
-*/
-package moby_buildkit_v1_types
-
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-import pb "github.com/moby/buildkit/solver/pb"
-
-import io "io"
-
-// Reference imports to suppress errors if they are not otherwise used.
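Note that the wire format has no map type: a field declared map<string, string> in the proto sources above travels as a repeated nested entry message whose key is field 1 and value is field 2. That is what the WorkerRecord marshalling code below computes when it sizes each Labels pair before writing it. A rough sketch of encoding one entry, using illustrative helper names:

package sketch

// appendMapEntry writes one map<string, string> pair the way protobuf
// does: a varint entry length, then key (field 1, wire type 2) and
// value (field 2, wire type 2). The enclosing field's own tag (e.g.
// 0x12 for WorkerRecord.Labels) is written by the caller.
func appendMapEntry(dst []byte, k, v string) []byte {
	entryLen := 1 + sovVarint(uint64(len(k))) + len(k) +
		1 + sovVarint(uint64(len(v))) + len(v)
	dst = appendVarint(dst, uint64(entryLen))
	dst = append(dst, 0x0a) // tag: field 1, wire type 2 (key)
	dst = appendVarint(dst, uint64(len(k)))
	dst = append(dst, k...)
	dst = append(dst, 0x12) // tag: field 2, wire type 2 (value)
	dst = appendVarint(dst, uint64(len(v)))
	dst = append(dst, v...)
	return dst
}

// appendVarint is an append-style twin of encodeVarintWorker below.
func appendVarint(dst []byte, v uint64) []byte {
	for v >= 1<<7 {
		dst = append(dst, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(dst, byte(v))
}

// sovVarint counts the bytes a varint occupies, like sovWorker below.
func sovVarint(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}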
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type WorkerRecord struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Labels map[string]string `protobuf:"bytes,2,rep,name=Labels" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Platforms []pb.Platform `protobuf:"bytes,3,rep,name=platforms" json:"platforms"` - GCPolicy []*GCPolicy `protobuf:"bytes,4,rep,name=GCPolicy" json:"GCPolicy,omitempty"` -} - -func (m *WorkerRecord) Reset() { *m = WorkerRecord{} } -func (m *WorkerRecord) String() string { return proto.CompactTextString(m) } -func (*WorkerRecord) ProtoMessage() {} -func (*WorkerRecord) Descriptor() ([]byte, []int) { return fileDescriptorWorker, []int{0} } - -func (m *WorkerRecord) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *WorkerRecord) GetLabels() map[string]string { - if m != nil { - return m.Labels - } - return nil -} - -func (m *WorkerRecord) GetPlatforms() []pb.Platform { - if m != nil { - return m.Platforms - } - return nil -} - -func (m *WorkerRecord) GetGCPolicy() []*GCPolicy { - if m != nil { - return m.GCPolicy - } - return nil -} - -type GCPolicy struct { - All bool `protobuf:"varint,1,opt,name=all,proto3" json:"all,omitempty"` - KeepDuration int64 `protobuf:"varint,2,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"` - KeepBytes int64 `protobuf:"varint,3,opt,name=keepBytes,proto3" json:"keepBytes,omitempty"` - Filters []string `protobuf:"bytes,4,rep,name=filters" json:"filters,omitempty"` -} - -func (m *GCPolicy) Reset() { *m = GCPolicy{} } -func (m *GCPolicy) String() string { return proto.CompactTextString(m) } -func (*GCPolicy) ProtoMessage() {} -func (*GCPolicy) Descriptor() ([]byte, []int) { return fileDescriptorWorker, []int{1} } - -func (m *GCPolicy) GetAll() bool { - if m != nil { - return m.All - } - return false -} - -func (m *GCPolicy) GetKeepDuration() int64 { - if m != nil { - return m.KeepDuration - } - return 0 -} - -func (m *GCPolicy) GetKeepBytes() int64 { - if m != nil { - return m.KeepBytes - } - return 0 -} - -func (m *GCPolicy) GetFilters() []string { - if m != nil { - return m.Filters - } - return nil -} - -func init() { - proto.RegisterType((*WorkerRecord)(nil), "moby.buildkit.v1.types.WorkerRecord") - proto.RegisterType((*GCPolicy)(nil), "moby.buildkit.v1.types.GCPolicy") -} -func (m *WorkerRecord) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkerRecord) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ID) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintWorker(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - } - if len(m.Labels) > 0 { - for k, _ := range m.Labels { - dAtA[i] = 0x12 - i++ - v := m.Labels[k] - mapSize := 1 + len(k) + sovWorker(uint64(len(k))) + 1 + len(v) + sovWorker(uint64(len(v))) - i = encodeVarintWorker(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintWorker(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = 
encodeVarintWorker(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.Platforms) > 0 { - for _, msg := range m.Platforms { - dAtA[i] = 0x1a - i++ - i = encodeVarintWorker(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.GCPolicy) > 0 { - for _, msg := range m.GCPolicy { - dAtA[i] = 0x22 - i++ - i = encodeVarintWorker(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *GCPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GCPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.All { - dAtA[i] = 0x8 - i++ - if m.All { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.KeepDuration != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintWorker(dAtA, i, uint64(m.KeepDuration)) - } - if m.KeepBytes != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintWorker(dAtA, i, uint64(m.KeepBytes)) - } - if len(m.Filters) > 0 { - for _, s := range m.Filters { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func encodeVarintWorker(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *WorkerRecord) Size() (n int) { - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovWorker(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovWorker(uint64(len(k))) + 1 + len(v) + sovWorker(uint64(len(v))) - n += mapEntrySize + 1 + sovWorker(uint64(mapEntrySize)) - } - } - if len(m.Platforms) > 0 { - for _, e := range m.Platforms { - l = e.Size() - n += 1 + l + sovWorker(uint64(l)) - } - } - if len(m.GCPolicy) > 0 { - for _, e := range m.GCPolicy { - l = e.Size() - n += 1 + l + sovWorker(uint64(l)) - } - } - return n -} - -func (m *GCPolicy) Size() (n int) { - var l int - _ = l - if m.All { - n += 2 - } - if m.KeepDuration != 0 { - n += 1 + sovWorker(uint64(m.KeepDuration)) - } - if m.KeepBytes != 0 { - n += 1 + sovWorker(uint64(m.KeepBytes)) - } - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovWorker(uint64(l)) - } - } - return n -} - -func sovWorker(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozWorker(x uint64) (n int) { - return sovWorker(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *WorkerRecord) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkerRecord: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkerRecord: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthWorker - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthWorker - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthWorker - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthWorker - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipWorker(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthWorker - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Platforms", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthWorker - } - postIndex 
:= iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Platforms = append(m.Platforms, pb.Platform{}) - if err := m.Platforms[len(m.Platforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GCPolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthWorker - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GCPolicy = append(m.GCPolicy, &GCPolicy{}) - if err := m.GCPolicy[len(m.GCPolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipWorker(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthWorker - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GCPolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GCPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GCPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field All", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.All = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeepDuration", wireType) - } - m.KeepDuration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KeepDuration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeepBytes", wireType) - } - m.KeepBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KeepBytes |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthWorker - } - 
postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipWorker(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthWorker - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipWorker(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWorker - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWorker - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWorker - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthWorker - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWorker - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipWorker(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthWorker = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowWorker = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("worker.proto", fileDescriptorWorker) } - -var fileDescriptorWorker = []byte{ - // 355 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x4e, 0xea, 0x40, - 0x14, 0x86, 0x6f, 0x5b, 0x2e, 0x97, 0x0e, 0xcd, 0x8d, 0x99, 0x18, 0xd3, 0x10, 0x83, 0x84, 0x15, - 0x0b, 0x9d, 0xa2, 0x6e, 0xd4, 0xb8, 0x42, 0x8c, 0x92, 0xb8, 0x20, 0xb3, 0x71, 0xdd, 0x81, 0x01, - 0x9b, 0x0e, 0x9c, 0xc9, 0x74, 0x8a, 0xf6, 0x39, 0x7c, 0x29, 0x96, 0x3e, 0x81, 0x31, 0x3c, 0x89, - 0x99, 0x29, 0x08, 0x26, 0xba, 0x3b, 0xff, 0x9f, 0xff, 0xfb, 0xe7, 0x9c, 0x0c, 0x0a, 0x9e, 0x41, - 0xa5, 0x5c, 0x11, 0xa9, 0x40, 0x03, 0x3e, 0x98, 0x01, 0x2b, 0x08, 0xcb, 0x13, 0x31, 0x4e, 0x13, - 0x4d, 0x16, 0xa7, 0x44, 0x17, 0x92, 0x67, 0x8d, 0x93, 0x69, 0xa2, 0x9f, 0x72, 0x46, 0x46, 0x30, - 0x8b, 0xa6, 0x30, 0x85, 0xc8, 0xc6, 0x59, 0x3e, 0xb1, 0xca, 0x0a, 0x3b, 0x95, 0x35, 0x8d, 0xe3, - 0x9d, 0xb8, 0x69, 0x8c, 0x36, 0x8d, 0x51, 0x06, 0x62, 0xc1, 0x55, 0x24, 0x59, 0x04, 0x32, 0x2b, - 0xd3, 0xed, 0x57, 0x17, 0x05, 0x8f, 0x76, 0x0b, 0xca, 0x47, 0xa0, 0xc6, 0xf8, 0x3f, 0x72, 
0x07, - 0xfd, 0xd0, 0x69, 0x39, 0x1d, 0x9f, 0xba, 0x83, 0x3e, 0xbe, 0x47, 0xd5, 0x87, 0x98, 0x71, 0x91, - 0x85, 0x6e, 0xcb, 0xeb, 0xd4, 0xcf, 0xba, 0xe4, 0xe7, 0x35, 0xc9, 0x6e, 0x0b, 0x29, 0x91, 0xdb, - 0xb9, 0x56, 0x05, 0x5d, 0xf3, 0xb8, 0x8b, 0x7c, 0x29, 0x62, 0x3d, 0x01, 0x35, 0xcb, 0x42, 0xcf, - 0x96, 0x05, 0x44, 0x32, 0x32, 0x5c, 0x9b, 0xbd, 0xca, 0xf2, 0xfd, 0xe8, 0x0f, 0xdd, 0x86, 0xf0, - 0x35, 0xaa, 0xdd, 0xdd, 0x0c, 0x41, 0x24, 0xa3, 0x22, 0xac, 0x58, 0xa0, 0xf5, 0xdb, 0xeb, 0x9b, - 0x1c, 0xfd, 0x22, 0x1a, 0x97, 0xa8, 0xbe, 0xb3, 0x06, 0xde, 0x43, 0x5e, 0xca, 0x8b, 0xf5, 0x65, - 0x66, 0xc4, 0xfb, 0xe8, 0xef, 0x22, 0x16, 0x39, 0x0f, 0x5d, 0xeb, 0x95, 0xe2, 0xca, 0xbd, 0x70, - 0xda, 0x2f, 0xdb, 0x87, 0x0d, 0x17, 0x0b, 0x61, 0xb9, 0x1a, 0x35, 0x23, 0x6e, 0xa3, 0x20, 0xe5, - 0x5c, 0xf6, 0x73, 0x15, 0xeb, 0x04, 0xe6, 0x16, 0xf7, 0xe8, 0x37, 0x0f, 0x1f, 0x22, 0xdf, 0xe8, - 0x5e, 0xa1, 0xb9, 0x39, 0xd6, 0x04, 0xb6, 0x06, 0x0e, 0xd1, 0xbf, 0x49, 0x22, 0x34, 0x57, 0x99, - 0xbd, 0xcb, 0xa7, 0x1b, 0xd9, 0x0b, 0x96, 0xab, 0xa6, 0xf3, 0xb6, 0x6a, 0x3a, 0x1f, 0xab, 0xa6, - 0xc3, 0xaa, 0xf6, 0x93, 0xce, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x79, 0x52, 0x6a, 0x29, - 0x02, 0x00, 0x00, -} diff --git a/vendor/github.com/moby/buildkit/api/types/worker.proto b/vendor/github.com/moby/buildkit/api/types/worker.proto deleted file mode 100644 index 82dd7ad65145..000000000000 --- a/vendor/github.com/moby/buildkit/api/types/worker.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; - -package moby.buildkit.v1.types; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "github.com/moby/buildkit/solver/pb/ops.proto"; - -option (gogoproto.sizer_all) = true; -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -message WorkerRecord { - string ID = 1; - map<string, string> Labels = 2; - repeated pb.Platform platforms = 3 [(gogoproto.nullable) = false]; - repeated GCPolicy GCPolicy = 4; -} - -message GCPolicy { - bool all = 1; - int64 keepDuration = 2; - int64 keepBytes = 3; - repeated string filters = 4; -} diff --git a/vendor/github.com/moby/buildkit/cache/blobs/blobs.go b/vendor/github.com/moby/buildkit/cache/blobs/blobs.go deleted file mode 100644 index a1e1b6069feb..000000000000 --- a/vendor/github.com/moby/buildkit/cache/blobs/blobs.go +++ /dev/null @@ -1,151 +0,0 @@ -package blobs - -import ( - "context" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/diff" - "github.com/containerd/containerd/mount" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/util/flightcontrol" - "github.com/moby/buildkit/util/winlayers" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "golang.org/x/sync/errgroup" -) - -var g flightcontrol.Group - -const containerdUncompressed = "containerd.io/uncompressed" - -type DiffPair struct { - DiffID digest.Digest - Blobsum digest.Digest -} - -var ErrNoBlobs = errors.Errorf("no blobs for snapshot") - -func GetDiffPairs(ctx context.Context, contentStore content.Store, snapshotter snapshot.Snapshotter, differ diff.Comparer, ref cache.ImmutableRef, createBlobs bool) ([]DiffPair, error) { - if ref == nil { - return nil, nil - } - - if err := ref.Finalize(ctx, true); err != nil { - return nil, err - } - - if isTypeWindows(ref) { - ctx = winlayers.UseWindowsLayerMode(ctx) - } - - return getDiffPairs(ctx, contentStore, snapshotter, differ, ref, createBlobs) -} - -func
getDiffPairs(ctx context.Context, contentStore content.Store, snapshotter snapshot.Snapshotter, differ diff.Comparer, ref cache.ImmutableRef, createBlobs bool) ([]DiffPair, error) { - if ref == nil { - return nil, nil - } - - eg, ctx := errgroup.WithContext(ctx) - var diffPairs []DiffPair - var currentPair DiffPair - parent := ref.Parent() - if parent != nil { - defer parent.Release(context.TODO()) - eg.Go(func() error { - dp, err := getDiffPairs(ctx, contentStore, snapshotter, differ, parent, createBlobs) - if err != nil { - return err - } - diffPairs = dp - return nil - }) - } - eg.Go(func() error { - dp, err := g.Do(ctx, ref.ID(), func(ctx context.Context) (interface{}, error) { - diffID, blob, err := snapshotter.GetBlob(ctx, ref.ID()) - if err != nil { - return nil, err - } - if blob != "" { - return DiffPair{DiffID: diffID, Blobsum: blob}, nil - } else if !createBlobs { - return nil, errors.WithStack(ErrNoBlobs) - } - // reference needs to be committed - parent := ref.Parent() - var lower []mount.Mount - if parent != nil { - defer parent.Release(context.TODO()) - m, err := parent.Mount(ctx, true) - if err != nil { - return nil, err - } - lower, err = m.Mount() - if err != nil { - return nil, err - } - defer m.Release() - } - m, err := ref.Mount(ctx, true) - if err != nil { - return nil, err - } - upper, err := m.Mount() - if err != nil { - return nil, err - } - defer m.Release() - descr, err := differ.Compare(ctx, lower, upper, - diff.WithMediaType(ocispec.MediaTypeImageLayerGzip), - diff.WithReference(ref.ID()), - diff.WithLabels(map[string]string{ - "containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339Nano), - }), - ) - if err != nil { - return nil, err - } - info, err := contentStore.Info(ctx, descr.Digest) - if err != nil { - return nil, err - } - diffIDStr, ok := info.Labels[containerdUncompressed] - if !ok { - return nil, errors.Errorf("invalid differ response with no diffID: %v", descr.Digest) - } - diffIDDigest, err := digest.Parse(diffIDStr) - if err != nil { - return nil, err - } - if err := snapshotter.SetBlob(ctx, ref.ID(), diffIDDigest, descr.Digest); err != nil { - return nil, err - } - return DiffPair{DiffID: diffIDDigest, Blobsum: descr.Digest}, nil - }) - if err != nil { - return err - } - currentPair = dp.(DiffPair) - return nil - }) - err := eg.Wait() - if err != nil { - return nil, err - } - return append(diffPairs, currentPair), nil -} - -func isTypeWindows(ref cache.ImmutableRef) bool { - if cache.GetLayerType(ref) == "windows" { - return true - } - if parent := ref.Parent(); parent != nil { - defer parent.Release(context.TODO()) - return isTypeWindows(parent) - } - return false -} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go deleted file mode 100644 index 8cb03359be4f..000000000000 --- a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go +++ /dev/null @@ -1,721 +0,0 @@ -package contenthash - -import ( - "bytes" - "context" - "crypto/sha256" - "io" - "os" - "path" - "path/filepath" - "sync" - - "github.com/docker/docker/pkg/locker" - iradix "github.com/hashicorp/go-immutable-radix" - "github.com/hashicorp/golang-lru/simplelru" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/snapshot" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil" - fstypes "github.com/tonistiigi/fsutil/types" -) - -var errNotFound = errors.Errorf("not found") - -var 
defaultManager *cacheManager -var defaultManagerOnce sync.Once - -const keyContentHash = "buildkit.contenthash.v0" - -func getDefaultManager() *cacheManager { - defaultManagerOnce.Do(func() { - lru, _ := simplelru.NewLRU(20, nil) // error is impossible on positive size - defaultManager = &cacheManager{lru: lru, locker: locker.New()} - }) - return defaultManager -} - -// Layout in the radix tree: Every path is saved by cleaned absolute unix path. -// Directories have 2 records, one contains digest for directory header, other -// the recursive digest for directory contents. "/dir/" is the record for -// header, "/dir" is for contents. For the root node "" (empty string) is the -// key for root, "/" for the root header - -func Checksum(ctx context.Context, ref cache.ImmutableRef, path string) (digest.Digest, error) { - return getDefaultManager().Checksum(ctx, ref, path) -} - -func GetCacheContext(ctx context.Context, md *metadata.StorageItem) (CacheContext, error) { - return getDefaultManager().GetCacheContext(ctx, md) -} - -func SetCacheContext(ctx context.Context, md *metadata.StorageItem, cc CacheContext) error { - return getDefaultManager().SetCacheContext(ctx, md, cc) -} - -type CacheContext interface { - Checksum(ctx context.Context, ref cache.Mountable, p string) (digest.Digest, error) - HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) error -} - -type Hashed interface { - Digest() digest.Digest -} - -type cacheManager struct { - locker *locker.Locker - lru *simplelru.LRU - lruMu sync.Mutex -} - -func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p string) (digest.Digest, error) { - cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata())) - if err != nil { - return "", nil - } - return cc.Checksum(ctx, ref, p) -} - -func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.StorageItem) (CacheContext, error) { - cm.locker.Lock(md.ID()) - cm.lruMu.Lock() - v, ok := cm.lru.Get(md.ID()) - cm.lruMu.Unlock() - if ok { - cm.locker.Unlock(md.ID()) - return v.(*cacheContext), nil - } - cc, err := newCacheContext(md) - if err != nil { - cm.locker.Unlock(md.ID()) - return nil, err - } - cm.lruMu.Lock() - cm.lru.Add(md.ID(), cc) - cm.lruMu.Unlock() - cm.locker.Unlock(md.ID()) - return cc, nil -} - -func (cm *cacheManager) SetCacheContext(ctx context.Context, md *metadata.StorageItem, cci CacheContext) error { - cc, ok := cci.(*cacheContext) - if !ok { - return errors.Errorf("invalid cachecontext: %T", cc) - } - if md.ID() != cc.md.ID() { - cc = &cacheContext{ - md: md, - tree: cci.(*cacheContext).tree, - dirtyMap: map[string]struct{}{}, - } - } else { - if err := cc.save(); err != nil { - return err - } - } - cm.lruMu.Lock() - cm.lru.Add(md.ID(), cc) - cm.lruMu.Unlock() - return nil -} - -type cacheContext struct { - mu sync.RWMutex - md *metadata.StorageItem - tree *iradix.Tree - dirty bool // needs to be persisted to disk - - // used in HandleChange - txn *iradix.Txn - node *iradix.Node - dirtyMap map[string]struct{} -} - -type mount struct { - mountable cache.Mountable - mountPath string - unmount func() error -} - -func (m *mount) mount(ctx context.Context) (string, error) { - if m.mountPath != "" { - return m.mountPath, nil - } - mounts, err := m.mountable.Mount(ctx, true) - if err != nil { - return "", err - } - - lm := snapshot.LocalMounter(mounts) - - mp, err := lm.Mount() - if err != nil { - return "", err - } - - m.mountPath = mp - m.unmount = lm.Unmount - return mp, nil -} - -func (m *mount) clean() 
error { - if m.mountPath != "" { - if err := m.unmount(); err != nil { - return err - } - m.mountPath = "" - } - return nil -} - -func newCacheContext(md *metadata.StorageItem) (*cacheContext, error) { - cc := &cacheContext{ - md: md, - tree: iradix.New(), - dirtyMap: map[string]struct{}{}, - } - if err := cc.load(); err != nil { - return nil, err - } - return cc, nil -} - -func (cc *cacheContext) load() error { - dt, err := cc.md.GetExternal(keyContentHash) - if err != nil { - return nil - } - - var l CacheRecords - if err := l.Unmarshal(dt); err != nil { - return err - } - - txn := cc.tree.Txn() - for _, p := range l.Paths { - txn.Insert([]byte(p.Path), p.Record) - } - cc.tree = txn.Commit() - return nil -} - -func (cc *cacheContext) save() error { - cc.mu.Lock() - defer cc.mu.Unlock() - - if cc.txn != nil { - cc.commitActiveTransaction() - } - - var l CacheRecords - node := cc.tree.Root() - node.Walk(func(k []byte, v interface{}) bool { - l.Paths = append(l.Paths, &CacheRecordWithPath{ - Path: string(k), - Record: v.(*CacheRecord), - }) - return false - }) - - dt, err := l.Marshal() - if err != nil { - return err - } - - return cc.md.SetExternal(keyContentHash, dt) -} - -// HandleChange notifies the source about a modification operation -func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { - p = path.Join("/", filepath.ToSlash(p)) - if p == "/" { - p = "" - } - k := convertPathToKey([]byte(p)) - - deleteDir := func(cr *CacheRecord) { - if cr.Type == CacheRecordTypeDir { - cc.node.WalkPrefix(append(k, 0), func(k []byte, v interface{}) bool { - cc.txn.Delete(k) - return false - }) - } - } - - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.txn == nil { - cc.txn = cc.tree.Txn() - cc.node = cc.tree.Root() - - // root is not called by HandleChange. 
need to fake it - if _, ok := cc.node.Get([]byte{0}); !ok { - cc.txn.Insert([]byte{0}, &CacheRecord{ - Type: CacheRecordTypeDirHeader, - Digest: digest.FromBytes(nil), - }) - cc.txn.Insert([]byte(""), &CacheRecord{ - Type: CacheRecordTypeDir, - }) - } - } - - if kind == fsutil.ChangeKindDelete { - v, ok := cc.txn.Delete(k) - if ok { - deleteDir(v.(*CacheRecord)) - } - d := path.Dir(p) - if d == "/" { - d = "" - } - cc.dirtyMap[d] = struct{}{} - return - } - - stat, ok := fi.Sys().(*fstypes.Stat) - if !ok { - return errors.Errorf("%s invalid change without stat information", p) - } - - h, ok := fi.(Hashed) - if !ok { - return errors.Errorf("invalid fileinfo: %s", p) - } - - v, ok := cc.node.Get(k) - if ok { - deleteDir(v.(*CacheRecord)) - } - - cr := &CacheRecord{ - Type: CacheRecordTypeFile, - } - if fi.Mode()&os.ModeSymlink != 0 { - cr.Type = CacheRecordTypeSymlink - cr.Linkname = filepath.ToSlash(stat.Linkname) - } - if fi.IsDir() { - cr.Type = CacheRecordTypeDirHeader - cr2 := &CacheRecord{ - Type: CacheRecordTypeDir, - } - cc.txn.Insert(k, cr2) - k = append(k, 0) - p += "/" - } - cr.Digest = h.Digest() - cc.txn.Insert(k, cr) - d := path.Dir(p) - if d == "/" { - d = "" - } - cc.dirtyMap[d] = struct{}{} - - return nil -} - -func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable, p string) (digest.Digest, error) { - m := &mount{mountable: mountable} - defer m.clean() - - const maxSymlinkLimit = 255 - i := 0 - for { - if i > maxSymlinkLimit { - return "", errors.Errorf("too many symlinks: %s", p) - } - cr, err := cc.checksumNoFollow(ctx, m, p) - if err != nil { - return "", err - } - if cr.Type == CacheRecordTypeSymlink { - link := cr.Linkname - if !path.IsAbs(cr.Linkname) { - link = path.Join(path.Dir(p), link) - } - i++ - p = link - } else { - return cr.Digest, nil - } - } -} - -func (cc *cacheContext) checksumNoFollow(ctx context.Context, m *mount, p string) (*CacheRecord, error) { - p = path.Join("/", filepath.ToSlash(p)) - if p == "/" { - p = "" - } - - cc.mu.RLock() - if cc.txn == nil { - root := cc.tree.Root() - cc.mu.RUnlock() - v, ok := root.Get(convertPathToKey([]byte(p))) - if ok { - cr := v.(*CacheRecord) - if cr.Digest != "" { - return cr, nil - } - } - } else { - cc.mu.RUnlock() - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - if cc.txn != nil { - cc.commitActiveTransaction() - } - - defer func() { - if cc.dirty { - go cc.save() - cc.dirty = false - } - }() - - return cc.lazyChecksum(ctx, m, p) -} - -func (cc *cacheContext) commitActiveTransaction() { - for d := range cc.dirtyMap { - addParentToMap(d, cc.dirtyMap) - } - for d := range cc.dirtyMap { - k := convertPathToKey([]byte(d)) - if _, ok := cc.txn.Get(k); ok { - cc.txn.Insert(k, &CacheRecord{Type: CacheRecordTypeDir}) - } - } - cc.tree = cc.txn.Commit() - cc.node = nil - cc.dirtyMap = map[string]struct{}{} - cc.txn = nil -} - -func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string) (*CacheRecord, error) { - root := cc.tree.Root() - scan, err := cc.needsScan(root, p) - if err != nil { - return nil, err - } - if scan { - if err := cc.scanPath(ctx, m, p); err != nil { - return nil, err - } - } - k := convertPathToKey([]byte(p)) - txn := cc.tree.Txn() - root = txn.Root() - cr, updated, err := cc.checksum(ctx, root, txn, m, k) - if err != nil { - return nil, err - } - cc.tree = txn.Commit() - cc.dirty = updated - return cr, err -} - -func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *iradix.Txn, m *mount, k []byte) (*CacheRecord, bool, error) { - k, cr, 
err := getFollowLinks(root, k) - if err != nil { - return nil, false, err - } - if cr == nil { - return nil, false, errors.Wrapf(errNotFound, "%s not found", convertKeyToPath(k)) - } - if cr.Digest != "" { - return cr, false, nil - } - var dgst digest.Digest - - switch cr.Type { - case CacheRecordTypeDir: - h := sha256.New() - next := append(k, 0) - iter := root.Seek(next) - subk := next - ok := true - for { - if !ok || !bytes.HasPrefix(subk, next) { - break - } - h.Write(bytes.TrimPrefix(subk, k)) - - subcr, _, err := cc.checksum(ctx, root, txn, m, subk) - if err != nil { - return nil, false, err - } - - h.Write([]byte(subcr.Digest)) - - if subcr.Type == CacheRecordTypeDir { // skip subfiles - next := append(subk, 0, 0xff) - iter = root.Seek(next) - } - subk, _, ok = iter.Next() - } - dgst = digest.NewDigest(digest.SHA256, h) - - default: - p := string(convertKeyToPath(bytes.TrimSuffix(k, []byte{0}))) - - target, err := m.mount(ctx) - if err != nil { - return nil, false, err - } - - // no FollowSymlinkInScope because invalid paths should not be inserted - fp := filepath.Join(target, filepath.FromSlash(p)) - - fi, err := os.Lstat(fp) - if err != nil { - return nil, false, err - } - - dgst, err = prepareDigest(fp, p, fi) - if err != nil { - return nil, false, err - } - } - - cr2 := &CacheRecord{ - Digest: dgst, - Type: cr.Type, - Linkname: cr.Linkname, - } - - txn.Insert(k, cr2) - - return cr2, true, nil -} - -// needsScan returns false if path is in the tree or a parent path is in tree -// and subpath is missing -func (cc *cacheContext) needsScan(root *iradix.Node, p string) (bool, error) { - var linksWalked int - return cc.needsScanFollow(root, p, &linksWalked) -} - -func (cc *cacheContext) needsScanFollow(root *iradix.Node, p string, linksWalked *int) (bool, error) { - if p == "/" { - p = "" - } - if v, ok := root.Get(convertPathToKey([]byte(p))); !ok { - if p == "" { - return true, nil - } - return cc.needsScanFollow(root, path.Clean(path.Dir(p)), linksWalked) - } else { - cr := v.(*CacheRecord) - if cr.Type == CacheRecordTypeSymlink { - if *linksWalked > 255 { - return false, errTooManyLinks - } - *linksWalked++ - link := path.Clean(cr.Linkname) - if !path.IsAbs(cr.Linkname) { - link = path.Join("/", path.Dir(p), link) - } - return cc.needsScanFollow(root, link, linksWalked) - } - } - return false, nil -} - -func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retErr error) { - p = path.Join("/", p) - d, _ := path.Split(p) - - mp, err := m.mount(ctx) - if err != nil { - return err - } - - n := cc.tree.Root() - txn := cc.tree.Txn() - - parentPath, err := rootPath(mp, filepath.FromSlash(d), func(p, link string) error { - cr := &CacheRecord{ - Type: CacheRecordTypeSymlink, - Linkname: filepath.ToSlash(link), - } - k := []byte(filepath.Join("/", filepath.ToSlash(p))) - k = convertPathToKey(k) - txn.Insert(k, cr) - return nil - }) - if err != nil { - return err - } - - err = filepath.Walk(parentPath, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return errors.Wrapf(err, "failed to walk %s", path) - } - rel, err := filepath.Rel(mp, path) - if err != nil { - return err - } - k := []byte(filepath.Join("/", filepath.ToSlash(rel))) - if string(k) == "/" { - k = []byte{} - } - k = convertPathToKey(k) - if _, ok := n.Get(k); !ok { - cr := &CacheRecord{ - Type: CacheRecordTypeFile, - } - if fi.Mode()&os.ModeSymlink != 0 { - cr.Type = CacheRecordTypeSymlink - link, err := os.Readlink(path) - if err != nil { - return err - } - cr.Linkname = 
filepath.ToSlash(link) - } - if fi.IsDir() { - cr.Type = CacheRecordTypeDirHeader - cr2 := &CacheRecord{ - Type: CacheRecordTypeDir, - } - txn.Insert(k, cr2) - k = append(k, 0) - } - txn.Insert(k, cr) - } - return nil - }) - if err != nil { - return err - } - - cc.tree = txn.Commit() - return nil -} - -func getFollowLinks(root *iradix.Node, k []byte) ([]byte, *CacheRecord, error) { - var linksWalked int - return getFollowLinksWalk(root, k, &linksWalked) -} - -func getFollowLinksWalk(root *iradix.Node, k []byte, linksWalked *int) ([]byte, *CacheRecord, error) { - v, ok := root.Get(k) - if ok { - return k, v.(*CacheRecord), nil - } - if len(k) == 0 { - return nil, nil, nil - } - - dir, file := splitKey(k) - - _, parent, err := getFollowLinksWalk(root, dir, linksWalked) - if err != nil { - return nil, nil, err - } - if parent != nil && parent.Type == CacheRecordTypeSymlink { - *linksWalked++ - if *linksWalked > 255 { - return nil, nil, errors.Errorf("too many links") - } - dirPath := path.Clean(string(convertKeyToPath(dir))) - if dirPath == "." || dirPath == "/" { - dirPath = "" - } - link := path.Clean(parent.Linkname) - if !path.IsAbs(link) { - link = path.Join("/", path.Join(path.Dir(dirPath), link)) - } - return getFollowLinksWalk(root, append(convertPathToKey([]byte(link)), file...), linksWalked) - } - - return nil, nil, nil -} - -func prepareDigest(fp, p string, fi os.FileInfo) (digest.Digest, error) { - h, err := NewFileHash(fp, fi) - if err != nil { - return "", errors.Wrapf(err, "failed to create hash for %s", p) - } - if fi.Mode().IsRegular() && fi.Size() > 0 { - // TODO: would be nice to put the contents to separate hash first - // so it can be cached for hardlinks - f, err := os.Open(fp) - if err != nil { - return "", errors.Wrapf(err, "failed to open %s", p) - } - defer f.Close() - if _, err := poolsCopy(h, f); err != nil { - return "", errors.Wrapf(err, "failed to copy file data for %s", p) - } - } - return digest.NewDigest(digest.SHA256, h), nil -} - -func addParentToMap(d string, m map[string]struct{}) { - if d == "" { - return - } - d = path.Dir(d) - if d == "/" { - d = "" - } - m[d] = struct{}{} - addParentToMap(d, m) -} - -func ensureOriginMetadata(md *metadata.StorageItem) *metadata.StorageItem { - v := md.Get("cache.equalMutable") // TODO: const - if v == nil { - return md - } - var mutable string - if err := v.Unmarshal(&mutable); err != nil { - return md - } - si, ok := md.Storage().Get(mutable) - if ok { - return si - } - return md -} - -var pool32K = sync.Pool{ - New: func() interface{} { return make([]byte, 32*1024) }, // 32K -} - -func poolsCopy(dst io.Writer, src io.Reader) (written int64, err error) { - buf := pool32K.Get().([]byte) - written, err = io.CopyBuffer(dst, src, buf) - pool32K.Put(buf) - return -} - -func convertPathToKey(p []byte) []byte { - return bytes.Replace([]byte(p), []byte("/"), []byte{0}, -1) -} - -func convertKeyToPath(p []byte) []byte { - return bytes.Replace([]byte(p), []byte{0}, []byte("/"), -1) -} - -func splitKey(k []byte) ([]byte, []byte) { - foundBytes := false - i := len(k) - 1 - for { - if i <= 0 || foundBytes && k[i] == 0 { - break - } - if k[i] != 0 { - foundBytes = true - } - i-- - } - return append([]byte{}, k[:i]...), k[i:] -} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.pb.go b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.pb.go deleted file mode 100644 index 31dcce95ff87..000000000000 --- a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.pb.go +++ /dev/null @@ -1,755 +0,0 @@ 
-// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: checksum.proto - -/* - Package contenthash is a generated protocol buffer package. - - It is generated from these files: - checksum.proto - - It has these top-level messages: - CacheRecord - CacheRecordWithPath - CacheRecords -*/ -package contenthash - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" - -import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type CacheRecordType int32 - -const ( - CacheRecordTypeFile CacheRecordType = 0 - CacheRecordTypeDir CacheRecordType = 1 - CacheRecordTypeDirHeader CacheRecordType = 2 - CacheRecordTypeSymlink CacheRecordType = 3 -) - -var CacheRecordType_name = map[int32]string{ - 0: "FILE", - 1: "DIR", - 2: "DIR_HEADER", - 3: "SYMLINK", -} -var CacheRecordType_value = map[string]int32{ - "FILE": 0, - "DIR": 1, - "DIR_HEADER": 2, - "SYMLINK": 3, -} - -func (x CacheRecordType) String() string { - return proto.EnumName(CacheRecordType_name, int32(x)) -} -func (CacheRecordType) EnumDescriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{0} } - -type CacheRecord struct { - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - Type CacheRecordType `protobuf:"varint,2,opt,name=type,proto3,enum=contenthash.CacheRecordType" json:"type,omitempty"` - Linkname string `protobuf:"bytes,3,opt,name=linkname,proto3" json:"linkname,omitempty"` -} - -func (m *CacheRecord) Reset() { *m = CacheRecord{} } -func (m *CacheRecord) String() string { return proto.CompactTextString(m) } -func (*CacheRecord) ProtoMessage() {} -func (*CacheRecord) Descriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{0} } - -func (m *CacheRecord) GetType() CacheRecordType { - if m != nil { - return m.Type - } - return CacheRecordTypeFile -} - -func (m *CacheRecord) GetLinkname() string { - if m != nil { - return m.Linkname - } - return "" -} - -type CacheRecordWithPath struct { - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Record *CacheRecord `protobuf:"bytes,2,opt,name=record" json:"record,omitempty"` -} - -func (m *CacheRecordWithPath) Reset() { *m = CacheRecordWithPath{} } -func (m *CacheRecordWithPath) String() string { return proto.CompactTextString(m) } -func (*CacheRecordWithPath) ProtoMessage() {} -func (*CacheRecordWithPath) Descriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{1} } - -func (m *CacheRecordWithPath) GetPath() string { - if m != nil { - return m.Path - } - return "" -} - -func (m *CacheRecordWithPath) GetRecord() *CacheRecord { - if m != nil { - return m.Record - } - return nil -} - -type CacheRecords struct { - Paths []*CacheRecordWithPath `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"` -} - -func (m *CacheRecords) Reset() { *m = CacheRecords{} } -func (m *CacheRecords) String() string { return proto.CompactTextString(m) } -func 
(*CacheRecords) ProtoMessage() {} -func (*CacheRecords) Descriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{2} } - -func (m *CacheRecords) GetPaths() []*CacheRecordWithPath { - if m != nil { - return m.Paths - } - return nil -} - -func init() { - proto.RegisterType((*CacheRecord)(nil), "contenthash.CacheRecord") - proto.RegisterType((*CacheRecordWithPath)(nil), "contenthash.CacheRecordWithPath") - proto.RegisterType((*CacheRecords)(nil), "contenthash.CacheRecords") - proto.RegisterEnum("contenthash.CacheRecordType", CacheRecordType_name, CacheRecordType_value) -} -func (m *CacheRecord) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CacheRecord) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Digest) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintChecksum(dAtA, i, uint64(len(m.Digest))) - i += copy(dAtA[i:], m.Digest) - } - if m.Type != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintChecksum(dAtA, i, uint64(m.Type)) - } - if len(m.Linkname) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintChecksum(dAtA, i, uint64(len(m.Linkname))) - i += copy(dAtA[i:], m.Linkname) - } - return i, nil -} - -func (m *CacheRecordWithPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CacheRecordWithPath) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Path) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintChecksum(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - } - if m.Record != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintChecksum(dAtA, i, uint64(m.Record.Size())) - n1, err := m.Record.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - return i, nil -} - -func (m *CacheRecords) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CacheRecords) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Paths) > 0 { - for _, msg := range m.Paths { - dAtA[i] = 0xa - i++ - i = encodeVarintChecksum(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeVarintChecksum(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *CacheRecord) Size() (n int) { - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovChecksum(uint64(l)) - } - if m.Type != 0 { - n += 1 + sovChecksum(uint64(m.Type)) - } - l = len(m.Linkname) - if l > 0 { - n += 1 + l + sovChecksum(uint64(l)) - } - return n -} - -func (m *CacheRecordWithPath) Size() (n int) { - var l int - _ = l - l = len(m.Path) - if l > 0 { - n += 1 + l + sovChecksum(uint64(l)) - } - if m.Record != nil { - l = m.Record.Size() - n += 1 + l + sovChecksum(uint64(l)) - } - return n -} - -func (m *CacheRecords) Size() (n int) { - var l int - _ = l - if len(m.Paths) > 0 { - for _, e := range m.Paths { - l = e.Size() - n += 1 + l + sovChecksum(uint64(l)) - } - } - return n -} - -func sovChecksum(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - 
return n -} -func sozChecksum(x uint64) (n int) { - return sovChecksum(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CacheRecord) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowChecksum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CacheRecord: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CacheRecord: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowChecksum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthChecksum - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowChecksum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= (CacheRecordType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Linkname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowChecksum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthChecksum - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Linkname = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipChecksum(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthChecksum - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CacheRecordWithPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowChecksum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CacheRecordWithPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CacheRecordWithPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowChecksum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthChecksum - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowChecksum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthChecksum - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Record == nil { - m.Record = &CacheRecord{} - } - if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipChecksum(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthChecksum - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CacheRecords) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowChecksum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CacheRecords: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CacheRecords: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowChecksum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthChecksum - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Paths = append(m.Paths, &CacheRecordWithPath{}) - if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipChecksum(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthChecksum - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipChecksum(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowChecksum - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] 
- iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowChecksum - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowChecksum - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthChecksum - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowChecksum - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipChecksum(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthChecksum = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowChecksum = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("checksum.proto", fileDescriptorChecksum) } - -var fileDescriptorChecksum = []byte{ - // 418 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xc1, 0x6a, 0xd4, 0x40, - 0x18, 0xc7, 0x77, 0xba, 0xeb, 0xaa, 0xdf, 0x4a, 0x0d, 0x53, 0x68, 0xc3, 0x50, 0xb2, 0xe3, 0x5e, - 0x5c, 0x8a, 0xcd, 0x96, 0x08, 0xde, 0xad, 0xd9, 0xa5, 0xd1, 0x2a, 0x32, 0x15, 0x44, 0x3c, 0x48, - 0x36, 0x3b, 0x66, 0x42, 0x9b, 0x4c, 0x48, 0x66, 0x0f, 0xfb, 0x06, 0x92, 0x93, 0x2f, 0x90, 0x93, - 0x82, 0xef, 0xe0, 0x5d, 0xe8, 0xd1, 0xb3, 0x87, 0x22, 0xeb, 0x8b, 0x48, 0x26, 0x55, 0x42, 0xca, - 0x9e, 0xe6, 0xfb, 0x66, 0x7e, 0xdf, 0xff, 0xff, 0x9f, 0x61, 0x60, 0x3b, 0x10, 0x3c, 0x38, 0xcf, - 0x97, 0xb1, 0x9d, 0x66, 0x52, 0x49, 0x3c, 0x08, 0x64, 0xa2, 0x78, 0xa2, 0x84, 0x9f, 0x0b, 0x72, - 0x18, 0x46, 0x4a, 0x2c, 0xe7, 0x76, 0x20, 0xe3, 0x49, 0x28, 0x43, 0x39, 0xd1, 0xcc, 0x7c, 0xf9, - 0x51, 0x77, 0xba, 0xd1, 0x55, 0x3d, 0x3b, 0xfa, 0x86, 0x60, 0xf0, 0xcc, 0x0f, 0x04, 0x67, 0x3c, - 0x90, 0xd9, 0x02, 0x3f, 0x87, 0xfe, 0x22, 0x0a, 0x79, 0xae, 0x4c, 0x44, 0xd1, 0xf8, 0xee, 0xb1, - 0x73, 0x79, 0x35, 0xec, 0xfc, 0xba, 0x1a, 0x1e, 0x34, 0x64, 0x65, 0xca, 0x93, 0xca, 0xd2, 0x8f, - 0x12, 0x9e, 0xe5, 0x93, 0x50, 0x1e, 0xd6, 0x23, 0xb6, 0xab, 0x17, 0x76, 0xad, 0x80, 0x8f, 0xa0, - 0xa7, 0x56, 0x29, 0x37, 0xb7, 0x28, 0x1a, 0x6f, 0x3b, 0xfb, 0x76, 0x23, 0xa6, 0xdd, 0xf0, 0x7c, - 0xb3, 0x4a, 0x39, 0xd3, 0x24, 0x26, 0x70, 0xe7, 0x22, 0x4a, 0xce, 0x13, 0x3f, 0xe6, 0x66, 0xb7, - 0xf2, 0x67, 0xff, 0xfb, 0xd1, 0x7b, 0xd8, 0x69, 0x0c, 0xbd, 0x8d, 0x94, 0x78, 0xed, 0x2b, 0x81, - 0x31, 0xf4, 0x52, 0x5f, 0x89, 0x3a, 0x2e, 0xd3, 0x35, 0x3e, 0x82, 0x7e, 0xa6, 0x29, 0x6d, 0x3d, - 0x70, 0xcc, 0x4d, 0xd6, 0xec, 0x9a, 0x1b, 0xcd, 0xe0, 0x5e, 0x63, 0x3b, 0xc7, 0x4f, 0xe0, 0x56, - 0xa5, 0x94, 0x9b, 0x88, 0x76, 0xc7, 0x03, 0x87, 0x6e, 0x12, 
0xf8, 0x17, 0x83, 0xd5, 0xf8, 0xc1, - 0x0f, 0x04, 0xf7, 0x5b, 0x57, 0xc3, 0x0f, 0xa0, 0x37, 0xf3, 0x4e, 0xa7, 0x46, 0x87, 0xec, 0x15, - 0x25, 0xdd, 0x69, 0x1d, 0xcf, 0xa2, 0x0b, 0x8e, 0x87, 0xd0, 0x75, 0x3d, 0x66, 0x20, 0xb2, 0x5b, - 0x94, 0x14, 0xb7, 0x08, 0x37, 0xca, 0xf0, 0x23, 0x00, 0xd7, 0x63, 0x1f, 0x4e, 0xa6, 0x4f, 0xdd, - 0x29, 0x33, 0xb6, 0xc8, 0x7e, 0x51, 0x52, 0xf3, 0x26, 0x77, 0xc2, 0xfd, 0x05, 0xcf, 0xf0, 0x43, - 0xb8, 0x7d, 0xf6, 0xee, 0xe5, 0xa9, 0xf7, 0xea, 0x85, 0xd1, 0x25, 0xa4, 0x28, 0xe9, 0x6e, 0x0b, - 0x3d, 0x5b, 0xc5, 0xd5, 0xbb, 0x92, 0xbd, 0x4f, 0x5f, 0xac, 0xce, 0xf7, 0xaf, 0x56, 0x3b, 0xf3, - 0xb1, 0x71, 0xb9, 0xb6, 0xd0, 0xcf, 0xb5, 0x85, 0x7e, 0xaf, 0x2d, 0xf4, 0xf9, 0x8f, 0xd5, 0x99, - 0xf7, 0xf5, 0x7f, 0x79, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0x55, 0xf2, 0x2e, 0x06, 0x7d, 0x02, - 0x00, 0x00, -} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.proto b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.proto deleted file mode 100644 index d6e524ea7ca2..000000000000 --- a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package contenthash; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - -enum CacheRecordType { - option (gogoproto.goproto_enum_prefix) = false; - option (gogoproto.enum_customname) = "CacheRecordType"; - - FILE = 0 [(gogoproto.enumvalue_customname) = "CacheRecordTypeFile"]; - DIR = 1 [(gogoproto.enumvalue_customname) = "CacheRecordTypeDir"]; - DIR_HEADER = 2 [(gogoproto.enumvalue_customname) = "CacheRecordTypeDirHeader"]; - SYMLINK = 3 [(gogoproto.enumvalue_customname) = "CacheRecordTypeSymlink"]; -} - -message CacheRecord { - string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; - CacheRecordType type = 2; - string linkname = 3; -} - -message CacheRecordWithPath { - string path = 1; - CacheRecord record = 2; -} - -message CacheRecords { - repeated CacheRecordWithPath paths = 1; -} \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum_test.go b/vendor/github.com/moby/buildkit/cache/contenthash/checksum_test.go deleted file mode 100644 index 034e74694457..000000000000 --- a/vendor/github.com/moby/buildkit/cache/contenthash/checksum_test.go +++ /dev/null @@ -1,739 +0,0 @@ -package contenthash - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/containerd/containerd/snapshots" - "github.com/containerd/containerd/snapshots/native" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/snapshot" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - "github.com/tonistiigi/fsutil" - fstypes "github.com/tonistiigi/fsutil/types" -) - -const ( - dgstFileData0 = digest.Digest("sha256:cd8e75bca50f2d695f220d0cb0997d8ead387e4f926e8669a92d7f104cc9885b") - dgstDirD0 = digest.Digest("sha256:d47454417d2c554067fbefe5f5719edc49f3cfe969c36b62e34a187a4da0cc9a") - dgstDirD0Modified = digest.Digest("sha256:555ffa3028630d97ba37832b749eda85ab676fd64ffb629fbf0f4ec8c1e3bff1") -) - -func TestChecksumBasicFile(t *testing.T) { - t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) - require.NoError(t, err) - cm := 
setupCacheManager(t, tmpdir, snapshotter) - defer cm.Close() - - ch := []string{ - "ADD foo file data0", - "ADD bar file data1", - "ADD d0 dir", - "ADD d0/abc file data0", - "ADD d0/def symlink abc", - "ADD d0/ghi symlink nosuchfile", - } - - ref := createRef(t, cm, ch) - - // for the digest values, the actual values are not important in development - // phase but consistency is - - cc, err := newCacheContext(ref.Metadata()) - require.NoError(t, err) - - _, err = cc.Checksum(context.TODO(), ref, "nosuch") - require.Error(t, err) - - dgst, err := cc.Checksum(context.TODO(), ref, "foo") - require.NoError(t, err) - - require.Equal(t, dgstFileData0, dgst) - - // second file returns different hash - dgst, err = cc.Checksum(context.TODO(), ref, "bar") - require.NoError(t, err) - - require.Equal(t, digest.Digest("sha256:c2b5e234f5f38fc5864da7def04782f82501a40d46192e4207d5b3f0c3c4732b"), dgst) - - // same file inside a directory - dgst, err = cc.Checksum(context.TODO(), ref, "d0/abc") - require.NoError(t, err) - - require.Equal(t, dgstFileData0, dgst) - - // repeat because codepath is different - dgst, err = cc.Checksum(context.TODO(), ref, "d0/abc") - require.NoError(t, err) - - require.Equal(t, dgstFileData0, dgst) - - // symlink to the same file is followed, returns same hash - dgst, err = cc.Checksum(context.TODO(), ref, "d0/def") - require.NoError(t, err) - - require.Equal(t, dgstFileData0, dgst) - - _, err = cc.Checksum(context.TODO(), ref, "d0/ghi") - require.Error(t, err) - require.Equal(t, errNotFound, errors.Cause(err)) - - dgst, err = cc.Checksum(context.TODO(), ref, "/") - require.NoError(t, err) - - require.Equal(t, digest.Digest("sha256:427c9cf9ae98c0f81fb57a3076b965c7c149b6b0a85625ad4e884236649a42c6"), dgst) - - dgst, err = cc.Checksum(context.TODO(), ref, "d0") - require.NoError(t, err) - - require.Equal(t, dgstDirD0, dgst) - - err = ref.Release(context.TODO()) - require.NoError(t, err) - - // this is same directory as previous d0 - ch = []string{ - "ADD abc file data0", - "ADD def symlink abc", - "ADD ghi symlink nosuchfile", - } - - ref = createRef(t, cm, ch) - - cc, err = newCacheContext(ref.Metadata()) - require.NoError(t, err) - - dgst, err = cc.Checksum(context.TODO(), ref, "/") - require.NoError(t, err) - - require.Equal(t, dgstDirD0, dgst) - - err = ref.Release(context.TODO()) - require.NoError(t, err) - - // test that removing broken symlink changes hash even though symlink itself can't be checksummed - ch = []string{ - "ADD abc file data0", - "ADD def symlink abc", - } - - ref = createRef(t, cm, ch) - - cc, err = newCacheContext(ref.Metadata()) - require.NoError(t, err) - - dgst, err = cc.Checksum(context.TODO(), ref, "/") - require.NoError(t, err) - - require.Equal(t, dgstDirD0Modified, dgst) - require.NotEqual(t, dgstDirD0, dgst) - - err = ref.Release(context.TODO()) - require.NoError(t, err) - - // test multiple scans, get checksum of nested file first - - ch = []string{ - "ADD abc dir", - "ADD abc/aa dir", - "ADD abc/aa/foo file data2", - "ADD d0 dir", - "ADD d0/abc file data0", - "ADD d0/def symlink abc", - "ADD d0/ghi symlink nosuchfile", - } - - ref = createRef(t, cm, ch) - - cc, err = newCacheContext(ref.Metadata()) - require.NoError(t, err) - - dgst, err = cc.Checksum(context.TODO(), ref, "abc/aa/foo") - require.NoError(t, err) - - require.Equal(t, digest.Digest("sha256:1c67653c3cf95b12a0014e2c4cd1d776b474b3218aee54155d6ae27b9b999c54"), dgst) - require.NotEqual(t, dgstDirD0, dgst) - - // this will force rescan - dgst, err = cc.Checksum(context.TODO(), ref, "d0") 
- require.NoError(t, err) - - require.Equal(t, dgstDirD0, dgst) - - err = ref.Release(context.TODO()) - require.NoError(t, err) -} - -func TestHandleChange(t *testing.T) { - t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) - require.NoError(t, err) - cm := setupCacheManager(t, tmpdir, snapshotter) - defer cm.Close() - - ch := []string{ - "ADD foo file data0", - "ADD bar file data1", - "ADD d0 dir", - "ADD d0/abc file data0", - "ADD d0/def symlink abc", - "ADD d0/ghi symlink nosuchfile", - } - - ref := createRef(t, cm, nil) - - // for the digest values, the actual values are not important in development - // phase but consistency is - - cc, err := newCacheContext(ref.Metadata()) - require.NoError(t, err) - - err = emit(cc.HandleChange, changeStream(ch)) - require.NoError(t, err) - - dgstFoo, err := cc.Checksum(context.TODO(), ref, "foo") - require.NoError(t, err) - - require.Equal(t, dgstFileData0, dgstFoo) - - // symlink to the same file is followed, returns same hash - dgst, err := cc.Checksum(context.TODO(), ref, "d0/def") - require.NoError(t, err) - - require.Equal(t, dgstFoo, dgst) - - // symlink to the same file is followed, returns same hash - dgst, err = cc.Checksum(context.TODO(), ref, "d0") - require.NoError(t, err) - - require.Equal(t, dgstDirD0, dgst) - - ch = []string{ - "DEL d0/ghi file", - } - - err = emit(cc.HandleChange, changeStream(ch)) - require.NoError(t, err) - - dgst, err = cc.Checksum(context.TODO(), ref, "d0") - require.NoError(t, err) - require.Equal(t, dgstDirD0Modified, dgst) - - ch = []string{ - "DEL d0 dir", - } - - err = emit(cc.HandleChange, changeStream(ch)) - require.NoError(t, err) - - _, err = cc.Checksum(context.TODO(), ref, "d0") - require.Error(t, err) - require.Equal(t, errNotFound, errors.Cause(err)) - - _, err = cc.Checksum(context.TODO(), ref, "d0/abc") - require.Error(t, err) - require.Equal(t, errNotFound, errors.Cause(err)) - - err = ref.Release(context.TODO()) - require.NoError(t, err) -} - -func TestHandleRecursiveDir(t *testing.T) { - t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) - require.NoError(t, err) - cm := setupCacheManager(t, tmpdir, snapshotter) - defer cm.Close() - - ch := []string{ - "ADD d0 dir", - "ADD d0/foo dir", - "ADD d0/foo/bar dir", - "ADD d0/foo/bar/foo file data0", - "ADD d0/foo/bar/bar file data1", - "ADD d1 dir", - "ADD d1/foo file data0", - } - - ref := createRef(t, cm, nil) - - cc, err := newCacheContext(ref.Metadata()) - require.NoError(t, err) - - err = emit(cc.HandleChange, changeStream(ch)) - require.NoError(t, err) - - dgst, err := cc.Checksum(context.TODO(), ref, "d0/foo/bar") - require.NoError(t, err) - - ch = []string{ - "DEL d0 dir", - "DEL d0/foo dir", // the differ can produce a record for subdir as well - "ADD d1/bar file data1", - } - - err = emit(cc.HandleChange, changeStream(ch)) - require.NoError(t, err) - - dgst2, err := cc.Checksum(context.TODO(), ref, "d1") - require.NoError(t, err) - require.Equal(t, dgst2, dgst) - - _, err = cc.Checksum(context.TODO(), ref, "") - require.NoError(t, err) -} - -func TestChecksumUnorderedFiles(t *testing.T) { - t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - snapshotter, err := 
native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) - require.NoError(t, err) - cm := setupCacheManager(t, tmpdir, snapshotter) - defer cm.Close() - - ch := []string{ - "ADD d0 dir", - "ADD d0/foo dir", - "ADD d0/foo/bar file data0", - "ADD d0/foo-subdir dir", - "ADD d0/foo.subdir file data1", - } - - ref := createRef(t, cm, nil) - - cc, err := newCacheContext(ref.Metadata()) - require.NoError(t, err) - - err = emit(cc.HandleChange, changeStream(ch)) - require.NoError(t, err) - - dgst, err := cc.Checksum(context.TODO(), ref, "d0") - require.NoError(t, err) - - require.Equal(t, dgst, digest.Digest("sha256:14276c302c940a80f82ca5477bf766c98a24702d6a9948ee71bb277cdad3ae05")) - - // check regression from earlier version that didn't track some files - ch = []string{ - "ADD d0 dir", - "ADD d0/foo dir", - "ADD d0/foo/bar file data0", - } - - ref = createRef(t, cm, nil) - - cc, err = newCacheContext(ref.Metadata()) - require.NoError(t, err) - - err = emit(cc.HandleChange, changeStream(ch)) - require.NoError(t, err) - - dgst2, err := cc.Checksum(context.TODO(), ref, "d0") - require.NoError(t, err) - - require.NotEqual(t, dgst, dgst2) -} - -func TestSymlinkInPathScan(t *testing.T) { - t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) - require.NoError(t, err) - cm := setupCacheManager(t, tmpdir, snapshotter) - defer cm.Close() - - ch := []string{ - "ADD d0 dir", - "ADD d0/sub dir", - "ADD d0/sub/foo file data0", - "ADD d0/def symlink sub", - } - ref := createRef(t, cm, ch) - - dgst, err := Checksum(context.TODO(), ref, "d0/def/foo") - require.NoError(t, err) - require.Equal(t, dgstFileData0, dgst) - - dgst, err = Checksum(context.TODO(), ref, "d0/def/foo") - require.NoError(t, err) - require.Equal(t, dgstFileData0, dgst) - - err = ref.Release(context.TODO()) - require.NoError(t, err) -} - -func TestSymlinkNeedsScan(t *testing.T) { - t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) - require.NoError(t, err) - cm := setupCacheManager(t, tmpdir, snapshotter) - defer cm.Close() - - ch := []string{ - "ADD c0 dir", - "ADD c0/sub dir", - "ADD c0/sub/foo file data0", - "ADD d0 dir", - "ADD d0/d1 dir", - "ADD d0/d1/def symlink ../../c0/sub", - } - ref := createRef(t, cm, ch) - - // scan the d0 path containing the symlink that doesn't get followed - _, err = Checksum(context.TODO(), ref, "d0/d1") - require.NoError(t, err) - - dgst, err := Checksum(context.TODO(), ref, "d0/d1/def/foo") - require.NoError(t, err) - require.Equal(t, dgstFileData0, dgst) - - err = ref.Release(context.TODO()) - require.NoError(t, err) -} - -func TestSymlinkAbsDirSuffix(t *testing.T) { - t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) - require.NoError(t, err) - cm := setupCacheManager(t, tmpdir, snapshotter) - defer cm.Close() - - ch := []string{ - "ADD c0 dir", - "ADD c0/sub dir", - "ADD c0/sub/foo file data0", - "ADD link symlink /c0/sub/", - } - ref := createRef(t, cm, ch) - - dgst, err := Checksum(context.TODO(), ref, "link/foo") - require.NoError(t, err) - require.Equal(t, dgstFileData0, dgst) - - err = ref.Release(context.TODO()) - require.NoError(t, err) -} - -func
-func TestSymlinkInPathHandleChange(t *testing.T) {
-  t.Parallel()
-  tmpdir, err := ioutil.TempDir("", "buildkit-state")
-  require.NoError(t, err)
-  defer os.RemoveAll(tmpdir)
-
-  snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
-  require.NoError(t, err)
-  cm := setupCacheManager(t, tmpdir, snapshotter)
-  defer cm.Close()
-
-  ch := []string{
-    "ADD d1 dir",
-    "ADD d1/sub dir",
-    "ADD d1/sub/foo file data0",
-    "ADD d1/sub/bar symlink /link",
-    "ADD d1/sub/baz symlink ../../../link",
-    "ADD d1/sub/bay symlink ../../../../link/.", // weird link
-    "ADD d1/def symlink sub",
-    "ADD sub dir",
-    "ADD sub/d0 dir",
-    "ADD sub/d0/abc file data0",
-    "ADD sub/d0/def symlink abc",
-    "ADD sub/d0/ghi symlink nosuchfile",
-    "ADD link symlink sub/d0",
-  }
-
-  ref := createRef(t, cm, nil)
-
-  cc, err := newCacheContext(ref.Metadata())
-  require.NoError(t, err)
-
-  err = emit(cc.HandleChange, changeStream(ch))
-  require.NoError(t, err)
-
-  dgst, err := cc.Checksum(context.TODO(), ref, "d1/def/foo")
-  require.NoError(t, err)
-  require.Equal(t, dgstFileData0, dgst)
-
-  dgst, err = cc.Checksum(context.TODO(), ref, "d1/def/bar/abc")
-  require.NoError(t, err)
-  require.Equal(t, dgstFileData0, dgst)
-
-  dgst, err = cc.Checksum(context.TODO(), ref, "sub/d0")
-  require.NoError(t, err)
-  require.Equal(t, dgstDirD0, dgst)
-
-  dgst, err = cc.Checksum(context.TODO(), ref, "d1/def/baz")
-  require.NoError(t, err)
-  require.Equal(t, dgstDirD0, dgst)
-
-  dgst, err = cc.Checksum(context.TODO(), ref, "d1/def/bay")
-  require.NoError(t, err)
-  require.Equal(t, dgstDirD0, dgst)
-
-  dgst, err = cc.Checksum(context.TODO(), ref, "link")
-  require.NoError(t, err)
-  require.Equal(t, dgstDirD0, dgst)
-
-  err = ref.Release(context.TODO())
-  require.NoError(t, err)
-}
-
-func TestPersistence(t *testing.T) {
-  t.Parallel()
-  tmpdir, err := ioutil.TempDir("", "buildkit-state")
-  require.NoError(t, err)
-  defer os.RemoveAll(tmpdir)
-
-  snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
-  require.NoError(t, err)
-  cm := setupCacheManager(t, tmpdir, snapshotter)
-  defer cm.Close()
-
-  ch := []string{
-    "ADD foo file data0",
-    "ADD bar file data1",
-    "ADD d0 dir",
-    "ADD d0/abc file data0",
-    "ADD d0/def symlink abc",
-    "ADD d0/ghi symlink nosuchfile",
-  }
-
-  ref := createRef(t, cm, ch)
-  id := ref.ID()
-
-  dgst, err := Checksum(context.TODO(), ref, "foo")
-  require.NoError(t, err)
-  require.Equal(t, dgstFileData0, dgst)
-
-  err = ref.Release(context.TODO())
-  require.NoError(t, err)
-
-  ref, err = cm.Get(context.TODO(), id)
-  require.NoError(t, err)
-
-  dgst, err = Checksum(context.TODO(), ref, "foo")
-  require.NoError(t, err)
-  require.Equal(t, dgstFileData0, dgst)
-
-  err = ref.Release(context.TODO())
-  require.NoError(t, err)
-
-  time.Sleep(100 * time.Millisecond) // saving happens in the background
-
-  // we can't close the snapshotter and open it twice (especially its internal bbolt store)
-  cm.Close()
-  getDefaultManager().lru.Purge()
-  cm = setupCacheManager(t, tmpdir, snapshotter)
-  defer cm.Close()
-
-  ref, err = cm.Get(context.TODO(), id)
-  require.NoError(t, err)
-
-  dgst, err = Checksum(context.TODO(), ref, "foo")
-  require.NoError(t, err)
-  require.Equal(t, dgstFileData0, dgst)
-}
-
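TestPersistence above ultimately asserts that cache metadata survives a manager restart because it lives in a bbolt file. A self-contained sketch of just that round trip, borrowing the `_main` bucket and `snapshot.size` key names from cache/metadata.go further down, but none of buildkit's code:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	bolt "go.etcd.io/bbolt"
)

func main() {
	dir, err := os.MkdirTemp("", "bolt-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	path := filepath.Join(dir, "metadata.db")

	// First "manager": write a record, then close to release the file lock.
	db, err := bolt.Open(path, 0600, nil)
	if err != nil {
		panic(err)
	}
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("_main"))
		if err != nil {
			return err
		}
		return b.Put([]byte("snapshot.size"), []byte("1024"))
	})
	if err != nil {
		panic(err)
	}
	db.Close()

	// Second "manager": reopening the same file finds the record again,
	// which is the end-to-end property TestPersistence checks.
	db, err = bolt.Open(path, 0600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()
	db.View(func(tx *bolt.Tx) error {
		fmt.Printf("size=%s\n", tx.Bucket([]byte("_main")).Get([]byte("snapshot.size")))
		return nil
	})
}
```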
-func createRef(t *testing.T, cm cache.Manager, files []string) cache.ImmutableRef {
-  mref, err := cm.New(context.TODO(), nil, cache.CachePolicyRetain)
-  require.NoError(t, err)
-
-  mounts, err := mref.Mount(context.TODO(), false)
-  require.NoError(t, err)
-
-  lm := snapshot.LocalMounter(mounts)
-
-  mp, err := lm.Mount()
-  require.NoError(t, err)
-
-  err = writeChanges(mp, changeStream(files))
-  lm.Unmount()
-  require.NoError(t, err)
-
-  ref, err := mref.Commit(context.TODO())
-  require.NoError(t, err)
-
-  return ref
-}
-
-func setupCacheManager(t *testing.T, tmpdir string, snapshotter snapshots.Snapshotter) cache.Manager {
-  md, err := metadata.NewStore(filepath.Join(tmpdir, "metadata.db"))
-  require.NoError(t, err)
-
-  cm, err := cache.NewManager(cache.ManagerOpt{
-    Snapshotter:   snapshot.FromContainerdSnapshotter(snapshotter),
-    MetadataStore: md,
-  })
-  require.NoError(t, err)
-
-  return cm
-}
-
-// these test helpers are from tonistiigi/fsutil
-
-type change struct {
-  kind fsutil.ChangeKind
-  path string
-  fi   os.FileInfo
-  data string
-}
-
-func changeStream(dt []string) (changes []*change) {
-  for _, s := range dt {
-    changes = append(changes, parseChange(s))
-  }
-  return
-}
-
-func parseChange(str string) *change {
-  f := strings.Fields(str)
-  errStr := fmt.Sprintf("invalid change %q", str)
-  if len(f) < 3 {
-    panic(errStr)
-  }
-  c := &change{}
-  switch f[0] {
-  case "ADD":
-    c.kind = fsutil.ChangeKindAdd
-  case "CHG":
-    c.kind = fsutil.ChangeKindModify
-  case "DEL":
-    c.kind = fsutil.ChangeKindDelete
-  default:
-    panic(errStr)
-  }
-  c.path = f[1]
-  st := &fstypes.Stat{}
-  switch f[2] {
-  case "file":
-    if len(f) > 3 {
-      if f[3][0] == '>' {
-        st.Linkname = f[3][1:]
-      } else {
-        c.data = f[3]
-        st.Size_ = int64(len(f[3]))
-      }
-    }
-    st.Mode |= 0644
-  case "dir":
-    st.Mode |= uint32(os.ModeDir)
-    st.Mode |= 0755
-  case "symlink":
-    if len(f) < 4 {
-      panic(errStr)
-    }
-    st.Mode |= uint32(os.ModeSymlink)
-    st.Linkname = f[3]
-    st.Mode |= 0777
-  }
-
-  c.fi = &fsutil.StatInfo{Stat: st}
-  return c
-}
-
-func emit(fn fsutil.HandleChangeFn, inp []*change) error {
-  for _, c := range inp {
-    stat, ok := c.fi.Sys().(*fstypes.Stat)
-    if !ok {
-      return errors.Errorf("invalid non-stat change %s", c.fi.Name())
-    }
-    fi := c.fi
-    if c.kind != fsutil.ChangeKindDelete {
-      h, err := NewFromStat(stat)
-      if err != nil {
-        return err
-      }
-      if _, err := io.Copy(h, strings.NewReader(c.data)); err != nil {
-        return err
-      }
-      fi = &withHash{FileInfo: c.fi, digest: digest.NewDigest(digest.SHA256, h)}
-    }
-    if err := fn(c.kind, c.path, fi, nil); err != nil {
-      return err
-    }
-  }
-  return nil
-}
-
-type withHash struct {
-  digest digest.Digest
-  os.FileInfo
-}
-
-func (wh *withHash) Digest() digest.Digest {
-  return wh.digest
-}
-
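withHash smuggles a precomputed digest through the os.FileInfo interface so that consumers can recover it with a type assertion. The same embedding trick in isolation; `digester` and `withDigest` are hypothetical names for this sketch:

```go
package main

import (
	"fmt"
	"os"
)

// digester is the behavioral contract checked for with a type assertion:
// any FileInfo that can also report a content digest.
type digester interface {
	Digest() string
}

// withDigest decorates an os.FileInfo with a precomputed digest by embedding
// the interface and adding one extra method.
type withDigest struct {
	os.FileInfo
	digest string
}

func (w *withDigest) Digest() string { return w.digest }

func main() {
	fi, err := os.Stat(".")
	if err != nil {
		panic(err)
	}
	var wrapped os.FileInfo = &withDigest{FileInfo: fi, digest: "sha256:deadbeef"}

	// Downstream code recovers the digest without knowing the concrete type.
	if d, ok := wrapped.(digester); ok {
		fmt.Println(wrapped.Name(), d.Digest())
	}
}
```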
-func writeChanges(p string, inp []*change) error {
-  for _, c := range inp {
-    if c.kind == fsutil.ChangeKindAdd {
-      p := filepath.Join(p, c.path)
-      stat, ok := c.fi.Sys().(*fstypes.Stat)
-      if !ok {
-        return errors.Errorf("invalid non-stat change %s", p)
-      }
-      if c.fi.IsDir() {
-        // The snapshot root ('/') is always created with 0755.
-        // We use the same permission mode here.
-        if err := os.Mkdir(p, 0755); err != nil {
-          return err
-        }
-      } else if c.fi.Mode()&os.ModeSymlink != 0 {
-        if err := os.Symlink(stat.Linkname, p); err != nil {
-          return err
-        }
-      } else if len(stat.Linkname) > 0 {
-        if err := os.Link(filepath.Join(p, stat.Linkname), p); err != nil {
-          return err
-        }
-      } else {
-        f, err := os.Create(p)
-        if err != nil {
-          return err
-        }
-        if len(c.data) > 0 {
-          if _, err := f.Write([]byte(c.data)); err != nil {
-            return err
-          }
-        }
-        f.Close()
-      }
-    }
-  }
-  return nil
-}
diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go b/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go
deleted file mode 100644
index 84018e785226..000000000000
--- a/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package contenthash
-
-import (
-  "archive/tar"
-  "crypto/sha256"
-  "hash"
-  "os"
-  "path/filepath"
-  "time"
-
-  fstypes "github.com/tonistiigi/fsutil/types"
-)
-
-// NewFileHash returns a new hash that is used for the builder cache keys
func NewFileHash(path string, fi os.FileInfo) (hash.Hash, error) {
-  var link string
-  if fi.Mode()&os.ModeSymlink != 0 {
-    var err error
-    link, err = os.Readlink(path)
-    if err != nil {
-      return nil, err
-    }
-  }
-
-  stat := &fstypes.Stat{
-    Mode:     uint32(fi.Mode()),
-    Size_:    fi.Size(),
-    ModTime:  fi.ModTime().UnixNano(),
-    Linkname: link,
-  }
-
-  if fi.Mode()&os.ModeSymlink != 0 {
-    stat.Mode = stat.Mode | 0777
-  }
-
-  if err := setUnixOpt(path, fi, stat); err != nil {
-    return nil, err
-  }
-  return NewFromStat(stat)
-}
-
-func NewFromStat(stat *fstypes.Stat) (hash.Hash, error) {
-  fi := &statInfo{stat}
-  hdr, err := tar.FileInfoHeader(fi, stat.Linkname)
-  if err != nil {
-    return nil, err
-  }
-  hdr.Name = "" // note: empty name is different from the current hash in docker build. Name is added on recursive directory scan instead
-  hdr.Mode = int64(chmodWindowsTarEntry(os.FileMode(hdr.Mode)))
-  hdr.Devmajor = stat.Devmajor
-  hdr.Devminor = stat.Devminor
-
-  if len(stat.Xattrs) > 0 {
-    hdr.Xattrs = make(map[string]string, len(stat.Xattrs))
-    for k, v := range stat.Xattrs {
-      hdr.Xattrs[k] = string(v)
-    }
-  }
-  // fmt.Printf("hdr: %#v\n", hdr)
-  tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()}
-  tsh.Reset() // initialize header
-  return tsh, nil
-}
-
-type tarsumHash struct {
-  hash.Hash
-  hdr *tar.Header
-}
-
-// Reset resets the Hash to its initial state.
-func (tsh *tarsumHash) Reset() { - // comply with hash.Hash and reset to the state hash had before any writes - tsh.Hash.Reset() - WriteV1TarsumHeaders(tsh.hdr, tsh.Hash) -} - -type statInfo struct { - *fstypes.Stat -} - -func (s *statInfo) Name() string { - return filepath.Base(s.Stat.Path) -} -func (s *statInfo) Size() int64 { - return s.Stat.Size_ -} -func (s *statInfo) Mode() os.FileMode { - return os.FileMode(s.Stat.Mode) -} -func (s *statInfo) ModTime() time.Time { - return time.Unix(s.Stat.ModTime/1e9, s.Stat.ModTime%1e9) -} -func (s *statInfo) IsDir() bool { - return s.Mode().IsDir() -} -func (s *statInfo) Sys() interface{} { - return s.Stat -} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go deleted file mode 100644 index 4f610d772db6..000000000000 --- a/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build !windows - -package contenthash - -import ( - "os" - "syscall" - - "github.com/containerd/continuity/sysx" - fstypes "github.com/tonistiigi/fsutil/types" - - "golang.org/x/sys/unix" -) - -func chmodWindowsTarEntry(perm os.FileMode) os.FileMode { - return perm -} - -func setUnixOpt(path string, fi os.FileInfo, stat *fstypes.Stat) error { - s := fi.Sys().(*syscall.Stat_t) - - stat.Uid = s.Uid - stat.Gid = s.Gid - - if !fi.IsDir() { - if s.Mode&syscall.S_IFBLK != 0 || - s.Mode&syscall.S_IFCHR != 0 { - stat.Devmajor = int64(unix.Major(uint64(s.Rdev))) - stat.Devminor = int64(unix.Minor(uint64(s.Rdev))) - } - } - - attrs, err := sysx.LListxattr(path) - if err != nil { - return err - } - if len(attrs) > 0 { - stat.Xattrs = map[string][]byte{} - for _, attr := range attrs { - v, err := sysx.LGetxattr(path, attr) - if err == nil { - stat.Xattrs[attr] = v - } - } - } - return nil -} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go deleted file mode 100644 index e15bf1e5abd8..000000000000 --- a/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build windows - -package contenthash - -import ( - "os" - - fstypes "github.com/tonistiigi/fsutil/types" -) - -// chmodWindowsTarEntry is used to adjust the file permissions used in tar -// header based on the platform the archival is done. -func chmodWindowsTarEntry(perm os.FileMode) os.FileMode { - perm &= 0755 - // Add the x bit: make everything +x from windows - perm |= 0111 - - return perm -} - -func setUnixOpt(path string, fi os.FileInfo, stat *fstypes.Stat) error { - return nil -} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/generate.go b/vendor/github.com/moby/buildkit/cache/contenthash/generate.go deleted file mode 100644 index e4bd2c50c0da..000000000000 --- a/vendor/github.com/moby/buildkit/cache/contenthash/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package contenthash - -//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. 
checksum.proto diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/path.go b/vendor/github.com/moby/buildkit/cache/contenthash/path.go deleted file mode 100644 index 1084da084a77..000000000000 --- a/vendor/github.com/moby/buildkit/cache/contenthash/path.go +++ /dev/null @@ -1,107 +0,0 @@ -package contenthash - -import ( - "errors" - "os" - "path/filepath" -) - -var ( - errTooManyLinks = errors.New("too many links") -) - -type onSymlinkFunc func(string, string) error - -// rootPath joins a path with a root, evaluating and bounding any -// symlink to the root directory. -// This is containerd/continuity/fs RootPath implementation with a callback on -// resolving the symlink. -func rootPath(root, path string, cb onSymlinkFunc) (string, error) { - if path == "" { - return root, nil - } - var linksWalked int // to protect against cycles - for { - i := linksWalked - newpath, err := walkLinks(root, path, &linksWalked, cb) - if err != nil { - return "", err - } - path = newpath - if i == linksWalked { - newpath = filepath.Join("/", newpath) - if path == newpath { - return filepath.Join(root, newpath), nil - } - path = newpath - } - } -} - -func walkLink(root, path string, linksWalked *int, cb onSymlinkFunc) (newpath string, islink bool, err error) { - if *linksWalked > 255 { - return "", false, errTooManyLinks - } - - path = filepath.Join("/", path) - if path == "/" { - return path, false, nil - } - realPath := filepath.Join(root, path) - - fi, err := os.Lstat(realPath) - if err != nil { - // If path does not yet exist, treat as non-symlink - if os.IsNotExist(err) { - return path, false, nil - } - return "", false, err - } - if fi.Mode()&os.ModeSymlink == 0 { - return path, false, nil - } - newpath, err = os.Readlink(realPath) - if err != nil { - return "", false, err - } - if cb != nil { - if err := cb(path, newpath); err != nil { - return "", false, err - } - } - *linksWalked++ - return newpath, true, nil -} - -func walkLinks(root, path string, linksWalked *int, cb onSymlinkFunc) (string, error) { - switch dir, file := filepath.Split(path); { - case dir == "": - newpath, _, err := walkLink(root, file, linksWalked, cb) - return newpath, err - case file == "": - if os.IsPathSeparator(dir[len(dir)-1]) { - if dir == "/" { - return dir, nil - } - return walkLinks(root, dir[:len(dir)-1], linksWalked, cb) - } - newpath, _, err := walkLink(root, dir, linksWalked, cb) - return newpath, err - default: - newdir, err := walkLinks(root, dir, linksWalked, cb) - if err != nil { - return "", err - } - newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked, cb) - if err != nil { - return "", err - } - if !islink { - return newpath, nil - } - if filepath.IsAbs(newpath) { - return newpath, nil - } - return filepath.Join(newdir, newpath), nil - } -} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go b/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go deleted file mode 100644 index 601c41ecb924..000000000000 --- a/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go +++ /dev/null @@ -1,60 +0,0 @@ -package contenthash - -import ( - "archive/tar" - "io" - "sort" - "strconv" -) - -// WriteV1TarsumHeaders writes a tar header to a writer in V1 tarsum format. -func WriteV1TarsumHeaders(h *tar.Header, w io.Writer) { - for _, elem := range v1TarHeaderSelect(h) { - w.Write([]byte(elem[0] + elem[1])) - } -} - -// Functions below are from docker legacy tarsum implementation. -// There is no valid technical reason to continue using them. 
-
-func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
-  return [][2]string{
-    {"name", h.Name},
-    {"mode", strconv.FormatInt(h.Mode, 10)},
-    {"uid", strconv.Itoa(h.Uid)},
-    {"gid", strconv.Itoa(h.Gid)},
-    {"size", strconv.FormatInt(h.Size, 10)},
-    {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)},
-    {"typeflag", string([]byte{h.Typeflag})},
-    {"linkname", h.Linkname},
-    {"uname", h.Uname},
-    {"gname", h.Gname},
-    {"devmajor", strconv.FormatInt(h.Devmajor, 10)},
-    {"devminor", strconv.FormatInt(h.Devminor, 10)},
-  }
-}
-
-func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
-  // Get extended attributes.
-  xAttrKeys := make([]string, len(h.Xattrs))
-  for k := range h.Xattrs {
-    xAttrKeys = append(xAttrKeys, k)
-  }
-  sort.Strings(xAttrKeys)
-
-  // Make the slice with enough capacity to hold the 11 basic headers
-  // we want from the v0 selector plus however many xattrs we have.
-  orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys))
-
-  // Copy all headers from v0 excluding the 'mtime' header (index 5).
-  v0headers := v0TarHeaderSelect(h)
-  orderedHeaders = append(orderedHeaders, v0headers[0:5]...)
-  orderedHeaders = append(orderedHeaders, v0headers[6:]...)
-
-  // Finally, append the sorted xattrs.
-  for _, k := range xAttrKeys {
-    orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]})
-  }
-
-  return
-}
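WriteV1TarsumHeaders concatenates the selected key/value pairs with no delimiter and streams them into the hash before any file content; dropping mtime from the v1 selection is what keeps checksums stable across rebuilds that only touch timestamps. A trimmed, self-contained sketch of that serialization (the four-pair selection here is an abbreviation of the real list, not the full v1 set):

```go
package main

import (
	"archive/tar"
	"crypto/sha256"
	"fmt"
	"io"
)

// writeHeaders mirrors WriteV1TarsumHeaders: each selected key/value pair is
// written back to back into the hash state, with no separators.
func writeHeaders(pairs [][2]string, w io.Writer) {
	for _, p := range pairs {
		io.WriteString(w, p[0]+p[1])
	}
}

func main() {
	hdr := &tar.Header{Name: "", Mode: 0644, Size: 5, Typeflag: tar.TypeReg}

	// Abbreviated selection; note there is no mtime entry.
	pairs := [][2]string{
		{"name", hdr.Name},
		{"mode", fmt.Sprintf("%d", hdr.Mode)},
		{"size", fmt.Sprintf("%d", hdr.Size)},
		{"typeflag", string([]byte{hdr.Typeflag})},
	}

	h := sha256.New()
	writeHeaders(pairs, h)
	io.WriteString(h, "data0") // file content follows the header fields
	fmt.Printf("sha256:%x\n", h.Sum(nil))
}
```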
diff --git a/vendor/github.com/moby/buildkit/cache/manager.go b/vendor/github.com/moby/buildkit/cache/manager.go
deleted file mode 100644
index 948eb8df8b3a..000000000000
--- a/vendor/github.com/moby/buildkit/cache/manager.go
+++ /dev/null
@@ -1,863 +0,0 @@
-package cache
-
-import (
-  "context"
-  "sort"
-  "sync"
-  "time"
-
-  "github.com/containerd/containerd/filters"
-  "github.com/containerd/containerd/snapshots"
-  "github.com/moby/buildkit/cache/metadata"
-  "github.com/moby/buildkit/client"
-  "github.com/moby/buildkit/identity"
-  "github.com/moby/buildkit/snapshot"
-  "github.com/pkg/errors"
-  "github.com/sirupsen/logrus"
-  "golang.org/x/sync/errgroup"
-)
-
-var (
-  ErrLocked   = errors.New("locked")
-  errNotFound = errors.New("not found")
-  errInvalid  = errors.New("invalid")
-)
-
-type ManagerOpt struct {
-  Snapshotter     snapshot.SnapshotterBase
-  MetadataStore   *metadata.Store
-  PruneRefChecker ExternalRefCheckerFunc
-}
-
-type Accessor interface {
-  Get(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error)
-  GetFromSnapshotter(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error)
-  New(ctx context.Context, s ImmutableRef, opts ...RefOption) (MutableRef, error)
-  GetMutable(ctx context.Context, id string) (MutableRef, error) // Rebase?
-}
-
-type Controller interface {
-  DiskUsage(ctx context.Context, info client.DiskUsageInfo) ([]*client.UsageInfo, error)
-  Prune(ctx context.Context, ch chan client.UsageInfo, info ...client.PruneInfo) error
-}
-
-type Manager interface {
-  Accessor
-  Controller
-  Close() error
-}
-
-type ExternalRefCheckerFunc func() (ExternalRefChecker, error)
-
-type ExternalRefChecker interface {
-  Exists(key string) bool
-}
-
-type cacheManager struct {
-  records map[string]*cacheRecord
-  mu      sync.Mutex
-  ManagerOpt
-  md *metadata.Store
-
-  muPrune sync.Mutex // guards against parallel prunes producing inconsistent results
-}
-
-func NewManager(opt ManagerOpt) (Manager, error) {
-  cm := &cacheManager{
-    ManagerOpt: opt,
-    md:         opt.MetadataStore,
-    records:    make(map[string]*cacheRecord),
-  }
-
-  if err := cm.init(context.TODO()); err != nil {
-    return nil, err
-  }
-
-  // cm.scheduleGC(5 * time.Minute)
-
-  return cm, nil
-}
-
-// init loads all snapshots from metadata state and tries to load the records
-// from the snapshotter. If a snapshot can't be found, its metadata is deleted as well.
-func (cm *cacheManager) init(ctx context.Context) error {
-  items, err := cm.md.All()
-  if err != nil {
-    return err
-  }
-
-  for _, si := range items {
-    if _, err := cm.getRecord(ctx, si.ID(), false); err != nil {
-      logrus.Debugf("could not load snapshot %s: %v", si.ID(), err)
-      cm.md.Clear(si.ID())
-      // TODO: make sure content is deleted as well
-    }
-  }
-  return nil
-}
-
-// Close closes the manager and releases the metadata database lock. No other
-// method should be called after Close.
-func (cm *cacheManager) Close() error {
-  // TODO: allocate internal context and cancel it here
-  return cm.md.Close()
-}
-
-// Get returns an immutable snapshot reference for ID
-func (cm *cacheManager) Get(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error) {
-  cm.mu.Lock()
-  defer cm.mu.Unlock()
-  return cm.get(ctx, id, false, opts...)
-}
-
-// GetFromSnapshotter returns an immutable snapshot reference for ID, creating
-// the record from the snapshotter even if no metadata is stored for it yet
-func (cm *cacheManager) GetFromSnapshotter(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error) {
-  cm.mu.Lock()
-  defer cm.mu.Unlock()
-  return cm.get(ctx, id, true, opts...)
-}
-
-// get requires manager lock to be taken
-func (cm *cacheManager) get(ctx context.Context, id string, fromSnapshotter bool, opts ...RefOption) (ImmutableRef, error) {
-  rec, err := cm.getRecord(ctx, id, fromSnapshotter, opts...)
-  if err != nil {
-    return nil, err
-  }
-  rec.mu.Lock()
-  defer rec.mu.Unlock()
-
-  triggerUpdate := true
-  for _, o := range opts {
-    if o == NoUpdateLastUsed {
-      triggerUpdate = false
-    }
-  }
-
-  if rec.mutable {
-    if len(rec.refs) != 0 {
-      return nil, errors.Wrapf(ErrLocked, "%s is locked", id)
-    }
-    if rec.equalImmutable != nil {
-      return rec.equalImmutable.ref(triggerUpdate), nil
-    }
-    return rec.mref(triggerUpdate).commit(ctx)
-  }
-
-  return rec.ref(triggerUpdate), nil
-}
-
-// getRecord returns the record for id. Requires manager lock.
-func (cm *cacheManager) getRecord(ctx context.Context, id string, fromSnapshotter bool, opts ...RefOption) (cr *cacheRecord, retErr error) { - if rec, ok := cm.records[id]; ok { - if rec.isDead() { - return nil, errNotFound - } - return rec, nil - } - - md, ok := cm.md.Get(id) - if !ok && !fromSnapshotter { - return nil, errNotFound - } - if mutableID := getEqualMutable(md); mutableID != "" { - mutable, err := cm.getRecord(ctx, mutableID, fromSnapshotter) - if err != nil { - // check loading mutable deleted record from disk - if errors.Cause(err) == errNotFound { - cm.md.Clear(id) - } - return nil, err - } - rec := &cacheRecord{ - mu: &sync.Mutex{}, - cm: cm, - refs: make(map[ref]struct{}), - parent: mutable.parentRef(false), - md: md, - equalMutable: &mutableRef{cacheRecord: mutable}, - } - mutable.equalImmutable = &immutableRef{cacheRecord: rec} - cm.records[id] = rec - return rec, nil - } - - info, err := cm.Snapshotter.Stat(ctx, id) - if err != nil { - return nil, errors.Wrap(errNotFound, err.Error()) - } - - var parent ImmutableRef - if info.Parent != "" { - parent, err = cm.get(ctx, info.Parent, fromSnapshotter, append(opts, NoUpdateLastUsed)...) - if err != nil { - return nil, err - } - defer func() { - if retErr != nil { - parent.Release(context.TODO()) - } - }() - } - - rec := &cacheRecord{ - mu: &sync.Mutex{}, - mutable: info.Kind != snapshots.KindCommitted, - cm: cm, - refs: make(map[ref]struct{}), - parent: parent, - md: md, - } - - // the record was deleted but we crashed before data on disk was removed - if getDeleted(md) { - if err := rec.remove(ctx, true); err != nil { - return nil, err - } - return nil, errNotFound - } - - if err := initializeMetadata(rec, opts...); err != nil { - if parent != nil { - parent.Release(context.TODO()) - } - return nil, err - } - - cm.records[id] = rec - return rec, nil -} - -func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, opts ...RefOption) (MutableRef, error) { - id := identity.NewID() - - var parent ImmutableRef - var parentID string - if s != nil { - var err error - parent, err = cm.Get(ctx, s.ID(), NoUpdateLastUsed) - if err != nil { - return nil, err - } - if err := parent.Finalize(ctx, true); err != nil { - return nil, err - } - parentID = parent.ID() - } - - if err := cm.Snapshotter.Prepare(ctx, id, parentID); err != nil { - if parent != nil { - parent.Release(context.TODO()) - } - return nil, errors.Wrapf(err, "failed to prepare %s", id) - } - - md, _ := cm.md.Get(id) - - rec := &cacheRecord{ - mu: &sync.Mutex{}, - mutable: true, - cm: cm, - refs: make(map[ref]struct{}), - parent: parent, - md: md, - } - - if err := initializeMetadata(rec, opts...); err != nil { - if parent != nil { - parent.Release(context.TODO()) - } - return nil, err - } - - cm.mu.Lock() - defer cm.mu.Unlock() - - cm.records[id] = rec // TODO: save to db - - return rec.mref(true), nil -} -func (cm *cacheManager) GetMutable(ctx context.Context, id string) (MutableRef, error) { - cm.mu.Lock() - defer cm.mu.Unlock() - - rec, err := cm.getRecord(ctx, id, false) - if err != nil { - return nil, err - } - - rec.mu.Lock() - defer rec.mu.Unlock() - if !rec.mutable { - return nil, errors.Wrapf(errInvalid, "%s is not mutable", id) - } - - if len(rec.refs) != 0 { - return nil, errors.Wrapf(ErrLocked, "%s is locked", id) - } - - if rec.equalImmutable != nil { - if len(rec.equalImmutable.refs) != 0 { - return nil, errors.Wrapf(ErrLocked, "%s is locked", id) - } - delete(cm.records, rec.equalImmutable.ID()) - if err := rec.equalImmutable.remove(ctx, false); err 
!= nil { - return nil, err - } - rec.equalImmutable = nil - } - - return rec.mref(true), nil -} - -func (cm *cacheManager) Prune(ctx context.Context, ch chan client.UsageInfo, opts ...client.PruneInfo) error { - cm.muPrune.Lock() - defer cm.muPrune.Unlock() - - for _, opt := range opts { - if err := cm.pruneOnce(ctx, ch, opt); err != nil { - return err - } - } - return nil -} - -func (cm *cacheManager) pruneOnce(ctx context.Context, ch chan client.UsageInfo, opt client.PruneInfo) error { - filter, err := filters.ParseAll(opt.Filter...) - if err != nil { - return err - } - - var check ExternalRefChecker - if f := cm.PruneRefChecker; f != nil && (!opt.All || len(opt.Filter) > 0) { - c, err := f() - if err != nil { - return err - } - check = c - } - - totalSize := int64(0) - if opt.KeepBytes != 0 { - du, err := cm.DiskUsage(ctx, client.DiskUsageInfo{}) - if err != nil { - return err - } - for _, ui := range du { - if check != nil { - if check.Exists(ui.ID) { - continue - } - } - totalSize += ui.Size - } - } - - return cm.prune(ctx, ch, pruneOpt{ - filter: filter, - all: opt.All, - checkShared: check, - keepDuration: opt.KeepDuration, - keepBytes: opt.KeepBytes, - totalSize: totalSize, - }) -} - -func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt pruneOpt) error { - var toDelete []*deleteRecord - - if opt.keepBytes != 0 && opt.totalSize < opt.keepBytes { - return nil - } - - cm.mu.Lock() - - gcMode := opt.keepBytes != 0 - cutOff := time.Now().Add(-opt.keepDuration) - - locked := map[*sync.Mutex]struct{}{} - - for _, cr := range cm.records { - if _, ok := locked[cr.mu]; ok { - continue - } - cr.mu.Lock() - - // ignore duplicates that share data - if cr.equalImmutable != nil && len(cr.equalImmutable.refs) > 0 || cr.equalMutable != nil && len(cr.refs) == 0 { - cr.mu.Unlock() - continue - } - - if cr.isDead() { - cr.mu.Unlock() - continue - } - - if len(cr.refs) == 0 { - recordType := GetRecordType(cr) - if recordType == "" { - recordType = client.UsageRecordTypeRegular - } - - shared := false - if opt.checkShared != nil { - shared = opt.checkShared.Exists(cr.ID()) - } - - if !opt.all { - if recordType == client.UsageRecordTypeInternal || recordType == client.UsageRecordTypeFrontend || shared { - cr.mu.Unlock() - continue - } - } - - c := &client.UsageInfo{ - ID: cr.ID(), - Mutable: cr.mutable, - RecordType: recordType, - Shared: shared, - } - - usageCount, lastUsedAt := getLastUsed(cr.md) - c.LastUsedAt = lastUsedAt - c.UsageCount = usageCount - - if opt.keepDuration != 0 { - if lastUsedAt != nil && lastUsedAt.After(cutOff) { - cr.mu.Unlock() - continue - } - } - - if opt.filter.Match(adaptUsageInfo(c)) { - toDelete = append(toDelete, &deleteRecord{ - cacheRecord: cr, - lastUsedAt: c.LastUsedAt, - usageCount: c.UsageCount, - }) - if !gcMode { - cr.dead = true - - // mark metadata as deleted in case we crash before cleanup finished - if err := setDeleted(cr.md); err != nil { - cr.mu.Unlock() - cm.mu.Unlock() - return err - } - } else { - locked[cr.mu] = struct{}{} - continue // leave the record locked - } - } - } - cr.mu.Unlock() - } - - if gcMode && len(toDelete) > 0 { - sortDeleteRecords(toDelete) - var err error - for i, cr := range toDelete { - // only remove single record at a time - if i == 0 { - cr.dead = true - err = setDeleted(cr.md) - } - cr.mu.Unlock() - } - if err != nil { - return err - } - toDelete = toDelete[:1] - } - - cm.mu.Unlock() - - if len(toDelete) == 0 { - return nil - } - - var err error - for _, cr := range toDelete { - cr.mu.Lock() - - 
usageCount, lastUsedAt := getLastUsed(cr.md) - - c := client.UsageInfo{ - ID: cr.ID(), - Mutable: cr.mutable, - InUse: len(cr.refs) > 0, - Size: getSize(cr.md), - CreatedAt: GetCreatedAt(cr.md), - Description: GetDescription(cr.md), - LastUsedAt: lastUsedAt, - UsageCount: usageCount, - } - - if cr.parent != nil { - c.Parent = cr.parent.ID() - } - if c.Size == sizeUnknown && cr.equalImmutable != nil { - c.Size = getSize(cr.equalImmutable.md) // benefit from DiskUsage calc - } - if c.Size == sizeUnknown { - cr.mu.Unlock() // all the non-prune modifications already protected by cr.dead - s, err := cr.Size(ctx) - if err != nil { - return err - } - c.Size = s - cr.mu.Lock() - } - - opt.totalSize -= c.Size - - if cr.equalImmutable != nil { - if err1 := cr.equalImmutable.remove(ctx, false); err == nil { - err = err1 - } - } - if err1 := cr.remove(ctx, true); err == nil { - err = err1 - } - - if err == nil && ch != nil { - ch <- c - } - cr.mu.Unlock() - } - if err != nil { - return err - } - - select { - case <-ctx.Done(): - return ctx.Err() - default: - return cm.prune(ctx, ch, opt) - } -} - -func (cm *cacheManager) markShared(m map[string]*cacheUsageInfo) error { - if cm.PruneRefChecker == nil { - return nil - } - c, err := cm.PruneRefChecker() - if err != nil { - return err - } - - var markAllParentsShared func(string) - markAllParentsShared = func(id string) { - if v, ok := m[id]; ok { - v.shared = true - if v.parent != "" { - markAllParentsShared(v.parent) - } - } - } - - for id := range m { - if m[id].shared { - continue - } - if b := c.Exists(id); b { - markAllParentsShared(id) - } - } - return nil -} - -type cacheUsageInfo struct { - refs int - parent string - size int64 - mutable bool - createdAt time.Time - usageCount int - lastUsedAt *time.Time - description string - doubleRef bool - recordType client.UsageRecordType - shared bool -} - -func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) { - filter, err := filters.ParseAll(opt.Filter...) 
- if err != nil { - return nil, err - } - - cm.mu.Lock() - - m := make(map[string]*cacheUsageInfo, len(cm.records)) - rescan := make(map[string]struct{}, len(cm.records)) - - for id, cr := range cm.records { - cr.mu.Lock() - // ignore duplicates that share data - if cr.equalImmutable != nil && len(cr.equalImmutable.refs) > 0 || cr.equalMutable != nil && len(cr.refs) == 0 { - cr.mu.Unlock() - continue - } - - usageCount, lastUsedAt := getLastUsed(cr.md) - c := &cacheUsageInfo{ - refs: len(cr.refs), - mutable: cr.mutable, - size: getSize(cr.md), - createdAt: GetCreatedAt(cr.md), - usageCount: usageCount, - lastUsedAt: lastUsedAt, - description: GetDescription(cr.md), - doubleRef: cr.equalImmutable != nil, - recordType: GetRecordType(cr), - } - if c.recordType == "" { - c.recordType = client.UsageRecordTypeRegular - } - if cr.parent != nil { - c.parent = cr.parent.ID() - } - if cr.mutable && c.refs > 0 { - c.size = 0 // size can not be determined because it is changing - } - m[id] = c - rescan[id] = struct{}{} - cr.mu.Unlock() - } - cm.mu.Unlock() - - for { - if len(rescan) == 0 { - break - } - for id := range rescan { - v := m[id] - if v.refs == 0 && v.parent != "" { - m[v.parent].refs-- - if v.doubleRef { - m[v.parent].refs-- - } - rescan[v.parent] = struct{}{} - } - delete(rescan, id) - } - } - - if err := cm.markShared(m); err != nil { - return nil, err - } - - var du []*client.UsageInfo - for id, cr := range m { - c := &client.UsageInfo{ - ID: id, - Mutable: cr.mutable, - InUse: cr.refs > 0, - Size: cr.size, - Parent: cr.parent, - CreatedAt: cr.createdAt, - Description: cr.description, - LastUsedAt: cr.lastUsedAt, - UsageCount: cr.usageCount, - RecordType: cr.recordType, - Shared: cr.shared, - } - if filter.Match(adaptUsageInfo(c)) { - du = append(du, c) - } - } - - eg, ctx := errgroup.WithContext(ctx) - - for _, d := range du { - if d.Size == sizeUnknown { - func(d *client.UsageInfo) { - eg.Go(func() error { - ref, err := cm.Get(ctx, d.ID, NoUpdateLastUsed) - if err != nil { - d.Size = 0 - return nil - } - s, err := ref.Size(ctx) - if err != nil { - return err - } - d.Size = s - return ref.Release(context.TODO()) - }) - }(d) - } - } - - if err := eg.Wait(); err != nil { - return du, err - } - - return du, nil -} - -func IsLocked(err error) bool { - return errors.Cause(err) == ErrLocked -} - -func IsNotFound(err error) bool { - return errors.Cause(err) == errNotFound -} - -type RefOption interface{} - -type cachePolicy int - -const ( - cachePolicyDefault cachePolicy = iota - cachePolicyRetain -) - -type withMetadata interface { - Metadata() *metadata.StorageItem -} - -type noUpdateLastUsed struct{} - -var NoUpdateLastUsed noUpdateLastUsed - -func HasCachePolicyRetain(m withMetadata) bool { - return getCachePolicy(m.Metadata()) == cachePolicyRetain -} - -func CachePolicyRetain(m withMetadata) error { - return queueCachePolicy(m.Metadata(), cachePolicyRetain) -} - -func WithDescription(descr string) RefOption { - return func(m withMetadata) error { - return queueDescription(m.Metadata(), descr) - } -} - -func WithRecordType(t client.UsageRecordType) RefOption { - return func(m withMetadata) error { - return queueRecordType(m.Metadata(), t) - } -} - -func WithCreationTime(tm time.Time) RefOption { - return func(m withMetadata) error { - return queueCreatedAt(m.Metadata(), tm) - } -} - -func initializeMetadata(m withMetadata, opts ...RefOption) error { - md := m.Metadata() - if tm := GetCreatedAt(md); !tm.IsZero() { - return nil - } - - if err := queueCreatedAt(md, time.Now()); err != nil { 
- return err - } - - for _, opt := range opts { - if fn, ok := opt.(func(withMetadata) error); ok { - if err := fn(m); err != nil { - return err - } - } - } - - return md.Commit() -} - -func adaptUsageInfo(info *client.UsageInfo) filters.Adaptor { - return filters.AdapterFunc(func(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "id": - return info.ID, info.ID != "" - case "parent": - return info.Parent, info.Parent != "" - case "description": - return info.Description, info.Description != "" - case "inuse": - return "", info.InUse - case "mutable": - return "", info.Mutable - case "immutable": - return "", !info.Mutable - case "type": - return string(info.RecordType), info.RecordType != "" - case "shared": - return "", info.Shared - case "private": - return "", !info.Shared - } - - // TODO: add int/datetime/bytes support for more fields - - return "", false - }) -} - -type pruneOpt struct { - filter filters.Filter - all bool - checkShared ExternalRefChecker - keepDuration time.Duration - keepBytes int64 - totalSize int64 -} - -type deleteRecord struct { - *cacheRecord - lastUsedAt *time.Time - usageCount int - lastUsedAtIndex int - usageCountIndex int -} - -func sortDeleteRecords(toDelete []*deleteRecord) { - sort.Slice(toDelete, func(i, j int) bool { - if toDelete[i].lastUsedAt == nil { - return true - } - if toDelete[j].lastUsedAt == nil { - return false - } - return toDelete[i].lastUsedAt.Before(*toDelete[j].lastUsedAt) - }) - - maxLastUsedIndex := 0 - var val time.Time - for _, v := range toDelete { - if v.lastUsedAt != nil && v.lastUsedAt.After(val) { - val = *v.lastUsedAt - maxLastUsedIndex++ - } - v.lastUsedAtIndex = maxLastUsedIndex - } - - sort.Slice(toDelete, func(i, j int) bool { - return toDelete[i].usageCount < toDelete[j].usageCount - }) - - maxUsageCountIndex := 0 - var count int - for _, v := range toDelete { - if v.usageCount != count { - count = v.usageCount - maxUsageCountIndex++ - } - v.usageCountIndex = maxUsageCountIndex - } - - sort.Slice(toDelete, func(i, j int) bool { - return float64(toDelete[i].lastUsedAtIndex)/float64(maxLastUsedIndex)+ - float64(toDelete[i].usageCountIndex)/float64(maxUsageCountIndex) < - float64(toDelete[j].lastUsedAtIndex)/float64(maxLastUsedIndex)+ - float64(toDelete[j].usageCountIndex)/float64(maxUsageCountIndex) - }) -} diff --git a/vendor/github.com/moby/buildkit/cache/manager_test.go b/vendor/github.com/moby/buildkit/cache/manager_test.go deleted file mode 100644 index 3468bce1111c..000000000000 --- a/vendor/github.com/moby/buildkit/cache/manager_test.go +++ /dev/null @@ -1,419 +0,0 @@ -package cache - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/snapshots" - "github.com/containerd/containerd/snapshots/native" - "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/snapshot" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -func TestManager(t *testing.T) { - t.Parallel() - ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) - require.NoError(t, err) - cm := getCacheManager(t, tmpdir, snapshotter) - - _, err = cm.Get(ctx, "foobar") - require.Error(t, err) - - 
checkDiskUsage(ctx, t, cm, 0, 0) - - active, err := cm.New(ctx, nil, CachePolicyRetain) - require.NoError(t, err) - - m, err := active.Mount(ctx, false) - require.NoError(t, err) - - lm := snapshot.LocalMounter(m) - target, err := lm.Mount() - require.NoError(t, err) - - fi, err := os.Stat(target) - require.NoError(t, err) - require.Equal(t, fi.IsDir(), true) - - err = lm.Unmount() - require.NoError(t, err) - - _, err = cm.GetMutable(ctx, active.ID()) - require.Error(t, err) - require.Equal(t, ErrLocked, errors.Cause(err)) - - checkDiskUsage(ctx, t, cm, 1, 0) - - snap, err := active.Commit(ctx) - require.NoError(t, err) - - checkDiskUsage(ctx, t, cm, 1, 0) - - _, err = cm.GetMutable(ctx, active.ID()) - require.Error(t, err) - require.Equal(t, ErrLocked, errors.Cause(err)) - - err = snap.Release(ctx) - require.NoError(t, err) - - checkDiskUsage(ctx, t, cm, 0, 1) - - active, err = cm.GetMutable(ctx, active.ID()) - require.NoError(t, err) - - checkDiskUsage(ctx, t, cm, 1, 0) - - snap, err = active.Commit(ctx) - require.NoError(t, err) - - checkDiskUsage(ctx, t, cm, 1, 0) - - err = snap.Finalize(ctx, true) - require.NoError(t, err) - - err = snap.Release(ctx) - require.NoError(t, err) - - _, err = cm.GetMutable(ctx, active.ID()) - require.Error(t, err) - require.Equal(t, errNotFound, errors.Cause(err)) - - _, err = cm.GetMutable(ctx, snap.ID()) - require.Error(t, err) - require.Equal(t, errInvalid, errors.Cause(err)) - - snap, err = cm.Get(ctx, snap.ID()) - require.NoError(t, err) - - snap2, err := cm.Get(ctx, snap.ID()) - require.NoError(t, err) - - checkDiskUsage(ctx, t, cm, 1, 0) - - err = snap.Release(ctx) - require.NoError(t, err) - - active2, err := cm.New(ctx, snap2, CachePolicyRetain) - require.NoError(t, err) - - checkDiskUsage(ctx, t, cm, 2, 0) - - snap3, err := active2.Commit(ctx) - require.NoError(t, err) - - err = snap2.Release(ctx) - require.NoError(t, err) - - checkDiskUsage(ctx, t, cm, 2, 0) - - err = snap3.Release(ctx) - require.NoError(t, err) - - checkDiskUsage(ctx, t, cm, 0, 2) - - buf := pruneResultBuffer() - err = cm.Prune(ctx, buf.C, client.PruneInfo{}) - buf.close() - require.NoError(t, err) - - checkDiskUsage(ctx, t, cm, 0, 0) - - require.Equal(t, len(buf.all), 2) - - err = cm.Close() - require.NoError(t, err) - - dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) - require.NoError(t, err) - require.Equal(t, 0, len(dirs)) -} - -func TestPrune(t *testing.T) { - t.Parallel() - ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) - require.NoError(t, err) - cm := getCacheManager(t, tmpdir, snapshotter) - - active, err := cm.New(ctx, nil) - require.NoError(t, err) - - snap, err := active.Commit(ctx) - require.NoError(t, err) - - active, err = cm.New(ctx, snap, CachePolicyRetain) - require.NoError(t, err) - - snap2, err := active.Commit(ctx) - require.NoError(t, err) - - checkDiskUsage(ctx, t, cm, 2, 0) - - // prune with keeping refs does nothing - buf := pruneResultBuffer() - err = cm.Prune(ctx, buf.C, client.PruneInfo{}) - buf.close() - require.NoError(t, err) - - checkDiskUsage(ctx, t, cm, 2, 0) - require.Equal(t, len(buf.all), 0) - - dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) - require.NoError(t, err) - require.Equal(t, 2, len(dirs)) - - err = snap2.Release(ctx) - require.NoError(t, err) - - checkDiskUsage(ctx, 
t, cm, 1, 1)
-
-  // prune with keeping single refs deletes one
-  buf = pruneResultBuffer()
-  err = cm.Prune(ctx, buf.C, client.PruneInfo{})
-  buf.close()
-  require.NoError(t, err)
-
-  checkDiskUsage(ctx, t, cm, 1, 0)
-  require.Equal(t, len(buf.all), 1)
-
-  dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
-  require.NoError(t, err)
-  require.Equal(t, 1, len(dirs))
-
-  err = snap.Release(ctx)
-  require.NoError(t, err)
-
-  active, err = cm.New(ctx, snap, CachePolicyRetain)
-  require.NoError(t, err)
-
-  snap2, err = active.Commit(ctx)
-  require.NoError(t, err)
-
-  err = snap.Release(ctx)
-  require.NoError(t, err)
-
-  checkDiskUsage(ctx, t, cm, 2, 0)
-
-  // prune with parent released does nothing
-  buf = pruneResultBuffer()
-  err = cm.Prune(ctx, buf.C, client.PruneInfo{})
-  buf.close()
-  require.NoError(t, err)
-
-  checkDiskUsage(ctx, t, cm, 2, 0)
-  require.Equal(t, len(buf.all), 0)
-
-  // releasing last reference
-  err = snap2.Release(ctx)
-  require.NoError(t, err)
-  checkDiskUsage(ctx, t, cm, 0, 2)
-
-  buf = pruneResultBuffer()
-  err = cm.Prune(ctx, buf.C, client.PruneInfo{})
-  buf.close()
-  require.NoError(t, err)
-
-  checkDiskUsage(ctx, t, cm, 0, 0)
-  require.Equal(t, len(buf.all), 2)
-
-  dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
-  require.NoError(t, err)
-  require.Equal(t, 0, len(dirs))
-}
-
-func TestLazyCommit(t *testing.T) {
-  t.Parallel()
-  ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
-
-  tmpdir, err := ioutil.TempDir("", "cachemanager")
-  require.NoError(t, err)
-  defer os.RemoveAll(tmpdir)
-
-  snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
-  require.NoError(t, err)
-  cm := getCacheManager(t, tmpdir, snapshotter)
-
-  active, err := cm.New(ctx, nil, CachePolicyRetain)
-  require.NoError(t, err)
-
-  // after commit mutable is locked
-  snap, err := active.Commit(ctx)
-  require.NoError(t, err)
-
-  _, err = cm.GetMutable(ctx, active.ID())
-  require.Error(t, err)
-  require.Equal(t, ErrLocked, errors.Cause(err))
-
-  // immutable refs still work
-  snap2, err := cm.Get(ctx, snap.ID())
-  require.NoError(t, err)
-  require.Equal(t, snap.ID(), snap2.ID())
-
-  err = snap.Release(ctx)
-  require.NoError(t, err)
-
-  err = snap2.Release(ctx)
-  require.NoError(t, err)
-
-  // immutable refs work after the final release as well
-  snap, err = cm.Get(ctx, snap.ID())
-  require.NoError(t, err)
-  require.Equal(t, snap.ID(), snap2.ID())
-
-  // the mutable ref can't be taken while an immutable ref is held
-  _, err = cm.GetMutable(ctx, active.ID())
-  require.Error(t, err)
-  require.Equal(t, ErrLocked, errors.Cause(err))
-
-  err = snap.Release(ctx)
-  require.NoError(t, err)
-
-  // after release mutable becomes available again
-  active2, err := cm.GetMutable(ctx, active.ID())
-  require.NoError(t, err)
-  require.Equal(t, active2.ID(), active.ID())
-
-  // because the mutable ref was taken, the old immutable records are cleared
-  _, err = cm.Get(ctx, snap.ID())
-  require.Error(t, err)
-  require.Equal(t, errNotFound, errors.Cause(err))
-
-  snap, err = active2.Commit(ctx)
-  require.NoError(t, err)
-
-  // this time finalize commit
-  err = snap.Finalize(ctx, true)
-  require.NoError(t, err)
-
-  err = snap.Release(ctx)
-  require.NoError(t, err)
-
-  // mutable is gone after finalize
-  _, err = cm.GetMutable(ctx, active2.ID())
-  require.Error(t, err)
-  require.Equal(t, errNotFound, errors.Cause(err))
-
-  // immutable still works
-  snap2, err = cm.Get(ctx, snap.ID())
-  require.NoError(t, err)
-  require.Equal(t, snap.ID(), snap2.ID())
-
-  err = snap2.Release(ctx)
-  require.NoError(t, err)
-
-  // test restarting after commit
-  active, err = cm.New(ctx, nil, CachePolicyRetain)
-  require.NoError(t, err)
-
-  // after commit mutable is locked
-  snap, err = active.Commit(ctx)
-  require.NoError(t, err)
-
-  err = cm.Close()
-  require.NoError(t, err)
-
-  // we can't close the snapshotter and open it twice (especially its internal bbolt store)
-  cm = getCacheManager(t, tmpdir, snapshotter)
-
-  snap2, err = cm.Get(ctx, snap.ID())
-  require.NoError(t, err)
-
-  err = snap2.Release(ctx)
-  require.NoError(t, err)
-
-  active, err = cm.GetMutable(ctx, active.ID())
-  require.NoError(t, err)
-
-  _, err = cm.Get(ctx, snap.ID())
-  require.Error(t, err)
-  require.Equal(t, errNotFound, errors.Cause(err))
-
-  snap, err = active.Commit(ctx)
-  require.NoError(t, err)
-
-  err = cm.Close()
-  require.NoError(t, err)
-
-  cm = getCacheManager(t, tmpdir, snapshotter)
-
-  snap2, err = cm.Get(ctx, snap.ID())
-  require.NoError(t, err)
-
-  err = snap2.Finalize(ctx, true)
-  require.NoError(t, err)
-
-  err = snap2.Release(ctx)
-  require.NoError(t, err)
-
-  active, err = cm.GetMutable(ctx, active.ID())
-  require.Error(t, err)
-  require.Equal(t, errNotFound, errors.Cause(err))
-}
-
-func getCacheManager(t *testing.T, tmpdir string, snapshotter snapshots.Snapshotter) Manager {
-  md, err := metadata.NewStore(filepath.Join(tmpdir, "metadata.db"))
-  require.NoError(t, err)
-
-  cm, err := NewManager(ManagerOpt{
-    Snapshotter:   snapshot.FromContainerdSnapshotter(snapshotter),
-    MetadataStore: md,
-  })
-  require.NoError(t, err, fmt.Sprintf("error: %+v", err))
-  return cm
-}
-
-func checkDiskUsage(ctx context.Context, t *testing.T, cm Manager, inuse, unused int) {
-  du, err := cm.DiskUsage(ctx, client.DiskUsageInfo{})
-  require.NoError(t, err)
-  var inuseActual, unusedActual int
-  for _, r := range du {
-    if r.InUse {
-      inuseActual++
-    } else {
-      unusedActual++
-    }
-  }
-  require.Equal(t, inuse, inuseActual)
-  require.Equal(t, unused, unusedActual)
-}
-
-func pruneResultBuffer() *buf {
-  b := &buf{C: make(chan client.UsageInfo), closed: make(chan struct{})}
-  go func() {
-    for c := range b.C {
-      b.all = append(b.all, c)
-    }
-    close(b.closed)
-  }()
-  return b
-}
-
-type buf struct {
-  C      chan client.UsageInfo
-  closed chan struct{}
-  all    []client.UsageInfo
-}
-
-func (b *buf) close() {
-  close(b.C)
-  <-b.closed
-}
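The StorageItem type in the metadata package (deleted below) batches writes: setters queue closures, and Commit later flushes the whole queue inside a single bolt transaction. The shape of that pattern in isolation, with a plain map standing in for the *bolt.Bucket:

```go
package main

import "fmt"

// update is a stand-in for the func(*bolt.Bucket) error closures that the
// real Queue/Commit pair works with.
type update func(kv map[string]string)

type item struct {
	queue []update
}

func (i *item) Queue(fn update) { i.queue = append(i.queue, fn) }

// Commit applies every queued mutation in one shot, mirroring how
// StorageItem.Commit runs its queue inside one bolt transaction.
func (i *item) Commit(kv map[string]string) {
	for _, fn := range i.queue {
		fn(kv)
	}
	i.queue = i.queue[:0]
}

func main() {
	si := &item{}
	si.Queue(func(kv map[string]string) { kv["cache.description"] = "layer A" })
	si.Queue(func(kv map[string]string) { kv["cache.usageCount"] = "3" })

	store := map[string]string{}
	si.Commit(store) // one "transaction" for both writes
	fmt.Println(store)
}
```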
diff --git a/vendor/github.com/moby/buildkit/cache/metadata.go b/vendor/github.com/moby/buildkit/cache/metadata.go
deleted file mode 100644
index 9929868844c9..000000000000
--- a/vendor/github.com/moby/buildkit/cache/metadata.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package cache
-
-import (
-  "time"
-
-  "github.com/moby/buildkit/cache/metadata"
-  "github.com/moby/buildkit/client"
-  "github.com/pkg/errors"
-  bolt "go.etcd.io/bbolt"
-)
-
-const sizeUnknown int64 = -1
-const keySize = "snapshot.size"
-const keyEqualMutable = "cache.equalMutable"
-const keyCachePolicy = "cache.cachePolicy"
-const keyDescription = "cache.description"
-const keyCreatedAt = "cache.createdAt"
-const keyLastUsedAt = "cache.lastUsedAt"
-const keyUsageCount = "cache.usageCount"
-const keyLayerType = "cache.layerType"
-const keyRecordType = "cache.recordType"
-
-const keyDeleted = "cache.deleted"
-
-func setDeleted(si *metadata.StorageItem) error {
-  v, err := metadata.NewValue(true)
-  if err != nil {
-    return errors.Wrap(err, "failed to create deleted value")
-  }
-  si.Update(func(b *bolt.Bucket) error {
-    return si.SetValue(b, keyDeleted, v)
-  })
-  return nil
-}
-
-func getDeleted(si *metadata.StorageItem) bool {
-  v := si.Get(keyDeleted)
-  if v == nil {
-    return false
-  }
-  var deleted bool
-  if err := v.Unmarshal(&deleted); err != nil {
-    return false
-  }
-  return deleted
-}
-
-func setSize(si *metadata.StorageItem, s int64) error {
-  v, err := metadata.NewValue(s)
-  if err != nil {
-    return errors.Wrap(err, "failed to create size value")
-  }
-  si.Queue(func(b *bolt.Bucket) error {
-    return si.SetValue(b, keySize, v)
-  })
-  return nil
-}
-
-func getSize(si *metadata.StorageItem) int64 {
-  v := si.Get(keySize)
-  if v == nil {
-    return sizeUnknown
-  }
-  var size int64
-  if err := v.Unmarshal(&size); err != nil {
-    return sizeUnknown
-  }
-  return size
-}
-
-func getEqualMutable(si *metadata.StorageItem) string {
-  v := si.Get(keyEqualMutable)
-  if v == nil {
-    return ""
-  }
-  var str string
-  if err := v.Unmarshal(&str); err != nil {
-    return ""
-  }
-  return str
-}
-
-func setEqualMutable(si *metadata.StorageItem, s string) error {
-  v, err := metadata.NewValue(s)
-  if err != nil {
-    return errors.Wrapf(err, "failed to create %s meta value", keyEqualMutable)
-  }
-  si.Queue(func(b *bolt.Bucket) error {
-    return si.SetValue(b, keyEqualMutable, v)
-  })
-  return nil
-}
-
-func clearEqualMutable(si *metadata.StorageItem) error {
-  si.Queue(func(b *bolt.Bucket) error {
-    return si.SetValue(b, keyEqualMutable, nil)
-  })
-  return nil
-}
-
-func queueCachePolicy(si *metadata.StorageItem, p cachePolicy) error {
-  v, err := metadata.NewValue(p)
-  if err != nil {
-    return errors.Wrap(err, "failed to create cachePolicy value")
-  }
-  si.Queue(func(b *bolt.Bucket) error {
-    return si.SetValue(b, keyCachePolicy, v)
-  })
-  return nil
-}
-
-func getCachePolicy(si *metadata.StorageItem) cachePolicy {
-  v := si.Get(keyCachePolicy)
-  if v == nil {
-    return cachePolicyDefault
-  }
-  var p cachePolicy
-  if err := v.Unmarshal(&p); err != nil {
-    return cachePolicyDefault
-  }
-  return p
-}
-
-func queueDescription(si *metadata.StorageItem, descr string) error {
-  v, err := metadata.NewValue(descr)
-  if err != nil {
-    return errors.Wrap(err, "failed to create description value")
-  }
-  si.Queue(func(b *bolt.Bucket) error {
-    return si.SetValue(b, keyDescription, v)
-  })
-  return nil
-}
-
-func GetDescription(si *metadata.StorageItem) string {
-  v := si.Get(keyDescription)
-  if v == nil {
-    return ""
-  }
-  var str string
-  if err := v.Unmarshal(&str); err != nil {
-    return ""
-  }
-  return str
-}
-
-func queueCreatedAt(si *metadata.StorageItem, tm time.Time) error {
-  v, err := metadata.NewValue(tm.UnixNano())
-  if err != nil {
-    return errors.Wrap(err, "failed to create createdAt value")
-  }
-  si.Queue(func(b *bolt.Bucket) error {
-    return si.SetValue(b, keyCreatedAt, v)
-  })
-  return nil
-}
-
-func GetCreatedAt(si *metadata.StorageItem) time.Time {
-  v := si.Get(keyCreatedAt)
-  if v == nil {
-    return time.Time{}
-  }
-  var tm int64
-  if err := v.Unmarshal(&tm); err != nil {
-    return time.Time{}
-  }
-  return time.Unix(tm/1e9, tm%1e9)
-}
-
-func getLastUsed(si *metadata.StorageItem) (int, *time.Time) {
-  v := si.Get(keyUsageCount)
-  if v == nil {
-    return 0, nil
-  }
-  var usageCount int
-  if err := v.Unmarshal(&usageCount); err != nil {
-    return 0, nil
-  }
-  v = si.Get(keyLastUsedAt)
-  if v == nil {
-    return usageCount, nil
-  }
-  var lastUsedTs int64
-  if err := v.Unmarshal(&lastUsedTs); err != nil || lastUsedTs == 0 {
-    return usageCount, nil
-  }
-  tm := time.Unix(lastUsedTs/1e9, lastUsedTs%1e9)
-  return usageCount, &tm
-}
-
-func updateLastUsed(si *metadata.StorageItem) error {
-  count, _ := getLastUsed(si)
-  count++
-
-  v, err :=
metadata.NewValue(count) - if err != nil { - return errors.Wrap(err, "failed to create usageCount value") - } - v2, err := metadata.NewValue(time.Now().UnixNano()) - if err != nil { - return errors.Wrap(err, "failed to create lastUsedAt value") - } - return si.Update(func(b *bolt.Bucket) error { - if err := si.SetValue(b, keyUsageCount, v); err != nil { - return err - } - return si.SetValue(b, keyLastUsedAt, v2) - }) -} - -func SetLayerType(m withMetadata, value string) error { - v, err := metadata.NewValue(value) - if err != nil { - return errors.Wrap(err, "failed to create layertype value") - } - m.Metadata().Queue(func(b *bolt.Bucket) error { - return m.Metadata().SetValue(b, keyLayerType, v) - }) - return m.Metadata().Commit() -} - -func GetLayerType(m withMetadata) string { - v := m.Metadata().Get(keyLayerType) - if v == nil { - return "" - } - var str string - if err := v.Unmarshal(&str); err != nil { - return "" - } - return str -} - -func GetRecordType(m withMetadata) client.UsageRecordType { - v := m.Metadata().Get(keyRecordType) - if v == nil { - return "" - } - var str string - if err := v.Unmarshal(&str); err != nil { - return "" - } - return client.UsageRecordType(str) -} - -func SetRecordType(m withMetadata, value client.UsageRecordType) error { - if err := queueRecordType(m.Metadata(), value); err != nil { - return err - } - return m.Metadata().Commit() -} - -func queueRecordType(si *metadata.StorageItem, value client.UsageRecordType) error { - v, err := metadata.NewValue(value) - if err != nil { - return errors.Wrap(err, "failed to create recordtype value") - } - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyRecordType, v) - }) - return nil -} diff --git a/vendor/github.com/moby/buildkit/cache/metadata/metadata.go b/vendor/github.com/moby/buildkit/cache/metadata/metadata.go deleted file mode 100644 index 9da270b4e619..000000000000 --- a/vendor/github.com/moby/buildkit/cache/metadata/metadata.go +++ /dev/null @@ -1,382 +0,0 @@ -package metadata - -import ( - "bytes" - "encoding/json" - "strings" - "sync" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - bolt "go.etcd.io/bbolt" -) - -const ( - mainBucket = "_main" - indexBucket = "_index" - externalBucket = "_external" -) - -var errNotFound = errors.Errorf("not found") - -type Store struct { - db *bolt.DB -} - -func NewStore(dbPath string) (*Store, error) { - db, err := bolt.Open(dbPath, 0600, nil) - if err != nil { - return nil, errors.Wrapf(err, "failed to open database file %s", dbPath) - } - return &Store{db: db}, nil -} - -func (s *Store) DB() *bolt.DB { - return s.db -} - -func (s *Store) All() ([]*StorageItem, error) { - var out []*StorageItem - err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(mainBucket)) - if b == nil { - return nil - } - return b.ForEach(func(key, _ []byte) error { - b := b.Bucket(key) - if b == nil { - return nil - } - si, err := newStorageItem(string(key), b, s) - if err != nil { - return err - } - out = append(out, si) - return nil - }) - }) - return out, err -} - -func (s *Store) Probe(index string) (bool, error) { - var exists bool - err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(indexBucket)) - if b == nil { - return nil - } - main := tx.Bucket([]byte(mainBucket)) - if main == nil { - return nil - } - search := []byte(indexKey(index, "")) - c := b.Cursor() - k, _ := c.Seek(search) - if k != nil && bytes.HasPrefix(k, search) { - exists = true - } - return nil - }) - return exists, err -} - -func (s *Store) Search(index string) 
([]*StorageItem, error) { - var out []*StorageItem - err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(indexBucket)) - if b == nil { - return nil - } - main := tx.Bucket([]byte(mainBucket)) - if main == nil { - return nil - } - index = indexKey(index, "") - c := b.Cursor() - k, _ := c.Seek([]byte(index)) - for { - if k != nil && strings.HasPrefix(string(k), index) { - itemID := strings.TrimPrefix(string(k), index) - k, _ = c.Next() - b := main.Bucket([]byte(itemID)) - if b == nil { - logrus.Errorf("index pointing to missing record %s", itemID) - continue - } - si, err := newStorageItem(itemID, b, s) - if err != nil { - return err - } - out = append(out, si) - } else { - break - } - } - return nil - }) - return out, err -} - -func (s *Store) View(id string, fn func(b *bolt.Bucket) error) error { - return s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(mainBucket)) - if b == nil { - return errors.WithStack(errNotFound) - } - b = b.Bucket([]byte(id)) - if b == nil { - return errors.WithStack(errNotFound) - } - return fn(b) - }) -} - -func (s *Store) Clear(id string) error { - return s.db.Update(func(tx *bolt.Tx) error { - external := tx.Bucket([]byte(externalBucket)) - if external != nil { - external.DeleteBucket([]byte(id)) - } - main := tx.Bucket([]byte(mainBucket)) - if main == nil { - return nil - } - b := main.Bucket([]byte(id)) - if b == nil { - return nil - } - si, err := newStorageItem(id, b, s) - if err != nil { - return err - } - if indexes := si.Indexes(); len(indexes) > 0 { - b := tx.Bucket([]byte(indexBucket)) - if b != nil { - for _, index := range indexes { - if err := b.Delete([]byte(indexKey(index, id))); err != nil { - return err - } - } - } - } - return main.DeleteBucket([]byte(id)) - }) -} - -func (s *Store) Update(id string, fn func(b *bolt.Bucket) error) error { - return s.db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte(mainBucket)) - if err != nil { - return err - } - b, err = b.CreateBucketIfNotExists([]byte(id)) - if err != nil { - return err - } - return fn(b) - }) -} - -func (s *Store) Get(id string) (*StorageItem, bool) { - empty := func() *StorageItem { - si, _ := newStorageItem(id, nil, s) - return si - } - tx, err := s.db.Begin(false) - if err != nil { - return empty(), false - } - defer tx.Rollback() - b := tx.Bucket([]byte(mainBucket)) - if b == nil { - return empty(), false - } - b = b.Bucket([]byte(id)) - if b == nil { - return empty(), false - } - si, _ := newStorageItem(id, b, s) - return si, true -} - -func (s *Store) Close() error { - return s.db.Close() -} - -type StorageItem struct { - id string - values map[string]*Value - queue []func(*bolt.Bucket) error - storage *Store - mu sync.RWMutex -} - -func newStorageItem(id string, b *bolt.Bucket, s *Store) (*StorageItem, error) { - si := &StorageItem{ - id: id, - storage: s, - values: make(map[string]*Value), - } - if b != nil { - if err := b.ForEach(func(k, v []byte) error { - var sv Value - if len(v) > 0 { - if err := json.Unmarshal(v, &sv); err != nil { - return err - } - si.values[string(k)] = &sv - } - return nil - }); err != nil { - return si, err - } - } - return si, nil -} - -func (s *StorageItem) Storage() *Store { // TODO: used in local source. how to remove this? 
- return s.storage -} - -func (s *StorageItem) ID() string { - return s.id -} - -func (s *StorageItem) View(fn func(b *bolt.Bucket) error) error { - return s.storage.View(s.id, fn) -} - -func (s *StorageItem) Update(fn func(b *bolt.Bucket) error) error { - return s.storage.Update(s.id, fn) -} - -func (s *StorageItem) Keys() []string { - keys := make([]string, 0, len(s.values)) - for k := range s.values { - keys = append(keys, k) - } - return keys -} - -func (s *StorageItem) Get(k string) *Value { - s.mu.RLock() - v := s.values[k] - s.mu.RUnlock() - return v -} - -func (s *StorageItem) GetExternal(k string) ([]byte, error) { - var dt []byte - err := s.storage.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(externalBucket)) - if b == nil { - return errors.WithStack(errNotFound) - } - b = b.Bucket([]byte(s.id)) - if b == nil { - return errors.WithStack(errNotFound) - } - dt = b.Get([]byte(k)) - if dt == nil { - return errors.WithStack(errNotFound) - } - return nil - }) - if err != nil { - return nil, err - } - return dt, nil -} - -func (s *StorageItem) SetExternal(k string, dt []byte) error { - return s.storage.db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte(externalBucket)) - if err != nil { - return err - } - b, err = b.CreateBucketIfNotExists([]byte(s.id)) - if err != nil { - return err - } - return b.Put([]byte(k), dt) - }) -} - -func (s *StorageItem) Queue(fn func(b *bolt.Bucket) error) { - s.mu.Lock() - defer s.mu.Unlock() - s.queue = append(s.queue, fn) -} - -func (s *StorageItem) Commit() error { - s.mu.Lock() - defer s.mu.Unlock() - return s.Update(func(b *bolt.Bucket) error { - for _, fn := range s.queue { - if err := fn(b); err != nil { - return err - } - } - s.queue = s.queue[:0] - return nil - }) -} - -func (s *StorageItem) Indexes() (out []string) { - for _, v := range s.values { - if v.Index != "" { - out = append(out, v.Index) - } - } - return -} - -func (s *StorageItem) SetValue(b *bolt.Bucket, key string, v *Value) error { - if v == nil { - if err := b.Put([]byte(key), nil); err != nil { - return err - } - delete(s.values, key) - return nil - } - dt, err := json.Marshal(v) - if err != nil { - return err - } - if err := b.Put([]byte(key), dt); err != nil { - return err - } - if v.Index != "" { - b, err := b.Tx().CreateBucketIfNotExists([]byte(indexBucket)) - if err != nil { - return err - } - if err := b.Put([]byte(indexKey(v.Index, s.ID())), []byte{}); err != nil { - return err - } - } - s.values[key] = v - return nil -} - -type Value struct { - Value json.RawMessage `json:"value,omitempty"` - Index string `json:"index,omitempty"` -} - -func NewValue(v interface{}) (*Value, error) { - dt, err := json.Marshal(v) - if err != nil { - return nil, err - } - return &Value{Value: json.RawMessage(dt)}, nil -} - -func (v *Value) Unmarshal(target interface{}) error { - err := json.Unmarshal(v.Value, target) - return err -} - -func indexKey(index, target string) string { - return index + "::" + target -} diff --git a/vendor/github.com/moby/buildkit/cache/metadata/metadata_test.go b/vendor/github.com/moby/buildkit/cache/metadata/metadata_test.go deleted file mode 100644 index 7e3d5b055df6..000000000000 --- a/vendor/github.com/moby/buildkit/cache/metadata/metadata_test.go +++ /dev/null @@ -1,212 +0,0 @@ -package metadata - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - bolt "go.etcd.io/bbolt" -) - -func TestGetSetSearch(t *testing.T) { - t.Parallel() - - tmpdir, err := ioutil.TempDir("", 
"buildkit-storage") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - dbPath := filepath.Join(tmpdir, "storage.db") - - s, err := NewStore(dbPath) - require.NoError(t, err) - defer s.Close() - - si, ok := s.Get("foo") - require.False(t, ok) - - v := si.Get("bar") - require.Nil(t, v) - - v, err = NewValue("foobar") - require.NoError(t, err) - - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, "bar", v) - }) - - err = si.Commit() - require.NoError(t, err) - - v = si.Get("bar") - require.NotNil(t, v) - - var str string - err = v.Unmarshal(&str) - require.NoError(t, err) - require.Equal(t, "foobar", str) - - err = s.Close() - require.NoError(t, err) - - s, err = NewStore(dbPath) - require.NoError(t, err) - defer s.Close() - - si, ok = s.Get("foo") - require.True(t, ok) - - v = si.Get("bar") - require.NotNil(t, v) - - str = "" - err = v.Unmarshal(&str) - require.NoError(t, err) - require.Equal(t, "foobar", str) - - // add second item to test Search - - si, ok = s.Get("foo2") - require.False(t, ok) - - v, err = NewValue("foobar2") - require.NoError(t, err) - - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, "bar2", v) - }) - - err = si.Commit() - require.NoError(t, err) - - sis, err := s.All() - require.NoError(t, err) - require.Equal(t, 2, len(sis)) - - require.Equal(t, "foo", sis[0].ID()) - require.Equal(t, "foo2", sis[1].ID()) - - v = sis[0].Get("bar") - require.NotNil(t, v) - - str = "" - err = v.Unmarshal(&str) - require.NoError(t, err) - require.Equal(t, "foobar", str) - - // clear foo, check that only foo2 exists - err = s.Clear(sis[0].ID()) - require.NoError(t, err) - - sis, err = s.All() - require.NoError(t, err) - require.Equal(t, 1, len(sis)) - - require.Equal(t, "foo2", sis[0].ID()) - - _, ok = s.Get("foo") - require.False(t, ok) -} - -func TestIndexes(t *testing.T) { - t.Parallel() - - tmpdir, err := ioutil.TempDir("", "buildkit-storage") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - dbPath := filepath.Join(tmpdir, "storage.db") - - s, err := NewStore(dbPath) - require.NoError(t, err) - defer s.Close() - - var tcases = []struct { - key, valueKey, value, index string - }{ - {"foo1", "bar", "val1", "tag:baz"}, - {"foo2", "bar", "val2", "tag:bax"}, - {"foo3", "bar", "val3", "tag:baz"}, - } - - for _, tcase := range tcases { - si, ok := s.Get(tcase.key) - require.False(t, ok) - - v, err := NewValue(tcase.valueKey) - require.NoError(t, err) - v.Index = tcase.index - - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, tcase.value, v) - }) - - err = si.Commit() - require.NoError(t, err) - } - - sis, err := s.Search("tag:baz") - require.NoError(t, err) - require.Equal(t, 2, len(sis)) - - require.Equal(t, sis[0].ID(), "foo1") - require.Equal(t, sis[1].ID(), "foo3") - - sis, err = s.Search("tag:bax") - require.NoError(t, err) - require.Equal(t, 1, len(sis)) - - require.Equal(t, sis[0].ID(), "foo2") - - err = s.Clear("foo1") - require.NoError(t, err) - - sis, err = s.Search("tag:baz") - require.NoError(t, err) - require.Equal(t, 1, len(sis)) - - require.Equal(t, sis[0].ID(), "foo3") -} - -func TestExternalData(t *testing.T) { - t.Parallel() - - tmpdir, err := ioutil.TempDir("", "buildkit-storage") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - dbPath := filepath.Join(tmpdir, "storage.db") - - s, err := NewStore(dbPath) - require.NoError(t, err) - defer s.Close() - - si, ok := s.Get("foo") - require.False(t, ok) - - err = si.SetExternal("ext1", []byte("data")) - require.NoError(t, err) - - dt, err := si.GetExternal("ext1") 
- require.NoError(t, err) - require.Equal(t, "data", string(dt)) - - si, ok = s.Get("bar") - require.False(t, ok) - - _, err = si.GetExternal("ext1") - require.Error(t, err) - - si, _ = s.Get("foo") - dt, err = si.GetExternal("ext1") - require.NoError(t, err) - require.Equal(t, "data", string(dt)) - - err = s.Clear("foo") - require.NoError(t, err) - - si, _ = s.Get("foo") - _, err = si.GetExternal("ext1") - require.Error(t, err) -} diff --git a/vendor/github.com/moby/buildkit/cache/refs.go b/vendor/github.com/moby/buildkit/cache/refs.go deleted file mode 100644 index 7521e0aa9af4..000000000000 --- a/vendor/github.com/moby/buildkit/cache/refs.go +++ /dev/null @@ -1,433 +0,0 @@ -package cache - -import ( - "context" - "sync" - - "github.com/containerd/containerd/mount" - "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/util/flightcontrol" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// Ref is a reference to cacheable objects. -type Ref interface { - Mountable - ID() string - Release(context.Context) error - Size(ctx context.Context) (int64, error) - Metadata() *metadata.StorageItem -} - -type ImmutableRef interface { - Ref - Parent() ImmutableRef - Finalize(ctx context.Context, commit bool) error // Make sure reference is flushed to driver - Clone() ImmutableRef -} - -type MutableRef interface { - Ref - Commit(context.Context) (ImmutableRef, error) -} - -type Mountable interface { - Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) -} - -type ref interface { - updateLastUsed() bool -} - -type cacheRecord struct { - cm *cacheManager - mu *sync.Mutex // the mutex is shared by records sharing data - - mutable bool - refs map[ref]struct{} - parent ImmutableRef - md *metadata.StorageItem - - // dead means record is marked as deleted - dead bool - - view string - viewMount snapshot.Mountable - - sizeG flightcontrol.Group - - // these are filled if multiple refs point to same data - equalMutable *mutableRef - equalImmutable *immutableRef -} - -// hold ref lock before calling -func (cr *cacheRecord) ref(triggerLastUsed bool) *immutableRef { - ref := &immutableRef{cacheRecord: cr, triggerLastUsed: triggerLastUsed} - cr.refs[ref] = struct{}{} - return ref -} - -// hold ref lock before calling -func (cr *cacheRecord) mref(triggerLastUsed bool) *mutableRef { - ref := &mutableRef{cacheRecord: cr, triggerLastUsed: triggerLastUsed} - cr.refs[ref] = struct{}{} - return ref -} - -// hold ref lock before calling -func (cr *cacheRecord) isDead() bool { - return cr.dead || (cr.equalImmutable != nil && cr.equalImmutable.dead) || (cr.equalMutable != nil && cr.equalMutable.dead) -} - -func (cr *cacheRecord) Size(ctx context.Context) (int64, error) { - // this expects that usage() is implemented lazily - s, err := cr.sizeG.Do(ctx, cr.ID(), func(ctx context.Context) (interface{}, error) { - cr.mu.Lock() - s := getSize(cr.md) - if s != sizeUnknown { - cr.mu.Unlock() - return s, nil - } - driverID := cr.ID() - if cr.equalMutable != nil { - driverID = cr.equalMutable.ID() - } - cr.mu.Unlock() - usage, err := cr.cm.ManagerOpt.Snapshotter.Usage(ctx, driverID) - if err != nil { - cr.mu.Lock() - isDead := cr.isDead() - cr.mu.Unlock() - if isDead { - return int64(0), nil - } - return s, errors.Wrapf(err, "failed to get usage for %s", cr.ID()) - } - cr.mu.Lock() - setSize(cr.md, usage.Size) - if err := cr.md.Commit(); err != nil { - cr.mu.Unlock() - return s, err - } - cr.mu.Unlock() - return 
usage.Size, nil - }) - return s.(int64), err -} - -func (cr *cacheRecord) Parent() ImmutableRef { - return cr.parentRef(true) -} - -func (cr *cacheRecord) parentRef(hidden bool) ImmutableRef { - if cr.parent == nil { - return nil - } - p := cr.parent.(*immutableRef) - p.mu.Lock() - defer p.mu.Unlock() - return p.ref(hidden) -} - -func (cr *cacheRecord) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { - cr.mu.Lock() - defer cr.mu.Unlock() - - if cr.mutable { - m, err := cr.cm.Snapshotter.Mounts(ctx, cr.ID()) - if err != nil { - return nil, errors.Wrapf(err, "failed to mount %s", cr.ID()) - } - if readonly { - m = setReadonly(m) - } - return m, nil - } - - if cr.equalMutable != nil && readonly { - m, err := cr.cm.Snapshotter.Mounts(ctx, cr.equalMutable.ID()) - if err != nil { - return nil, errors.Wrapf(err, "failed to mount %s", cr.equalMutable.ID()) - } - return setReadonly(m), nil - } - - if err := cr.finalize(ctx, true); err != nil { - return nil, err - } - if cr.viewMount == nil { // TODO: handle this better - cr.view = identity.NewID() - m, err := cr.cm.Snapshotter.View(ctx, cr.view, cr.ID()) - if err != nil { - cr.view = "" - return nil, errors.Wrapf(err, "failed to mount %s", cr.ID()) - } - cr.viewMount = m - } - return cr.viewMount, nil -} - -// call when holding the manager lock -func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) error { - delete(cr.cm.records, cr.ID()) - if cr.parent != nil { - if err := cr.parent.(*immutableRef).release(ctx); err != nil { - return err - } - } - if removeSnapshot { - if err := cr.cm.Snapshotter.Remove(ctx, cr.ID()); err != nil { - return err - } - } - if err := cr.cm.md.Clear(cr.ID()); err != nil { - return err - } - return nil -} - -func (cr *cacheRecord) ID() string { - return cr.md.ID() -} - -type immutableRef struct { - *cacheRecord - triggerLastUsed bool -} - -type mutableRef struct { - *cacheRecord - triggerLastUsed bool -} - -func (sr *immutableRef) Clone() ImmutableRef { - sr.mu.Lock() - ref := sr.ref(false) - sr.mu.Unlock() - return ref -} - -func (sr *immutableRef) Release(ctx context.Context) error { - sr.cm.mu.Lock() - defer sr.cm.mu.Unlock() - - sr.mu.Lock() - defer sr.mu.Unlock() - - return sr.release(ctx) -} - -func (sr *immutableRef) updateLastUsed() bool { - return sr.triggerLastUsed -} - -func (sr *immutableRef) updateLastUsedNow() bool { - if !sr.triggerLastUsed { - return false - } - for r := range sr.refs { - if r.updateLastUsed() { - return false - } - } - return true -} - -func (sr *immutableRef) release(ctx context.Context) error { - delete(sr.refs, sr) - - if sr.updateLastUsedNow() { - updateLastUsed(sr.md) - if sr.equalMutable != nil { - sr.equalMutable.triggerLastUsed = true - } - } - - if len(sr.refs) == 0 { - if sr.viewMount != nil { // TODO: release viewMount earlier if possible - if err := sr.cm.Snapshotter.Remove(ctx, sr.view); err != nil { - return err - } - sr.view = "" - sr.viewMount = nil - } - - if sr.equalMutable != nil { - sr.equalMutable.release(ctx) - } - // go sr.cm.GC() - } - - return nil -} - -func (sr *immutableRef) Finalize(ctx context.Context, b bool) error { - sr.mu.Lock() - defer sr.mu.Unlock() - - return sr.finalize(ctx, b) -} - -func (cr *cacheRecord) Metadata() *metadata.StorageItem { - return cr.md -} - -func (cr *cacheRecord) finalize(ctx context.Context, commit bool) error { - mutable := cr.equalMutable - if mutable == nil { - return nil - } - if !commit { - if HasCachePolicyRetain(mutable) { - CachePolicyRetain(mutable) - return 
mutable.Metadata().Commit() - } - return nil - } - err := cr.cm.Snapshotter.Commit(ctx, cr.ID(), mutable.ID()) - if err != nil { - return errors.Wrapf(err, "failed to commit %s", mutable.ID()) - } - mutable.dead = true - go func() { - cr.cm.mu.Lock() - defer cr.cm.mu.Unlock() - if err := mutable.remove(context.TODO(), false); err != nil { - logrus.Error(err) - } - }() - cr.equalMutable = nil - clearEqualMutable(cr.md) - return cr.md.Commit() -} - -func (sr *mutableRef) updateLastUsed() bool { - return sr.triggerLastUsed -} - -func (sr *mutableRef) commit(ctx context.Context) (ImmutableRef, error) { - if !sr.mutable || len(sr.refs) == 0 { - return nil, errors.Wrapf(errInvalid, "invalid mutable ref") - } - - id := identity.NewID() - md, _ := sr.cm.md.Get(id) - rec := &cacheRecord{ - mu: sr.mu, - cm: sr.cm, - parent: sr.parentRef(false), - equalMutable: sr, - refs: make(map[ref]struct{}), - md: md, - } - - if descr := GetDescription(sr.md); descr != "" { - if err := queueDescription(md, descr); err != nil { - return nil, err - } - } - - if err := initializeMetadata(rec); err != nil { - return nil, err - } - - sr.cm.records[id] = rec - - if err := sr.md.Commit(); err != nil { - return nil, err - } - - setSize(md, sizeUnknown) - setEqualMutable(md, sr.ID()) - if err := md.Commit(); err != nil { - return nil, err - } - - ref := rec.ref(true) - sr.equalImmutable = ref - return ref, nil -} - -func (sr *mutableRef) updatesLastUsed() bool { - return sr.triggerLastUsed -} - -func (sr *mutableRef) Commit(ctx context.Context) (ImmutableRef, error) { - sr.cm.mu.Lock() - defer sr.cm.mu.Unlock() - - sr.mu.Lock() - defer sr.mu.Unlock() - - return sr.commit(ctx) -} - -func (sr *mutableRef) Release(ctx context.Context) error { - sr.cm.mu.Lock() - defer sr.cm.mu.Unlock() - - sr.mu.Lock() - defer sr.mu.Unlock() - - return sr.release(ctx) -} - -func (sr *mutableRef) release(ctx context.Context) error { - delete(sr.refs, sr) - if getCachePolicy(sr.md) != cachePolicyRetain { - if sr.equalImmutable != nil { - if getCachePolicy(sr.equalImmutable.md) == cachePolicyRetain { - if sr.updateLastUsed() { - updateLastUsed(sr.md) - sr.triggerLastUsed = false - } - return nil - } - if err := sr.equalImmutable.remove(ctx, false); err != nil { - return err - } - } - if sr.parent != nil { - if err := sr.parent.(*immutableRef).release(ctx); err != nil { - return err - } - } - return sr.remove(ctx, true) - } else { - if sr.updateLastUsed() { - updateLastUsed(sr.md) - sr.triggerLastUsed = false - } - } - return nil -} - -func setReadonly(mounts snapshot.Mountable) snapshot.Mountable { - return &readOnlyMounter{mounts} -} - -type readOnlyMounter struct { - snapshot.Mountable -} - -func (m *readOnlyMounter) Mount() ([]mount.Mount, error) { - mounts, err := m.Mountable.Mount() - if err != nil { - return nil, err - } - for i, m := range mounts { - opts := make([]string, 0, len(m.Options)) - for _, opt := range m.Options { - if opt != "rw" { - opts = append(opts, opt) - } - } - opts = append(opts, "ro") - mounts[i].Options = opts - } - return mounts, nil -} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/export.go b/vendor/github.com/moby/buildkit/cache/remotecache/export.go deleted file mode 100644 index e40b8de20617..000000000000 --- a/vendor/github.com/moby/buildkit/cache/remotecache/export.go +++ /dev/null @@ -1,128 +0,0 @@ -package remotecache - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - v1 
"github.com/moby/buildkit/cache/remotecache/v1" - "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/util/contentutil" - "github.com/moby/buildkit/util/progress" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type ResolveCacheExporterFunc func(ctx context.Context, typ, target string) (Exporter, error) - -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.FromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err - } -} - -type Exporter interface { - solver.CacheExporterTarget - Finalize(ctx context.Context) error -} - -type contentCacheExporter struct { - solver.CacheExporterTarget - chains *v1.CacheChains - ingester content.Ingester -} - -func NewExporter(ingester content.Ingester) Exporter { - cc := v1.NewCacheChains() - return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester} -} - -func (ce *contentCacheExporter) Finalize(ctx context.Context) error { - return export(ctx, ce.ingester, ce.chains) -} - -func export(ctx context.Context, ingester content.Ingester, cc *v1.CacheChains) error { - config, descs, err := cc.Marshal() - if err != nil { - return err - } - - // own type because oci type can't be pushed and docker type doesn't have annotations - type manifestList struct { - specs.Versioned - - MediaType string `json:"mediaType,omitempty"` - - // Manifests references platform specific manifests. - Manifests []ocispec.Descriptor `json:"manifests"` - } - - var mfst manifestList - mfst.SchemaVersion = 2 - mfst.MediaType = images.MediaTypeDockerSchema2ManifestList - - for _, l := range config.Layers { - dgstPair, ok := descs[l.Blob] - if !ok { - return errors.Errorf("missing blob %s", l.Blob) - } - layerDone := oneOffProgress(ctx, fmt.Sprintf("writing layer %s", l.Blob)) - if err := contentutil.Copy(ctx, ingester, dgstPair.Provider, dgstPair.Descriptor); err != nil { - return layerDone(errors.Wrap(err, "error writing layer blob")) - } - layerDone(nil) - mfst.Manifests = append(mfst.Manifests, dgstPair.Descriptor) - } - - dt, err := json.Marshal(config) - if err != nil { - return err - } - dgst := digest.FromBytes(dt) - desc := ocispec.Descriptor{ - Digest: dgst, - Size: int64(len(dt)), - MediaType: v1.CacheConfigMediaTypeV0, - } - configDone := oneOffProgress(ctx, fmt.Sprintf("writing config %s", dgst)) - if err := content.WriteBlob(ctx, ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil { - return configDone(errors.Wrap(err, "error writing config blob")) - } - configDone(nil) - - mfst.Manifests = append(mfst.Manifests, desc) - - dt, err = json.Marshal(mfst) - if err != nil { - return errors.Wrap(err, "failed to marshal manifest") - } - dgst = digest.FromBytes(dt) - - desc = ocispec.Descriptor{ - Digest: dgst, - Size: int64(len(dt)), - MediaType: mfst.MediaType, - } - mfstDone := oneOffProgress(ctx, fmt.Sprintf("writing manifest %s", dgst)) - if err := content.WriteBlob(ctx, ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil { - return mfstDone(errors.Wrap(err, "error writing manifest blob")) - } - mfstDone(nil) - return nil -} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/import.go b/vendor/github.com/moby/buildkit/cache/remotecache/import.go deleted 
file mode 100644 index a76762e6fff5..000000000000 --- a/vendor/github.com/moby/buildkit/cache/remotecache/import.go +++ /dev/null @@ -1,98 +0,0 @@ -package remotecache - -import ( - "context" - "encoding/json" - "io" - - "github.com/containerd/containerd/content" - v1 "github.com/moby/buildkit/cache/remotecache/v1" - "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/worker" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// ResolveCacheImporterFunc returns importer and descriptor. -// Currently typ needs to be an empty string. -type ResolveCacheImporterFunc func(ctx context.Context, typ, ref string) (Importer, ocispec.Descriptor, error) - -type Importer interface { - Resolve(ctx context.Context, desc ocispec.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) -} - -func NewImporter(provider content.Provider) Importer { - return &contentCacheImporter{provider: provider} -} - -type contentCacheImporter struct { - provider content.Provider -} - -func (ci *contentCacheImporter) Resolve(ctx context.Context, desc ocispec.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) { - dt, err := readBlob(ctx, ci.provider, desc) - if err != nil { - return nil, err - } - - var mfst ocispec.Index - if err := json.Unmarshal(dt, &mfst); err != nil { - return nil, err - } - - allLayers := v1.DescriptorProvider{} - - var configDesc ocispec.Descriptor - - for _, m := range mfst.Manifests { - if m.MediaType == v1.CacheConfigMediaTypeV0 { - configDesc = m - continue - } - allLayers[m.Digest] = v1.DescriptorProviderPair{ - Descriptor: m, - Provider: ci.provider, - } - } - - if configDesc.Digest == "" { - return nil, errors.Errorf("invalid build cache from %+v", desc) - } - - dt, err = readBlob(ctx, ci.provider, configDesc) - if err != nil { - return nil, err - } - - cc := v1.NewCacheChains() - if err := v1.Parse(dt, allLayers, cc); err != nil { - return nil, err - } - - keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w) - if err != nil { - return nil, err - } - return solver.NewCacheManager(id, keysStorage, resultStorage), nil -} - -func readBlob(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]byte, error) { - maxBlobSize := int64(1 << 20) - if desc.Size > maxBlobSize { - return nil, errors.Errorf("blob %s is too large (%d > %d)", desc.Digest, desc.Size, maxBlobSize) - } - dt, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - // NOTE: even if err == EOF, we might have got expected dt here. - // For instance, http.Response.Body is known to return non-zero bytes with EOF. 
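// Editor's note (not part of the original file): the guard below tolerates
// io.EOF only when the bytes read so far hash to desc.Digest; a short read
// with a mismatched digest still surfaces the error, so a truncated blob is
// never silently accepted as valid cache data.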
- if err == io.EOF { - if dtDigest := desc.Digest.Algorithm().FromBytes(dt); dtDigest != desc.Digest { - err = errors.Wrapf(err, "got EOF, expected %s (%d bytes), got %s (%d bytes)", - desc.Digest, desc.Size, dtDigest, len(dt)) - } else { - err = nil - } - } - } - return dt, err -} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go b/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go deleted file mode 100644 index fa23aa552203..000000000000 --- a/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go +++ /dev/null @@ -1,72 +0,0 @@ -package registry - -import ( - "context" - "time" - - "github.com/containerd/containerd/remotes" - "github.com/containerd/containerd/remotes/docker" - "github.com/moby/buildkit/cache/remotecache" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/auth" - "github.com/moby/buildkit/util/contentutil" - "github.com/moby/buildkit/util/resolver" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func ResolveCacheExporterFunc(sm *session.Manager, resolverOpt resolver.ResolveOptionsFunc) remotecache.ResolveCacheExporterFunc { - return func(ctx context.Context, typ, ref string) (remotecache.Exporter, error) { - if typ != "" { - return nil, errors.Errorf("unsupported cache exporter type: %s", typ) - } - remote := newRemoteResolver(ctx, resolverOpt, sm, ref) - pusher, err := remote.Pusher(ctx, ref) - if err != nil { - return nil, err - } - return remotecache.NewExporter(contentutil.FromPusher(pusher)), nil - } -} - -func ResolveCacheImporterFunc(sm *session.Manager, resolverOpt resolver.ResolveOptionsFunc) remotecache.ResolveCacheImporterFunc { - return func(ctx context.Context, typ, ref string) (remotecache.Importer, specs.Descriptor, error) { - if typ != "" { - return nil, specs.Descriptor{}, errors.Errorf("unsupported cache importer type: %s", typ) - } - remote := newRemoteResolver(ctx, resolverOpt, sm, ref) - xref, desc, err := remote.Resolve(ctx, ref) - if err != nil { - return nil, specs.Descriptor{}, err - } - fetcher, err := remote.Fetcher(ctx, xref) - if err != nil { - return nil, specs.Descriptor{}, err - } - return remotecache.NewImporter(contentutil.FromFetcher(fetcher)), desc, nil - } -} - -func newRemoteResolver(ctx context.Context, resolverOpt resolver.ResolveOptionsFunc, sm *session.Manager, ref string) remotes.Resolver { - opt := resolverOpt(ref) - opt.Credentials = getCredentialsFunc(ctx, sm) - return docker.NewResolver(opt) -} - -func getCredentialsFunc(ctx context.Context, sm *session.Manager) func(string) (string, string, error) { - id := session.FromContext(ctx) - if id == "" { - return nil - } - return func(host string) (string, string, error) { - timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - caller, err := sm.Get(timeoutCtx, id) - if err != nil { - return "", "", err - } - - return auth.CredentialsFunc(context.TODO(), caller)(host) - } -} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go deleted file mode 100644 index 27b19587c820..000000000000 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go +++ /dev/null @@ -1,247 +0,0 @@ -package cacheimport - -import ( - "context" - - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/worker" - digest "github.com/opencontainers/go-digest" - 
"github.com/pkg/errors" -) - -func NewCacheKeyStorage(cc *CacheChains, w worker.Worker) (solver.CacheKeyStorage, solver.CacheResultStorage, error) { - storage := &cacheKeyStorage{ - byID: map[string]*itemWithOutgoingLinks{}, - byItem: map[*item]string{}, - byResult: map[string]map[string]struct{}{}, - } - - for _, it := range cc.items { - if _, err := addItemToStorage(storage, it); err != nil { - return nil, nil, err - } - } - - results := &cacheResultStorage{ - w: w, - byID: storage.byID, - byResult: storage.byResult, - } - - return storage, results, nil -} - -func addItemToStorage(k *cacheKeyStorage, it *item) (*itemWithOutgoingLinks, error) { - if id, ok := k.byItem[it]; ok { - if id == "" { - return nil, errors.Errorf("invalid loop") - } - return k.byID[id], nil - } - - var id string - if len(it.links) == 0 { - id = it.dgst.String() - } else { - id = identity.NewID() - } - - k.byItem[it] = "" - - for i, m := range it.links { - for l := range m { - src, err := addItemToStorage(k, l.src) - if err != nil { - return nil, err - } - cl := nlink{ - input: i, - dgst: it.dgst, - selector: l.selector, - } - src.links[cl] = append(src.links[cl], id) - } - } - - k.byItem[it] = id - - itl := &itemWithOutgoingLinks{ - item: it, - links: map[nlink][]string{}, - } - - k.byID[id] = itl - - if res := it.result; res != nil { - resultID := remoteID(res) - ids, ok := k.byResult[resultID] - if !ok { - ids = map[string]struct{}{} - k.byResult[resultID] = ids - } - ids[id] = struct{}{} - } - return itl, nil -} - -type cacheKeyStorage struct { - byID map[string]*itemWithOutgoingLinks - byItem map[*item]string - byResult map[string]map[string]struct{} -} - -type itemWithOutgoingLinks struct { - *item - links map[nlink][]string -} - -func (cs *cacheKeyStorage) Exists(id string) bool { - _, ok := cs.byID[id] - return ok -} - -func (cs *cacheKeyStorage) Walk(func(id string) error) error { - return nil -} - -func (cs *cacheKeyStorage) WalkResults(id string, fn func(solver.CacheResult) error) error { - it, ok := cs.byID[id] - if !ok { - return nil - } - if res := it.result; res != nil { - return fn(solver.CacheResult{ID: remoteID(res), CreatedAt: it.resultTime}) - } - return nil -} - -func (cs *cacheKeyStorage) Load(id string, resultID string) (solver.CacheResult, error) { - it, ok := cs.byID[id] - if !ok { - return solver.CacheResult{}, nil - } - if res := it.result; res != nil { - return solver.CacheResult{ID: remoteID(res), CreatedAt: it.resultTime}, nil - } - return solver.CacheResult{}, nil -} - -func (cs *cacheKeyStorage) AddResult(id string, res solver.CacheResult) error { - return nil -} - -func (cs *cacheKeyStorage) Release(resultID string) error { - return nil -} -func (cs *cacheKeyStorage) AddLink(id string, link solver.CacheInfoLink, target string) error { - return nil -} -func (cs *cacheKeyStorage) WalkLinks(id string, link solver.CacheInfoLink, fn func(id string) error) error { - it, ok := cs.byID[id] - if !ok { - return nil - } - for _, id := range it.links[nlink{ - dgst: outputKey(link.Digest, int(link.Output)), - input: int(link.Input), - selector: link.Selector.String(), - }] { - if err := fn(id); err != nil { - return err - } - } - return nil -} - -// TODO: -func (cs *cacheKeyStorage) WalkBacklinks(id string, fn func(id string, link solver.CacheInfoLink) error) error { - return nil -} - -func (cs *cacheKeyStorage) WalkIDsByResult(id string, fn func(id string) error) error { - ids := cs.byResult[id] - for id := range ids { - if err := fn(id); err != nil { - return err - } - } - return nil -} - 
-func (cs *cacheKeyStorage) HasLink(id string, link solver.CacheInfoLink, target string) bool { - l := nlink{ - dgst: outputKey(link.Digest, int(link.Output)), - input: int(link.Input), - selector: link.Selector.String(), - } - if it, ok := cs.byID[id]; ok { - for _, id := range it.links[l] { - if id == target { - return true - } - } - } - return false -} - -type cacheResultStorage struct { - w worker.Worker - byID map[string]*itemWithOutgoingLinks - byResult map[string]map[string]struct{} -} - -func (cs *cacheResultStorage) Save(res solver.Result) (solver.CacheResult, error) { - return solver.CacheResult{}, errors.Errorf("importer is immutable") -} - -func (cs *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult) (solver.Result, error) { - remote, err := cs.LoadRemote(ctx, res) - if err != nil { - return nil, err - } - - ref, err := cs.w.FromRemote(ctx, remote) - if err != nil { - return nil, err - } - return worker.NewWorkerRefResult(ref, cs.w), nil -} - -func (cs *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult) (*solver.Remote, error) { - if r := cs.byResultID(res.ID); r != nil { - return r, nil - } - return nil, errors.WithStack(solver.ErrNotFound) -} - -func (cs *cacheResultStorage) Exists(id string) bool { - return cs.byResultID(id) != nil -} - -func (cs *cacheResultStorage) byResultID(resultID string) *solver.Remote { - m, ok := cs.byResult[resultID] - if !ok || len(m) == 0 { - return nil - } - - for id := range m { - it, ok := cs.byID[id] - if ok { - if r := it.result; r != nil { - return r - } - } - } - - return nil -} - -// unique ID per remote. this ID is not stable. -func remoteID(r *solver.Remote) string { - dgstr := digest.Canonical.Digester() - for _, desc := range r.Descriptors { - dgstr.Hash().Write([]byte(desc.Digest)) - } - return dgstr.Digest().String() -} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go deleted file mode 100644 index 52806b9c4498..000000000000 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go +++ /dev/null @@ -1,127 +0,0 @@ -package cacheimport - -import ( - "time" - - "github.com/containerd/containerd/content" - "github.com/moby/buildkit/solver" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -func NewCacheChains() *CacheChains { - return &CacheChains{visited: map[interface{}]struct{}{}} -} - -type CacheChains struct { - items []*item - visited map[interface{}]struct{} -} - -func (c *CacheChains) Add(dgst digest.Digest) solver.CacheExporterRecord { - it := &item{c: c, dgst: dgst} - c.items = append(c.items, it) - return it -} - -func (c *CacheChains) Visit(v interface{}) { - c.visited[v] = struct{}{} -} - -func (c *CacheChains) Visited(v interface{}) bool { - _, ok := c.visited[v] - return ok -} - -func (c *CacheChains) normalize() error { - st := &normalizeState{ - added: map[*item]*item{}, - links: map[*item]map[nlink]map[digest.Digest]struct{}{}, - byKey: map[digest.Digest]*item{}, - } - - for _, it := range c.items { - _, err := normalizeItem(it, st) - if err != nil { - return err - } - } - - items := make([]*item, 0, len(st.byKey)) - for _, it := range st.byKey { - items = append(items, it) - } - c.items = items - return nil -} - -func (c *CacheChains) Marshal() (*CacheConfig, DescriptorProvider, error) { - if err := c.normalize(); err != nil { - return nil, nil, err - } - - st := &marshalState{ - chainsByID: map[string]int{}, - 
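// Editor's note (not part of the original file): marshalState accumulates
// the flattened output while items are walked: chainsByID dedupes layer
// chains, recordsByItem dedupes records, and descriptors collects the
// content providers needed to push each referenced blob.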
descriptors: DescriptorProvider{}, - recordsByItem: map[*item]int{}, - } - - for _, it := range c.items { - if err := marshalItem(it, st); err != nil { - return nil, nil, err - } - } - - cc := CacheConfig{ - Layers: st.layers, - Records: st.records, - } - sortConfig(&cc) - - return &cc, st.descriptors, nil -} - -type DescriptorProvider map[digest.Digest]DescriptorProviderPair - -type DescriptorProviderPair struct { - Descriptor ocispec.Descriptor - Provider content.Provider -} - -type item struct { - c *CacheChains - dgst digest.Digest - - result *solver.Remote - resultTime time.Time - - links []map[link]struct{} -} - -type link struct { - src *item - selector string -} - -func (c *item) AddResult(createdAt time.Time, result *solver.Remote) { - c.resultTime = createdAt - c.result = result -} - -func (c *item) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) { - src, ok := rec.(*item) - if !ok { - return - } - - for { - if index < len(c.links) { - break - } - c.links = append(c.links, map[link]struct{}{}) - } - - c.links[index][link{src: src, selector: selector}] = struct{}{} -} - -var _ solver.CacheExporterTarget = &CacheChains{} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains_test.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains_test.go deleted file mode 100644 index b811f968a9c6..000000000000 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package cacheimport - -import ( - "encoding/json" - "testing" - "time" - - "github.com/moby/buildkit/solver" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/stretchr/testify/require" -) - -func TestSimpleMarshal(t *testing.T) { - cc := NewCacheChains() - - addRecords := func() { - foo := cc.Add(outputKey(dgst("foo"), 0)) - bar := cc.Add(outputKey(dgst("bar"), 1)) - baz := cc.Add(outputKey(dgst("baz"), 0)) - - baz.LinkFrom(foo, 0, "") - baz.LinkFrom(bar, 1, "sel0") - r0 := &solver.Remote{ - Descriptors: []ocispec.Descriptor{{ - Digest: dgst("d0"), - }, { - Digest: dgst("d1"), - }}, - } - baz.AddResult(time.Now(), r0) - } - - addRecords() - - cfg, _, err := cc.Marshal() - require.NoError(t, err) - - require.Equal(t, len(cfg.Layers), 2) - require.Equal(t, len(cfg.Records), 3) - - require.Equal(t, cfg.Layers[0].Blob, dgst("d0")) - require.Equal(t, cfg.Layers[0].ParentIndex, -1) - require.Equal(t, cfg.Layers[1].Blob, dgst("d1")) - require.Equal(t, cfg.Layers[1].ParentIndex, 0) - - require.Equal(t, cfg.Records[0].Digest, outputKey(dgst("baz"), 0)) - require.Equal(t, len(cfg.Records[0].Inputs), 2) - require.Equal(t, len(cfg.Records[0].Results), 1) - - require.Equal(t, cfg.Records[1].Digest, outputKey(dgst("foo"), 0)) - require.Equal(t, len(cfg.Records[1].Inputs), 0) - require.Equal(t, len(cfg.Records[1].Results), 0) - - require.Equal(t, cfg.Records[2].Digest, outputKey(dgst("bar"), 1)) - require.Equal(t, len(cfg.Records[2].Inputs), 0) - require.Equal(t, len(cfg.Records[2].Results), 0) - - require.Equal(t, cfg.Records[0].Results[0].LayerIndex, 1) - require.Equal(t, cfg.Records[0].Inputs[0][0].Selector, "") - require.Equal(t, cfg.Records[0].Inputs[0][0].LinkIndex, 1) - require.Equal(t, cfg.Records[0].Inputs[1][0].Selector, "sel0") - require.Equal(t, cfg.Records[0].Inputs[1][0].LinkIndex, 2) - - // adding same info again doesn't produce anything extra - addRecords() - - cfg2, descPairs, err := cc.Marshal() - require.NoError(t, err) - - require.EqualValues(t, cfg, cfg2) - - 
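The cfg/cfg2 equality asserted just above depends on Marshal being deterministic: sortConfig (in utils.go later in this diff) orders layers and records canonically, and each record is keyed content-addressably by its vertex digest and output index. A small sketch of that derivation, mirroring outputKey from utils.go (the name recordKey is ours):

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// recordKey mirrors outputKey from utils.go below: output idx of a vertex
// keys its cache record as the digest of the string "<vertex-digest>@<idx>".
func recordKey(dgst digest.Digest, idx int) digest.Digest {
	return digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, idx)))
}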
// marshal roundtrip - dt, err := json.Marshal(cfg) - require.NoError(t, err) - - newChains := NewCacheChains() - err = Parse(dt, descPairs, newChains) - require.NoError(t, err) - - cfg3, _, err := cc.Marshal() - require.NoError(t, err) - require.EqualValues(t, cfg, cfg3) - - // add extra item - cc.Add(outputKey(dgst("bay"), 0)) - cfg, _, err = cc.Marshal() - require.NoError(t, err) - - require.Equal(t, len(cfg.Layers), 2) - require.Equal(t, len(cfg.Records), 4) -} - -func dgst(s string) digest.Digest { - return digest.FromBytes([]byte(s)) -} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go deleted file mode 100644 index 4cff811490bf..000000000000 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go +++ /dev/null @@ -1,50 +0,0 @@ -package cacheimport - -// Distributable build cache -// -// Main manifest is OCI image index -// https://github.com/opencontainers/image-spec/blob/master/image-index.md . -// Manifests array contains descriptors to the cache layers and one instance of -// build cache config with media type application/vnd.buildkit.cacheconfig.v0 . -// The cache layer descriptors need to have an annotation with uncompressed digest -// to allow deduplication on extraction and optionally "buildkit/createdat" -// annotation to support maintaining original timestamps. -// -// Cache config file layout: -// -//{ -// "layers": [ -// { -// "blob": "sha256:deadbeef", <- digest of layer blob in index -// "parent": -1 <- index of parent layer, -1 if no parent -// }, -// { -// "blob": "sha256:deadbeef", -// "parent": 0 -// } -// ], -// -// "records": [ -// { -// "digest": "sha256:deadbeef", <- base digest for the record -// }, -// { -// "digest": "sha256:deadbeef", -// "output": 1, <- optional output index -// "layers": [ <- optional array of layer chains -// { -// "createdat": "", -// "layer": 1, <- index to the layer -// } -// ], -// "inputs": [ <- dependent records -// [ <- index of the dependency (0) -// { -// "selector": "sel", <- optional selector -// "link": 0, <- index to the dependent record -// } -// ] -// ] -// } -// ] -// } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go deleted file mode 100644 index 8aa6929ea05b..000000000000 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go +++ /dev/null @@ -1,102 +0,0 @@ -package cacheimport - -import ( - "encoding/json" - - "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/util/contentutil" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func Parse(configJSON []byte, provider DescriptorProvider, t solver.CacheExporterTarget) error { - var config CacheConfig - if err := json.Unmarshal(configJSON, &config); err != nil { - return err - } - - cache := map[int]solver.CacheExporterRecord{} - - for i := range config.Records { - if _, err := parseRecord(config, i, provider, t, cache); err != nil { - return err - } - } - - return nil -} - -func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver.CacheExporterTarget, cache map[int]solver.CacheExporterRecord) (solver.CacheExporterRecord, error) { - if r, ok := cache[idx]; ok { - if r == nil { - return nil, errors.Errorf("invalid looping record") - } - return r, nil - } - - if idx < 0 || idx >= len(cc.Records) { - return nil, errors.Errorf("invalid record ID: %d", idx) - } - rec := cc.Records[idx] - - r := t.Add(rec.Digest)
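// Editor's note (not part of the original file): the nil stored into
// cache[idx] below is an "in progress" sentinel. If a record's inputs lead
// back to this index before parsing finishes, the lookup at the top of
// parseRecord sees the nil entry and fails with "invalid looping record"
// rather than recursing forever on a cyclic config.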
- cache[idx] = nil - for i, inputs := range rec.Inputs { - for _, inp := range inputs { - src, err := parseRecord(cc, inp.LinkIndex, provider, t, cache) - if err != nil { - return nil, err - } - r.LinkFrom(src, i, inp.Selector) - } - } - - for _, res := range rec.Results { - visited := map[int]struct{}{} - remote, err := getRemoteChain(cc.Layers, res.LayerIndex, provider, visited) - if err != nil { - return nil, err - } - r.AddResult(res.CreatedAt, remote) - } - - cache[idx] = r - return r, nil -} - -func getRemoteChain(layers []CacheLayer, idx int, provider DescriptorProvider, visited map[int]struct{}) (*solver.Remote, error) { - if _, ok := visited[idx]; ok { - return nil, errors.Errorf("invalid looping layer") - } - visited[idx] = struct{}{} - - if idx < 0 || idx >= len(layers) { - return nil, errors.Errorf("invalid layer index %d", idx) - } - - l := layers[idx] - - descPair, ok := provider[l.Blob] - if !ok { - return nil, errors.Errorf("missing blob for %s", l.Blob) - } - - var r *solver.Remote - if l.ParentIndex != -1 { - var err error - r, err = getRemoteChain(layers, l.ParentIndex, provider, visited) - if err != nil { - return nil, err - } - r.Descriptors = append(r.Descriptors, descPair.Descriptor) - mp := contentutil.NewMultiProvider(r.Provider) - mp.Add(descPair.Descriptor.Digest, descPair.Provider) - r.Provider = mp - return r, nil - } - return &solver.Remote{ - Descriptors: []ocispec.Descriptor{descPair.Descriptor}, - Provider: descPair.Provider, - }, nil - -} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go deleted file mode 100644 index 4c6bc0bb26e5..000000000000 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go +++ /dev/null @@ -1,35 +0,0 @@ -package cacheimport - -import ( - "time" - - digest "github.com/opencontainers/go-digest" -) - -const CacheConfigMediaTypeV0 = "application/vnd.buildkit.cacheconfig.v0" - -type CacheConfig struct { - Layers []CacheLayer `json:"layers,omitempty"` - Records []CacheRecord `json:"records,omitempty"` -} - -type CacheLayer struct { - Blob digest.Digest `json:"blob,omitempty"` - ParentIndex int `json:"parent,omitempty"` -} - -type CacheRecord struct { - Results []CacheResult `json:"layers,omitempty"` - Digest digest.Digest `json:"digest,omitempty"` - Inputs [][]CacheInput `json:"inputs,omitempty"` -} - -type CacheResult struct { - LayerIndex int `json:"layer"` - CreatedAt time.Time `json:"createdAt,omitempty"` -} - -type CacheInput struct { - Selector string `json:"selector,omitempty"` - LinkIndex int `json:"link"` -} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go deleted file mode 100644 index 665eb330ca9d..000000000000 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go +++ /dev/null @@ -1,306 +0,0 @@ -package cacheimport - -import ( - "fmt" - "sort" - - "github.com/containerd/containerd/content" - "github.com/moby/buildkit/solver" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// sortConfig sorts the config structure to make sure it is deterministic -func sortConfig(cc *CacheConfig) { - type indexedLayer struct { - oldIndex int - newIndex int - l CacheLayer - } - - unsortedLayers := make([]*indexedLayer, len(cc.Layers)) - sortedLayers := make([]*indexedLayer, len(cc.Layers)) - - for i, l := range cc.Layers { - il := &indexedLayer{oldIndex: 
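// Editor's note (not part of the original file): layers and records are
// sorted through an old-index/new-index indirection because records refer
// to layers (and to each other) by position; after sorting, every stored
// index must be remapped through the unsorted slice to stay valid.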
i, l: l} - unsortedLayers[i] = il - sortedLayers[i] = il - } - sort.Slice(sortedLayers, func(i, j int) bool { - li := sortedLayers[i].l - lj := sortedLayers[j].l - if li.Blob == lj.Blob { - return li.ParentIndex < lj.ParentIndex - } - return li.Blob < lj.Blob - }) - for i, l := range sortedLayers { - l.newIndex = i - } - - layers := make([]CacheLayer, len(sortedLayers)) - for i, l := range sortedLayers { - if pID := l.l.ParentIndex; pID != -1 { - l.l.ParentIndex = unsortedLayers[pID].newIndex - } - layers[i] = l.l - } - - type indexedRecord struct { - oldIndex int - newIndex int - r CacheRecord - } - - unsortedRecords := make([]*indexedRecord, len(cc.Records)) - sortedRecords := make([]*indexedRecord, len(cc.Records)) - - for i, r := range cc.Records { - ir := &indexedRecord{oldIndex: i, r: r} - unsortedRecords[i] = ir - sortedRecords[i] = ir - } - sort.Slice(sortedRecords, func(i, j int) bool { - ri := sortedRecords[i].r - rj := sortedRecords[j].r - if ri.Digest != rj.Digest { - return ri.Digest < rj.Digest - } - if len(ri.Inputs) != len(rj.Inputs) { - return len(ri.Inputs) < len(rj.Inputs) - } - for i, inputs := range ri.Inputs { - if len(ri.Inputs[i]) != len(rj.Inputs[i]) { - return len(ri.Inputs[i]) < len(rj.Inputs[i]) - } - for j := range inputs { - if ri.Inputs[i][j].Selector != rj.Inputs[i][j].Selector { - return ri.Inputs[i][j].Selector < rj.Inputs[i][j].Selector - } - return cc.Records[ri.Inputs[i][j].LinkIndex].Digest < cc.Records[rj.Inputs[i][j].LinkIndex].Digest - } - } - return ri.Digest < rj.Digest - }) - for i, l := range sortedRecords { - l.newIndex = i - } - - records := make([]CacheRecord, len(sortedRecords)) - for i, r := range sortedRecords { - for j := range r.r.Results { - r.r.Results[j].LayerIndex = unsortedLayers[r.r.Results[j].LayerIndex].newIndex - } - for j, inputs := range r.r.Inputs { - for k := range inputs { - r.r.Inputs[j][k].LinkIndex = unsortedRecords[r.r.Inputs[j][k].LinkIndex].newIndex - } - sort.Slice(inputs, func(i, j int) bool { - return inputs[i].LinkIndex < inputs[j].LinkIndex - }) - } - records[i] = r.r - } - - cc.Layers = layers - cc.Records = records -} - -func outputKey(dgst digest.Digest, idx int) digest.Digest { - return digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, idx))) -} - -type nlink struct { - dgst digest.Digest - input int - selector string -} -type normalizeState struct { - added map[*item]*item - links map[*item]map[nlink]map[digest.Digest]struct{} - byKey map[digest.Digest]*item - next int -} - -func normalizeItem(it *item, state *normalizeState) (*item, error) { - if it2, ok := state.added[it]; ok { - return it2, nil - } - - if len(it.links) == 0 { - id := it.dgst - if it2, ok := state.byKey[id]; ok { - state.added[it] = it2 - return it2, nil - } - state.byKey[id] = it - state.added[it] = it - return it, nil - } - - matches := map[digest.Digest]struct{}{} - - // check if there is already a matching record - for i, m := range it.links { - if len(m) == 0 { - return nil, errors.Errorf("invalid incomplete links") - } - for l := range m { - nl := nlink{dgst: it.dgst, input: i, selector: l.selector} - it2, err := normalizeItem(l.src, state) - if err != nil { - return nil, err - } - links := state.links[it2][nl] - if i == 0 { - for id := range links { - matches[id] = struct{}{} - } - } else { - for id := range matches { - if _, ok := links[id]; !ok { - delete(matches, id) - } - } - } - } - } - - var id digest.Digest - - links := it.links - - if len(matches) > 0 { - for m := range matches { - if id == "" || id > m { - id = m - } - }
- } else { - // keep tmp IDs deterministic - state.next++ - id = digest.FromBytes([]byte(fmt.Sprintf("%d", state.next))) - state.byKey[id] = it - it.links = make([]map[link]struct{}, len(it.links)) - for i := range it.links { - it.links[i] = map[link]struct{}{} - } - } - - it2 := state.byKey[id] - state.added[it] = it2 - - for i, m := range links { - for l := range m { - subIt, err := normalizeItem(l.src, state) - if err != nil { - return nil, err - } - it2.links[i][link{src: subIt, selector: l.selector}] = struct{}{} - - nl := nlink{dgst: it.dgst, input: i, selector: l.selector} - if _, ok := state.links[subIt]; !ok { - state.links[subIt] = map[nlink]map[digest.Digest]struct{}{} - } - if _, ok := state.links[subIt][nl]; !ok { - state.links[subIt][nl] = map[digest.Digest]struct{}{} - } - state.links[subIt][nl][id] = struct{}{} - } - } - - return it2, nil -} - -type marshalState struct { - layers []CacheLayer - chainsByID map[string]int - descriptors DescriptorProvider - - records []CacheRecord - recordsByItem map[*item]int -} - -func marshalRemote(r *solver.Remote, state *marshalState) string { - if len(r.Descriptors) == 0 { - return "" - } - type Remote struct { - Descriptors []ocispec.Descriptor - Provider content.Provider - } - var parentID string - if len(r.Descriptors) > 1 { - r2 := &solver.Remote{ - Descriptors: r.Descriptors[:len(r.Descriptors)-1], - Provider: r.Provider, - } - parentID = marshalRemote(r2, state) - } - desc := r.Descriptors[len(r.Descriptors)-1] - - state.descriptors[desc.Digest] = DescriptorProviderPair{ - Descriptor: desc, - Provider: r.Provider, - } - - id := desc.Digest.String() + parentID - - if _, ok := state.chainsByID[id]; ok { - return id - } - - state.chainsByID[id] = len(state.layers) - l := CacheLayer{ - Blob: desc.Digest, - ParentIndex: -1, - } - if parentID != "" { - l.ParentIndex = state.chainsByID[parentID] - } - state.layers = append(state.layers, l) - return id -} - -func marshalItem(it *item, state *marshalState) error { - if _, ok := state.recordsByItem[it]; ok { - return nil - } - - rec := CacheRecord{ - Digest: it.dgst, - Inputs: make([][]CacheInput, len(it.links)), - } - - for i, m := range it.links { - for l := range m { - if err := marshalItem(l.src, state); err != nil { - return err - } - idx, ok := state.recordsByItem[l.src] - if !ok { - return errors.Errorf("invalid source record: %v", l.src) - } - rec.Inputs[i] = append(rec.Inputs[i], CacheInput{ - Selector: l.selector, - LinkIndex: idx, - }) - } - } - - if it.result != nil { - id := marshalRemote(it.result, state) - if id != "" { - idx, ok := state.chainsByID[id] - if !ok { - return errors.Errorf("parent chainid not found") - } - rec.Results = append(rec.Results, CacheResult{LayerIndex: idx, CreatedAt: it.resultTime}) - } - } - - state.recordsByItem[it] = len(state.records) - state.records = append(state.records, rec) - return nil -} diff --git a/vendor/github.com/moby/buildkit/cache/util/fsutil.go b/vendor/github.com/moby/buildkit/cache/util/fsutil.go deleted file mode 100644 index b7aa6730d607..000000000000 --- a/vendor/github.com/moby/buildkit/cache/util/fsutil.go +++ /dev/null @@ -1,139 +0,0 @@ -package util - -import ( - "context" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/containerd/continuity/fs" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/snapshot" - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil" - fstypes "github.com/tonistiigi/fsutil/types" -) - -type ReadRequest struct { - Filename string - Range *FileRange -} - -type FileRange 
struct { - Offset int - Length int -} - -func withMount(ctx context.Context, ref cache.ImmutableRef, cb func(string) error) error { - mount, err := ref.Mount(ctx, true) - if err != nil { - return err - } - - lm := snapshot.LocalMounter(mount) - - root, err := lm.Mount() - if err != nil { - return err - } - - defer func() { - if lm != nil { - lm.Unmount() - } - }() - - if err := cb(root); err != nil { - return err - } - - if err := lm.Unmount(); err != nil { - return err - } - lm = nil - return nil -} - -func ReadFile(ctx context.Context, ref cache.ImmutableRef, req ReadRequest) ([]byte, error) { - var dt []byte - - err := withMount(ctx, ref, func(root string) error { - fp, err := fs.RootPath(root, req.Filename) - if err != nil { - return err - } - - if req.Range == nil { - dt, err = ioutil.ReadFile(fp) - if err != nil { - return err - } - } else { - f, err := os.Open(fp) - if err != nil { - return err - } - dt, err = ioutil.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length))) - f.Close() - if err != nil { - return err - } - } - return nil - }) - return dt, err -} - -type ReadDirRequest struct { - Path string - IncludePattern string -} - -func ReadDir(ctx context.Context, ref cache.ImmutableRef, req ReadDirRequest) ([]*fstypes.Stat, error) { - var ( - rd []*fstypes.Stat - wo fsutil.WalkOpt - ) - if req.IncludePattern != "" { - wo.IncludePatterns = append(wo.IncludePatterns, req.IncludePattern) - } - err := withMount(ctx, ref, func(root string) error { - fp, err := fs.RootPath(root, req.Path) - if err != nil { - return err - } - return fsutil.Walk(ctx, fp, &wo, func(path string, info os.FileInfo, err error) error { - if err != nil { - return errors.Wrapf(err, "walking %q", root) - } - stat, ok := info.Sys().(*fstypes.Stat) - if !ok { - // This "can't happen(tm)". 
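// Editor's note (not part of the original file): fsutil.Walk, unlike
// filepath.Walk, constructs the os.FileInfo itself and stores a
// *fstypes.Stat in info.Sys(), so the assertion above should always
// succeed; the error branch that follows is only a defensive guard against
// the walker implementation changing underneath.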
- return errors.Errorf("expected a *fsutil.Stat but got %T", info.Sys()) - } - rd = append(rd, stat) - - if info.IsDir() { - return filepath.SkipDir - } - return nil - }) - }) - return rd, err -} - -func StatFile(ctx context.Context, ref cache.ImmutableRef, path string) (*fstypes.Stat, error) { - var st *fstypes.Stat - err := withMount(ctx, ref, func(root string) error { - fp, err := fs.RootPath(root, path) - if err != nil { - return err - } - if st, err = fsutil.Stat(fp); err != nil { - return err - } - return nil - }) - return st, err -} diff --git a/vendor/github.com/moby/buildkit/client/build.go b/vendor/github.com/moby/buildkit/client/build.go deleted file mode 100644 index f23df4450eaf..000000000000 --- a/vendor/github.com/moby/buildkit/client/build.go +++ /dev/null @@ -1,105 +0,0 @@ -package client - -import ( - "context" - - "github.com/moby/buildkit/client/buildid" - gateway "github.com/moby/buildkit/frontend/gateway/client" - "github.com/moby/buildkit/frontend/gateway/grpcclient" - gatewayapi "github.com/moby/buildkit/frontend/gateway/pb" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/util/apicaps" - "github.com/pkg/errors" - "google.golang.org/grpc" -) - -func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildFunc gateway.BuildFunc, statusChan chan *SolveStatus) (*SolveResponse, error) { - defer func() { - if statusChan != nil { - close(statusChan) - } - }() - - if opt.Frontend != "" { - return nil, errors.New("invalid SolveOpt, Build interface cannot use Frontend") - } - - if product == "" { - product = apicaps.ExportedProduct - } - - feOpts := opt.FrontendAttrs - opt.FrontendAttrs = nil - - workers, err := c.ListWorkers(ctx) - if err != nil { - return nil, errors.Wrap(err, "listing workers for Build") - } - var gworkers []gateway.WorkerInfo - for _, w := range workers { - gworkers = append(gworkers, gateway.WorkerInfo{ - ID: w.ID, - Labels: w.Labels, - Platforms: w.Platforms, - }) - } - - cb := func(ref string, s *session.Session) error { - g, err := grpcclient.New(ctx, feOpts, s.ID(), product, c.gatewayClientForBuild(ref), gworkers) - if err != nil { - return err - } - - if err := g.Run(ctx, buildFunc); err != nil { - return errors.Wrap(err, "failed to run Build function") - } - return nil - } - - return c.solve(ctx, nil, cb, opt, statusChan) -} - -func (c *Client) gatewayClientForBuild(buildid string) gatewayapi.LLBBridgeClient { - g := gatewayapi.NewLLBBridgeClient(c.conn) - return &gatewayClientForBuild{g, buildid} -} - -type gatewayClientForBuild struct { - gateway gatewayapi.LLBBridgeClient - buildID string -} - -func (g *gatewayClientForBuild) ResolveImageConfig(ctx context.Context, in *gatewayapi.ResolveImageConfigRequest, opts ...grpc.CallOption) (*gatewayapi.ResolveImageConfigResponse, error) { - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.ResolveImageConfig(ctx, in, opts...) -} - -func (g *gatewayClientForBuild) Solve(ctx context.Context, in *gatewayapi.SolveRequest, opts ...grpc.CallOption) (*gatewayapi.SolveResponse, error) { - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.Solve(ctx, in, opts...) -} - -func (g *gatewayClientForBuild) ReadFile(ctx context.Context, in *gatewayapi.ReadFileRequest, opts ...grpc.CallOption) (*gatewayapi.ReadFileResponse, error) { - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.ReadFile(ctx, in, opts...) 
-} - -func (g *gatewayClientForBuild) ReadDir(ctx context.Context, in *gatewayapi.ReadDirRequest, opts ...grpc.CallOption) (*gatewayapi.ReadDirResponse, error) { - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.ReadDir(ctx, in, opts...) -} - -func (g *gatewayClientForBuild) StatFile(ctx context.Context, in *gatewayapi.StatFileRequest, opts ...grpc.CallOption) (*gatewayapi.StatFileResponse, error) { - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.StatFile(ctx, in, opts...) -} - -func (g *gatewayClientForBuild) Ping(ctx context.Context, in *gatewayapi.PingRequest, opts ...grpc.CallOption) (*gatewayapi.PongResponse, error) { - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.Ping(ctx, in, opts...) -} - -func (g *gatewayClientForBuild) Return(ctx context.Context, in *gatewayapi.ReturnRequest, opts ...grpc.CallOption) (*gatewayapi.ReturnResponse, error) { - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.Return(ctx, in, opts...) -} diff --git a/vendor/github.com/moby/buildkit/client/build_test.go b/vendor/github.com/moby/buildkit/client/build_test.go deleted file mode 100644 index 95f4c995551f..000000000000 --- a/vendor/github.com/moby/buildkit/client/build_test.go +++ /dev/null @@ -1,173 +0,0 @@ -package client - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/gateway/client" - gatewayapi "github.com/moby/buildkit/frontend/gateway/pb" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/util/testutil/integration" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -func TestClientGatewayIntegration(t *testing.T) { - integration.Run(t, []integration.Test{ - testClientGatewaySolve, - testClientGatewayFailedSolve, - testClientGatewayEmptySolve, - testNoBuildID, - testUnknownBuildID, - }, integration.WithMirroredImages(integration.OfficialImages("busybox:latest"))) -} - -func testClientGatewaySolve(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - - ctx := context.TODO() - - c, err := New(ctx, sb.Address()) - require.NoError(t, err) - defer c.Close() - - product := "buildkit_test" - optKey := "test-string" - - b := func(ctx context.Context, c client.Client) (*client.Result, error) { - if c.BuildOpts().Product != product { - return nil, errors.Errorf("expected product %q, got %q", product, c.BuildOpts().Product) - } - opts := c.BuildOpts().Opts - testStr, ok := opts[optKey] - if !ok { - return nil, errors.Errorf(`build option %q missing`, optKey) - } - - run := llb.Image("busybox:latest").Run( - llb.ReadonlyRootFS(), - llb.Args([]string{"/bin/sh", "-ec", `echo -n '` + testStr + `' > /out/foo`}), - ) - st := run.AddMount("/out", llb.Scratch()) - - def, err := st.Marshal() - if err != nil { - return nil, errors.Wrap(err, "failed to marshal state") - } - - r, err := c.Solve(ctx, client.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return nil, errors.Wrap(err, "failed to solve") - } - - read, err := r.Ref.ReadFile(ctx, client.ReadRequest{ - Filename: "/foo", - }) - if err != nil { - return nil, errors.Wrap(err, "failed to read result") - } - if testStr != string(read) { - return nil, errors.Errorf("read back %q, expected %q", string(read), testStr) - } - return r, nil - } - - tmpdir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - testStr := "This is a test" - - _, err = c.Build(ctx, 
SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: tmpdir, - FrontendAttrs: map[string]string{ - optKey: testStr, - }, - }, product, b, nil) - require.NoError(t, err) - - read, err := ioutil.ReadFile(filepath.Join(tmpdir, "foo")) - require.NoError(t, err) - require.Equal(t, testStr, string(read)) - - checkAllReleasable(t, c, sb, true) -} - -func testClientGatewayFailedSolve(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - - ctx := context.TODO() - - c, err := New(ctx, sb.Address()) - require.NoError(t, err) - defer c.Close() - - b := func(ctx context.Context, c client.Client) (*client.Result, error) { - return nil, errors.New("expected to fail") - } - - _, err = c.Build(ctx, SolveOpt{}, "", b, nil) - require.Error(t, err) - require.Contains(t, err.Error(), "expected to fail") -} - -func testClientGatewayEmptySolve(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - - ctx := context.TODO() - - c, err := New(ctx, sb.Address()) - require.NoError(t, err) - defer c.Close() - - b := func(ctx context.Context, c client.Client) (*client.Result, error) { - r, err := c.Solve(ctx, client.SolveRequest{}) - if err != nil { - return nil, errors.Wrap(err, "failed to solve") - } - if r.Ref != nil || r.Refs != nil || r.Metadata != nil { - return nil, errors.Errorf("got unexpected non-empty result %+v", r) - } - return r, nil - } - - _, err = c.Build(ctx, SolveOpt{}, "", b, nil) - require.NoError(t, err) -} - -func testNoBuildID(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - - ctx := context.TODO() - - c, err := New(ctx, sb.Address()) - require.NoError(t, err) - defer c.Close() - - g := gatewayapi.NewLLBBridgeClient(c.conn) - _, err = g.Ping(ctx, &gatewayapi.PingRequest{}) - require.Error(t, err) - require.Contains(t, err.Error(), "no buildid found in context") -} - -func testUnknownBuildID(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - - ctx := context.TODO() - - c, err := New(ctx, sb.Address()) - require.NoError(t, err) - defer c.Close() - - g := c.gatewayClientForBuild(t.Name() + identity.NewID()) - _, err = g.Ping(ctx, &gatewayapi.PingRequest{}) - require.Error(t, err) - require.Contains(t, err.Error(), "no such job") -} diff --git a/vendor/github.com/moby/buildkit/client/buildid/metadata.go b/vendor/github.com/moby/buildkit/client/buildid/metadata.go deleted file mode 100644 index bb169b8fe4ec..000000000000 --- a/vendor/github.com/moby/buildkit/client/buildid/metadata.go +++ /dev/null @@ -1,29 +0,0 @@ -package buildid - -import ( - "context" - - "google.golang.org/grpc/metadata" -) - -var metadataKey = "buildkit-controlapi-buildid" - -func AppendToOutgoingContext(ctx context.Context, id string) context.Context { - if id != "" { - return metadata.AppendToOutgoingContext(ctx, metadataKey, id) - } - return ctx -} - -func FromIncomingContext(ctx context.Context) string { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return "" - } - - if ids := md.Get(metadataKey); len(ids) == 1 { - return ids[0] - } - - return "" -} diff --git a/vendor/github.com/moby/buildkit/client/client.go b/vendor/github.com/moby/buildkit/client/client.go deleted file mode 100644 index 6126acdc1b36..000000000000 --- a/vendor/github.com/moby/buildkit/client/client.go +++ /dev/null @@ -1,130 +0,0 @@ -package client - -import ( - "context" - "crypto/tls" - "crypto/x509" - "io/ioutil" - - "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - controlapi "github.com/moby/buildkit/api/services/control" - "github.com/moby/buildkit/util/appdefaults" - opentracing 
"github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" -) - -type Client struct { - conn *grpc.ClientConn -} - -type ClientOpt interface{} - -// New returns a new buildkit client. Address can be empty for the system-default address. -func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) { - gopts := []grpc.DialOption{ - grpc.WithDialer(dialer), - } - needWithInsecure := true - for _, o := range opts { - if _, ok := o.(*withFailFast); ok { - gopts = append(gopts, grpc.FailOnNonTempDialError(true)) - } - if credInfo, ok := o.(*withCredentials); ok { - opt, err := loadCredentials(credInfo) - if err != nil { - return nil, err - } - gopts = append(gopts, opt) - needWithInsecure = false - } - if wt, ok := o.(*withTracer); ok { - gopts = append(gopts, - grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads())), - grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(wt.tracer))) - } - } - if needWithInsecure { - gopts = append(gopts, grpc.WithInsecure()) - } - if address == "" { - address = appdefaults.Address - } - conn, err := grpc.DialContext(ctx, address, gopts...) - if err != nil { - return nil, errors.Wrapf(err, "failed to dial %q . make sure buildkitd is running", address) - } - c := &Client{ - conn: conn, - } - return c, nil -} - -func (c *Client) controlClient() controlapi.ControlClient { - return controlapi.NewControlClient(c.conn) -} - -func (c *Client) Close() error { - return c.conn.Close() -} - -type withFailFast struct{} - -func WithFailFast() ClientOpt { - return &withFailFast{} -} - -type withCredentials struct { - ServerName string - CACert string - Cert string - Key string -} - -// WithCredentials configures the TLS parameters of the client. 
-// Arguments: -// * serverName: specifies the name of the target server -// * ca: specifies the filepath of the CA certificate to use for verification -// * cert: specifies the filepath of the client certificate -// * key: specifies the filepath of the client key -func WithCredentials(serverName, ca, cert, key string) ClientOpt { - return &withCredentials{serverName, ca, cert, key} -} - -func loadCredentials(opts *withCredentials) (grpc.DialOption, error) { - ca, err := ioutil.ReadFile(opts.CACert) - if err != nil { - return nil, errors.Wrap(err, "could not read ca certificate") - } - - certPool := x509.NewCertPool() - if ok := certPool.AppendCertsFromPEM(ca); !ok { - return nil, errors.New("failed to append ca certs") - } - - cfg := &tls.Config{ - ServerName: opts.ServerName, - RootCAs: certPool, - } - - // we will produce an error if the user forgot about either cert or key if at least one is specified - if opts.Cert != "" || opts.Key != "" { - cert, err := tls.LoadX509KeyPair(opts.Cert, opts.Key) - if err != nil { - return nil, errors.Wrap(err, "could not read certificate/key") - } - cfg.Certificates = []tls.Certificate{cert} - cfg.BuildNameToCertificate() - } - - return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil -} - -func WithTracer(t opentracing.Tracer) ClientOpt { - return &withTracer{t} -} - -type withTracer struct { - tracer opentracing.Tracer -} diff --git a/vendor/github.com/moby/buildkit/client/client_test.go b/vendor/github.com/moby/buildkit/client/client_test.go deleted file mode 100644 index 214ac33a79cd..000000000000 --- a/vendor/github.com/moby/buildkit/client/client_test.go +++ /dev/null @@ -1,1873 +0,0 @@ -package client - -import ( - "archive/tar" - "context" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/json" - "encoding/pem" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "os" - "path/filepath" - "runtime" - "strings" - "testing" - "time" - - "github.com/containerd/containerd" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/snapshots" - "github.com/containerd/continuity/fs/fstest" - "github.com/moby/buildkit/client/llb" - gateway "github.com/moby/buildkit/frontend/gateway/client" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/secrets/secretsprovider" - "github.com/moby/buildkit/session/sshforward/sshprovider" - "github.com/moby/buildkit/util/testutil" - "github.com/moby/buildkit/util/testutil/httpserver" - "github.com/moby/buildkit/util/testutil/integration" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - "golang.org/x/crypto/ssh/agent" - "golang.org/x/sync/errgroup" -) - -type nopWriteCloser struct { - io.Writer -} - -func (nopWriteCloser) Close() error { return nil } - -func TestClientIntegration(t *testing.T) { - integration.Run(t, []integration.Test{ - testRelativeWorkDir, - testCallDiskUsage, - testBuildMultiMount, - testBuildHTTPSource, - testBuildPushAndValidate, - testResolveAndHosts, - testUser, - testOCIExporter, - testWhiteoutParentDir, - testFrontendImageNaming, - testDuplicateWhiteouts, - testSchema1Image, - testMountWithNoSource, - testInvalidExporter, - testReadonlyRootFS, - testBasicCacheImportExport, - testCachedMounts, - testProxyEnv, - testLocalSymlinkEscape, - testTmpfsMounts, - testSharedCacheMounts, - testLockedCacheMounts, - 
testDuplicateCacheMount, - testParallelLocalBuilds, - testSecretMounts, - testExtraHosts, - testNetworkMode, - testFrontendMetadataReturn, - testSSHMount, - testStdinClosed, - testHostnameLookup, - }, - integration.WithMirroredImages(integration.OfficialImages("busybox:latest", "alpine:latest")), - ) -} - -func newContainerd(cdAddress string) (*containerd.Client, error) { - return containerd.New(cdAddress, containerd.WithTimeout(60*time.Second)) -} - -func testHostnameLookup(t *testing.T, sb integration.Sandbox) { - if sb.Rootless() { - t.SkipNow() - } - - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - st := llb.Image("busybox:latest").Run(llb.Shlex(`sh -c "ping -c 1 $(hostname)"`)) - - def, err := st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.NoError(t, err) -} - -// moby/buildkit#614 -func testStdinClosed(t *testing.T, sb integration.Sandbox) { - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - st := llb.Image("busybox:latest").Run(llb.Shlex("cat")) - - def, err := st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.NoError(t, err) -} - -func testSSHMount(t *testing.T, sb integration.Sandbox) { - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - a := agent.NewKeyring() - - k, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err) - - err = a.Add(agent.AddedKey{PrivateKey: k}) - require.NoError(t, err) - - sockPath, clean, err := makeSSHAgentSock(a) - require.NoError(t, err) - defer clean() - - ssh, err := sshprovider.NewSSHAgentProvider([]sshprovider.AgentConfig{{ - Paths: []string{sockPath}, - }}) - require.NoError(t, err) - - // no ssh exposed - st := llb.Image("busybox:latest").Run(llb.Shlex(`nosuchcmd`), llb.AddSSHSocket()) - def, err := st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.Error(t, err) - require.Contains(t, err.Error(), "no SSH key ") - - // custom ID not exposed - st = llb.Image("busybox:latest").Run(llb.Shlex(`nosuchcmd`), llb.AddSSHSocket(llb.SSHID("customID"))) - def, err = st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Session: []session.Attachable{ssh}, - }, nil) - require.Error(t, err) - require.Contains(t, err.Error(), "unset ssh forward key customID") - - // missing custom ID ignored on optional - st = llb.Image("busybox:latest").Run(llb.Shlex(`ls`), llb.AddSSHSocket(llb.SSHID("customID"), llb.SSHOptional)) - def, err = st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Session: []session.Attachable{ssh}, - }, nil) - require.NoError(t, err) - - // valid socket - st = llb.Image("alpine:latest"). - Run(llb.Shlex(`apk add --no-cache openssh`)). 
- Run(llb.Shlex(`sh -c 'echo -n $SSH_AUTH_SOCK > /out/sock && ssh-add -l > /out/out'`), - llb.AddSSHSocket()) - - out := st.AddMount("/out", llb.Scratch()) - def, err = out.Marshal() - require.NoError(t, err) - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: destDir, - Session: []session.Attachable{ssh}, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "sock")) - require.NoError(t, err) - require.Equal(t, "/run/buildkit/ssh_agent.0", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "out")) - require.NoError(t, err) - require.Contains(t, string(dt), "2048") - require.Contains(t, string(dt), "(RSA)") - - // forbidden command - st = llb.Image("alpine:latest"). - Run(llb.Shlex(`apk add --no-cache openssh`)). - Run(llb.Shlex(`sh -c 'ssh-keygen -f /tmp/key -N "" && ssh-add -k /tmp/key 2> /out/out || true'`), - llb.AddSSHSocket()) - - out = st.AddMount("/out", llb.Scratch()) - def, err = out.Marshal() - require.NoError(t, err) - - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: destDir, - Session: []session.Attachable{ssh}, - }, nil) - require.NoError(t, err) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "out")) - require.NoError(t, err) - require.Contains(t, string(dt), "agent refused operation") - - // valid socket from key on disk - st = llb.Image("alpine:latest"). - Run(llb.Shlex(`apk add --no-cache openssh`)). - Run(llb.Shlex(`sh -c 'ssh-add -l > /out/out'`), - llb.AddSSHSocket()) - - out = st.AddMount("/out", llb.Scratch()) - def, err = out.Marshal() - require.NoError(t, err) - - k, err = rsa.GenerateKey(rand.Reader, 1024) - require.NoError(t, err) - - dt = pem.EncodeToMemory( - &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(k), - }, - ) - - tmpDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - err = ioutil.WriteFile(filepath.Join(tmpDir, "key"), dt, 0600) - require.NoError(t, err) - - ssh, err = sshprovider.NewSSHAgentProvider([]sshprovider.AgentConfig{{ - Paths: []string{filepath.Join(tmpDir, "key")}, - }}) - require.NoError(t, err) - - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: destDir, - Session: []session.Attachable{ssh}, - }, nil) - require.NoError(t, err) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "out")) - require.NoError(t, err) - require.Contains(t, string(dt), "1024") - require.Contains(t, string(dt), "(RSA)") -} - -func testExtraHosts(t *testing.T, sb integration.Sandbox) { - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - st := llb.Image("busybox:latest"). - Run(llb.Shlex(`sh -c 'cat /etc/hosts | grep myhost | grep 1.2.3.4'`), llb.AddExtraHost("myhost", net.ParseIP("1.2.3.4"))) - - def, err := st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.NoError(t, err) -} - -func testNetworkMode(t *testing.T, sb integration.Sandbox) { - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - st := llb.Image("busybox:latest"). 
- Run(llb.Shlex(`sh -c 'wget https://example.com 2>&1 | grep "wget: bad address"'`), llb.Network(llb.NetModeNone)) - - def, err := st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.NoError(t, err) - - st2 := llb.Image("busybox:latest"). - Run(llb.Shlex(`ifconfig`), llb.Network(llb.NetModeHost)) - - def, err = st2.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - // Currently disabled globally by default - // AllowedEntitlements: []entitlements.Entitlement{entitlements.EntitlementNetworkHost}, - }, nil) - require.Error(t, err) - require.Contains(t, err.Error(), "network.host is not allowed") -} - -func testFrontendImageNaming(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - registry, err := sb.NewRegistry() - if errors.Cause(err) == integration.ErrorRequirements { - t.Skip(err.Error()) - } - require.NoError(t, err) - - checkImageName := map[string]func(out, imageName string, exporterResponse map[string]string){ - ExporterOCI: func(out, imageName string, exporterResponse map[string]string) { - // Nothing to check - return - }, - ExporterDocker: func(out, imageName string, exporterResponse map[string]string) { - require.Contains(t, exporterResponse, "image.name") - require.Equal(t, exporterResponse["image.name"], "docker.io/library/"+imageName) - - dt, err := ioutil.ReadFile(out) - require.NoError(t, err) - - m, err := testutil.ReadTarToMap(dt, false) - require.NoError(t, err) - - _, ok := m["oci-layout"] - require.True(t, ok) - - var index ocispec.Index - err = json.Unmarshal(m["index.json"].Data, &index) - require.NoError(t, err) - require.Equal(t, 2, index.SchemaVersion) - require.Equal(t, 1, len(index.Manifests)) - - var dockerMfst []struct { - RepoTags []string - } - err = json.Unmarshal(m["manifest.json"].Data, &dockerMfst) - require.NoError(t, err) - require.Equal(t, 1, len(dockerMfst)) - require.Equal(t, 1, len(dockerMfst[0].RepoTags)) - require.Equal(t, "docker.io/library/"+imageName, dockerMfst[0].RepoTags[0]) - }, - ExporterImage: func(_, imageName string, exporterResponse map[string]string) { - require.Contains(t, exporterResponse, "image.name") - require.Equal(t, exporterResponse["image.name"], imageName) - - // check if we can pull (requires containerd) - var cdAddress string - if cd, ok := sb.(interface { - ContainerdAddress() string - }); !ok { - return - } else { - cdAddress = cd.ContainerdAddress() - } - - // TODO: make public pull helper function so this can be checked for standalone as well - - client, err := containerd.New(cdAddress) - require.NoError(t, err) - defer client.Close() - - ctx := namespaces.WithNamespace(context.Background(), "buildkit") - - // check image in containerd - _, err = client.ImageService().Get(ctx, imageName) - require.NoError(t, err) - - // deleting image should release all content - err = client.ImageService().Delete(ctx, imageName, images.SynchronousDelete()) - require.NoError(t, err) - - checkAllReleasable(t, c, sb, true) - - _, err = client.Pull(ctx, imageName) - require.NoError(t, err) - - err = client.ImageService().Delete(ctx, imageName, images.SynchronousDelete()) - require.NoError(t, err) - }, - } - - // A caller provided name takes precedence over one returned by the frontend. Iterate over both options. - for _, winner := range []string{"frontend", "caller"} { - winner := winner // capture loop variable. 
- - // The double layer of `t.Run` here is required so - // that the inner-most tests (with the actual - // functionality) have definitely completed before the - // sandbox and registry cleanups (defered above) are run. - t.Run(winner, func(t *testing.T) { - for _, exp := range []string{ExporterOCI, ExporterDocker, ExporterImage} { - exp := exp // capture loop variable. - t.Run(exp, func(t *testing.T) { - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - so := SolveOpt{ - Exporter: exp, - ExporterAttrs: map[string]string{}, - } - - out := filepath.Join(destDir, "out.tar") - - imageName := "image-" + exp + "-fe:latest" - - switch exp { - case ExporterOCI: - t.Skip("oci exporter does not support named images") - case ExporterDocker: - outW, err := os.Create(out) - require.NoError(t, err) - so.ExporterOutput = outW - case ExporterImage: - imageName = registry + "/" + imageName - so.ExporterAttrs["push"] = "true" - } - - feName := imageName - switch winner { - case "caller": - feName = "loser:latest" - so.ExporterAttrs["name"] = imageName - case "frontend": - so.ExporterAttrs["name"] = "*" - } - - frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { - res := gateway.NewResult() - res.AddMeta("image.name", []byte(feName)) - return res, nil - } - - resp, err := c.Build(context.TODO(), so, "", frontend, nil) - require.NoError(t, err) - - checkImageName[exp](out, imageName, resp.ExporterResponse) - }) - } - }) - } - - checkAllReleasable(t, c, sb, true) -} - -func testSecretMounts(t *testing.T, sb integration.Sandbox) { - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - st := llb.Image("busybox:latest"). - Run(llb.Shlex(`sh -c 'mount | grep mysecret | grep "type tmpfs" && [ "$(cat /run/secrets/mysecret)" = 'foo-secret' ]'`), llb.AddSecret("/run/secrets/mysecret")) - - def, err := st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Session: []session.Attachable{secretsprovider.FromMap(map[string][]byte{ - "/run/secrets/mysecret": []byte("foo-secret"), - })}, - }, nil) - require.NoError(t, err) - - // test optional - st = llb.Image("busybox:latest"). - Run(llb.Shlex(`echo secret2`), llb.AddSecret("/run/secrets/mysecret2", llb.SecretOptional)) - - def, err = st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Session: []session.Attachable{secretsprovider.FromMap(map[string][]byte{})}, - }, nil) - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.NoError(t, err) - - st = llb.Image("busybox:latest"). - Run(llb.Shlex(`echo secret3`), llb.AddSecret("/run/secrets/mysecret3")) - - def, err = st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Session: []session.Attachable{secretsprovider.FromMap(map[string][]byte{})}, - }, nil) - require.Error(t, err) - - // test id,perm,uid - st = llb.Image("busybox:latest"). 
- Run(llb.Shlex(`sh -c '[ "$(stat -c "%%u %%g %%f" /run/secrets/mysecret4)" = "1 1 81ff" ]' `), llb.AddSecret("/run/secrets/mysecret4", llb.SecretID("mysecret"), llb.SecretFileOpt(1, 1, 0777))) - - def, err = st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Session: []session.Attachable{secretsprovider.FromMap(map[string][]byte{ - "mysecret": []byte("pw"), - })}, - }, nil) - require.NoError(t, err) -} - -func testTmpfsMounts(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - st := llb.Image("busybox:latest"). - Run(llb.Shlex(`sh -c 'mount | grep /foobar | grep "type tmpfs" && touch /foobar/test'`), llb.AddMount("/foobar", llb.Scratch(), llb.Tmpfs())) - - def, err := st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.NoError(t, err) -} - -func testLocalSymlinkEscape(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - test := []byte(`set -ex -[[ -L /mount/foo ]] -[[ -L /mount/sub/bar ]] -[[ -L /mount/bax ]] -[[ -f /mount/bay ]] -[[ -f /mount/sub/sub2/file ]] -[[ ! -f /mount/baz ]] -[[ ! -f /mount/etc/passwd ]] -[[ ! -f /mount/etc/group ]] -[[ $(readlink /mount/foo) == "/etc/passwd" ]] -[[ $(readlink /mount/sub/bar) == "../../../etc/group" ]] -`) - - dir, err := tmpdir( - // point to absolute path that is not part of dir - fstest.Symlink("/etc/passwd", "foo"), - fstest.CreateDir("sub", 0700), - // point outside of the dir - fstest.Symlink("../../../etc/group", "sub/bar"), - // regular valid symlink - fstest.Symlink("bay", "bax"), - // target for symlink (not requested) - fstest.CreateFile("bay", []byte{}, 0600), - // file with many subdirs - fstest.CreateDir("sub/sub2", 0700), - fstest.CreateFile("sub/sub2/file", []byte{}, 0600), - // unused file that shouldn't be included - fstest.CreateFile("baz", []byte{}, 0600), - fstest.CreateFile("test.sh", test, 0700), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - local := llb.Local("mylocal", llb.FollowPaths([]string{ - "test.sh", "foo", "sub/bar", "bax", "sub/sub2/file", - })) - - st := llb.Image("busybox:latest"). - Run(llb.Shlex(`sh /mount/test.sh`), llb.AddMount("/mount", local, llb.Readonly)) - - def, err := st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - LocalDirs: map[string]string{ - "mylocal": dir, - }, - }, nil) - require.NoError(t, err) -} - -func testRelativeWorkDir(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - pwd := llb.Image("docker.io/library/busybox:latest"). - Dir("test1"). - Dir("test2"). - Run(llb.Shlex(`sh -c "pwd > /out/pwd"`)). 
- AddMount("/out", llb.Scratch()) - - def, err := pwd.Marshal() - require.NoError(t, err) - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: destDir, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "pwd")) - require.NoError(t, err) - require.Equal(t, []byte("/test1/test2\n"), dt) -} - -func testCallDiskUsage(t *testing.T, sb integration.Sandbox) { - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - _, err = c.DiskUsage(context.TODO()) - require.NoError(t, err) -} - -func testBuildMultiMount(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - alpine := llb.Image("docker.io/library/alpine:latest") - ls := alpine.Run(llb.Shlex("/bin/ls -l")) - busybox := llb.Image("docker.io/library/busybox:latest") - cp := ls.Run(llb.Shlex("/bin/cp -a /busybox/etc/passwd baz")) - cp.AddMount("/busybox", busybox) - - def, err := cp.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.NoError(t, err) - - checkAllReleasable(t, c, sb, true) -} - -func testBuildHTTPSource(t *testing.T, sb integration.Sandbox) { - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - modTime := time.Now().Add(-24 * time.Hour) // avoid falso positive with current time - - resp := httpserver.Response{ - Etag: identity.NewID(), - Content: []byte("content1"), - LastModified: &modTime, - } - - server := httpserver.NewTestServer(map[string]httpserver.Response{ - "/foo": resp, - }) - defer server.Close() - - // invalid URL first - st := llb.HTTP(server.URL + "/bar") - - def, err := st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.Error(t, err) - require.Contains(t, err.Error(), "invalid response status 404") - - // first correct request - st = llb.HTTP(server.URL + "/foo") - - def, err = st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.NoError(t, err) - - require.Equal(t, server.Stats("/foo").AllRequests, 1) - require.Equal(t, server.Stats("/foo").CachedRequests, 0) - - tmpdir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: tmpdir, - }, nil) - require.NoError(t, err) - - require.Equal(t, server.Stats("/foo").AllRequests, 2) - require.Equal(t, server.Stats("/foo").CachedRequests, 1) - - dt, err := ioutil.ReadFile(filepath.Join(tmpdir, "foo")) - require.NoError(t, err) - require.Equal(t, []byte("content1"), dt) - - // test extra options - st = llb.HTTP(server.URL+"/foo", llb.Filename("bar"), llb.Chmod(0741), llb.Chown(1000, 1000)) - - def, err = st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: tmpdir, - }, nil) - require.NoError(t, err) - - require.Equal(t, server.Stats("/foo").AllRequests, 3) - require.Equal(t, server.Stats("/foo").CachedRequests, 1) - - dt, err = ioutil.ReadFile(filepath.Join(tmpdir, "bar")) - require.NoError(t, err) - require.Equal(t, []byte("content1"), dt) - - fi, err := os.Stat(filepath.Join(tmpdir, "bar")) - require.NoError(t, err) - 
require.Equal(t, fi.ModTime().Format(http.TimeFormat), modTime.Format(http.TimeFormat)) - require.Equal(t, int(fi.Mode()&0777), 0741) - - checkAllReleasable(t, c, sb, true) - - // TODO: check that second request was marked as cached -} - -func testResolveAndHosts(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("busybox:latest") - st := llb.Scratch() - - run := func(cmd string) { - st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) - } - - run(`sh -c "cp /etc/resolv.conf ."`) - run(`sh -c "cp /etc/hosts ."`) - - def, err := st.Marshal() - require.NoError(t, err) - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: destDir, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "resolv.conf")) - require.NoError(t, err) - require.Contains(t, string(dt), "nameserver") - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "hosts")) - require.NoError(t, err) - require.Contains(t, string(dt), "127.0.0.1 localhost") - -} - -func testUser(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - st := llb.Image("busybox:latest").Run(llb.Shlex(`sh -c "mkdir -m 0777 /wd"`)) - - run := func(user, cmd string) { - st = st.Run(llb.Shlex(cmd), llb.Dir("/wd"), llb.User(user)) - } - - run("daemon", `sh -c "id -nu > user"`) - run("daemon:daemon", `sh -c "id -ng > group"`) - run("daemon:nogroup", `sh -c "id -ng > nogroup"`) - run("1:1", `sh -c "id -g > userone"`) - - st = st.Run(llb.Shlex("cp -a /wd/. 
/out/")) - out := st.AddMount("/out", llb.Scratch()) - - def, err := out.Marshal() - require.NoError(t, err) - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: destDir, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "user")) - require.NoError(t, err) - require.Contains(t, string(dt), "daemon") - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "group")) - require.NoError(t, err) - require.Contains(t, string(dt), "daemon") - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "nogroup")) - require.NoError(t, err) - require.Contains(t, string(dt), "nogroup") - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "userone")) - require.NoError(t, err) - require.Contains(t, string(dt), "1") - - checkAllReleasable(t, c, sb, true) -} - -func testOCIExporter(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("busybox:latest") - st := llb.Scratch() - - run := func(cmd string) { - st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) - } - - run(`sh -c "echo -n first > foo"`) - run(`sh -c "echo -n second > bar"`) - - def, err := st.Marshal() - require.NoError(t, err) - - for _, exp := range []string{ExporterOCI, ExporterDocker} { - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - out := filepath.Join(destDir, "out.tar") - outW, err := os.Create(out) - require.NoError(t, err) - target := "example.com/buildkit/testoci:latest" - attrs := map[string]string{} - if exp == ExporterDocker { - attrs["name"] = target - } - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: exp, - ExporterAttrs: attrs, - ExporterOutput: outW, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(out) - require.NoError(t, err) - - m, err := testutil.ReadTarToMap(dt, false) - require.NoError(t, err) - - _, ok := m["oci-layout"] - require.True(t, ok) - - var index ocispec.Index - err = json.Unmarshal(m["index.json"].Data, &index) - require.NoError(t, err) - require.Equal(t, 2, index.SchemaVersion) - require.Equal(t, 1, len(index.Manifests)) - - var mfst ocispec.Manifest - err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst) - require.NoError(t, err) - require.Equal(t, 2, len(mfst.Layers)) - - var ociimg ocispec.Image - err = json.Unmarshal(m["blobs/sha256/"+mfst.Config.Digest.Hex()].Data, &ociimg) - require.NoError(t, err) - require.Equal(t, "layers", ociimg.RootFS.Type) - require.Equal(t, 2, len(ociimg.RootFS.DiffIDs)) - - _, ok = m["blobs/sha256/"+mfst.Layers[0].Digest.Hex()] - require.True(t, ok) - _, ok = m["blobs/sha256/"+mfst.Layers[1].Digest.Hex()] - require.True(t, ok) - - if exp != ExporterDocker { - continue - } - - var dockerMfst []struct { - Config string - RepoTags []string - Layers []string - } - err = json.Unmarshal(m["manifest.json"].Data, &dockerMfst) - require.NoError(t, err) - require.Equal(t, 1, len(dockerMfst)) - - _, ok = m[dockerMfst[0].Config] - require.True(t, ok) - require.Equal(t, 2, len(dockerMfst[0].Layers)) - require.Equal(t, 1, len(dockerMfst[0].RepoTags)) - require.Equal(t, target, dockerMfst[0].RepoTags[0]) - - for _, l := range dockerMfst[0].Layers { - _, ok := m[l] - require.True(t, ok) - } - } - - checkAllReleasable(t, c, sb, true) -} - -func 
testFrontendMetadataReturn(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { - res := gateway.NewResult() - res.AddMeta("frontend.returned", []byte("true")) - res.AddMeta("not-frontend.not-returned", []byte("false")) - res.AddMeta("frontendnot.returned.either", []byte("false")) - return res, nil - } - - res, err := c.Build(context.TODO(), SolveOpt{ - Exporter: ExporterOCI, - ExporterAttrs: map[string]string{}, - ExporterOutput: nopWriteCloser{ioutil.Discard}, - }, "", frontend, nil) - require.NoError(t, err) - require.Contains(t, res.ExporterResponse, "frontend.returned") - require.Equal(t, res.ExporterResponse["frontend.returned"], "true") - require.NotContains(t, res.ExporterResponse, "not-frontend.not-returned") - require.NotContains(t, res.ExporterResponse, "frontendnot.returned.either") - checkAllReleasable(t, c, sb, true) -} - -func testBuildPushAndValidate(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("busybox:latest") - st := llb.Scratch() - - run := func(cmd string) { - st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) - } - - run(`sh -e -c "mkdir -p foo/sub; echo -n first > foo/sub/bar; chmod 0741 foo;"`) - run(`true`) // this doesn't create a layer - run(`sh -c "echo -n second > foo/sub/baz"`) - - def, err := st.Marshal() - require.NoError(t, err) - - registry, err := sb.NewRegistry() - if errors.Cause(err) == integration.ErrorRequirements { - t.Skip(err.Error()) - } - require.NoError(t, err) - - target := registry + "/buildkit/testpush:latest" - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterImage, - ExporterAttrs: map[string]string{ - "name": target, - "push": "true", - }, - }, nil) - require.NoError(t, err) - - // test existence of the image with next build - firstBuild := llb.Image(target) - - def, err = firstBuild.Marshal() - require.NoError(t, err) - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: destDir, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo/sub/bar")) - require.NoError(t, err) - require.Equal(t, dt, []byte("first")) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo/sub/baz")) - require.NoError(t, err) - require.Equal(t, dt, []byte("second")) - - fi, err := os.Stat(filepath.Join(destDir, "foo")) - require.NoError(t, err) - require.Equal(t, 0741, int(fi.Mode()&0777)) - - checkAllReleasable(t, c, sb, false) - - // examine contents of exported tars (requires containerd) - var cdAddress string - if cd, ok := sb.(interface { - ContainerdAddress() string - }); !ok { - return - } else { - cdAddress = cd.ContainerdAddress() - } - - // TODO: make public pull helper function so this can be checked for standalone as well - - client, err := newContainerd(cdAddress) - require.NoError(t, err) - defer client.Close() - - ctx := namespaces.WithNamespace(context.Background(), "buildkit") - - // check image in containerd - _, err = client.ImageService().Get(ctx, target) - require.NoError(t, err) - - // deleting image should release all content - err = client.ImageService().Delete(ctx, target, images.SynchronousDelete()) - 
require.NoError(t, err) - - checkAllReleasable(t, c, sb, true) - - img, err := client.Pull(ctx, target) - require.NoError(t, err) - - desc, err := img.Config(ctx) - require.NoError(t, err) - - dt, err = content.ReadBlob(ctx, img.ContentStore(), desc) - require.NoError(t, err) - - var ociimg ocispec.Image - err = json.Unmarshal(dt, &ociimg) - require.NoError(t, err) - - require.NotEqual(t, "", ociimg.OS) - require.NotEqual(t, "", ociimg.Architecture) - require.NotEqual(t, "", ociimg.Config.WorkingDir) - require.Equal(t, "layers", ociimg.RootFS.Type) - require.Equal(t, 2, len(ociimg.RootFS.DiffIDs)) - require.NotNil(t, ociimg.Created) - require.True(t, time.Since(*ociimg.Created) < 2*time.Minute) - require.Condition(t, func() bool { - for _, env := range ociimg.Config.Env { - if strings.HasPrefix(env, "PATH=") { - return true - } - } - return false - }) - - require.Equal(t, 3, len(ociimg.History)) - require.Contains(t, ociimg.History[0].CreatedBy, "foo/sub/bar") - require.Contains(t, ociimg.History[1].CreatedBy, "true") - require.Contains(t, ociimg.History[2].CreatedBy, "foo/sub/baz") - require.False(t, ociimg.History[0].EmptyLayer) - require.True(t, ociimg.History[1].EmptyLayer) - require.False(t, ociimg.History[2].EmptyLayer) - - dt, err = content.ReadBlob(ctx, img.ContentStore(), img.Target()) - require.NoError(t, err) - - var mfst = struct { - MediaType string `json:"mediaType,omitempty"` - ocispec.Manifest - }{} - - err = json.Unmarshal(dt, &mfst) - require.NoError(t, err) - - require.Equal(t, images.MediaTypeDockerSchema2Manifest, mfst.MediaType) - require.Equal(t, 2, len(mfst.Layers)) - - dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispec.Descriptor{Digest: mfst.Layers[0].Digest}) - require.NoError(t, err) - - m, err := testutil.ReadTarToMap(dt, true) - require.NoError(t, err) - - item, ok := m["foo/"] - require.True(t, ok) - require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir) - require.Equal(t, 0741, int(item.Header.Mode&0777)) - - item, ok = m["foo/sub/"] - require.True(t, ok) - require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir) - - item, ok = m["foo/sub/bar"] - require.True(t, ok) - require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg) - require.Equal(t, []byte("first"), item.Data) - - _, ok = m["foo/sub/baz"] - require.False(t, ok) - - dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispec.Descriptor{Digest: mfst.Layers[1].Digest}) - require.NoError(t, err) - - m, err = testutil.ReadTarToMap(dt, true) - require.NoError(t, err) - - item, ok = m["foo/sub/baz"] - require.True(t, ok) - require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg) - require.Equal(t, []byte("second"), item.Data) - - item, ok = m["foo/"] - require.True(t, ok) - require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir) - require.Equal(t, 0741, int(item.Header.Mode&0777)) - - item, ok = m["foo/sub/"] - require.True(t, ok) - require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir) - - _, ok = m["foo/sub/bar"] - require.False(t, ok) -} - -func testBasicCacheImportExport(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - registry, err := sb.NewRegistry() - if errors.Cause(err) == integration.ErrorRequirements { - t.Skip(err.Error()) - } - require.NoError(t, err) - - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("busybox:latest") - st := llb.Scratch() - - run := func(cmd string) { - st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) - } - - run(`sh -c "echo -n foobar > const"`) - 
run(`sh -c "cat /dev/urandom | head -c 100 | sha256sum > unique"`) - - def, err := st.Marshal() - require.NoError(t, err) - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - target := registry + "/buildkit/testexport:latest" - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: destDir, - ExportCache: target, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "const")) - require.NoError(t, err) - require.Equal(t, string(dt), "foobar") - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "unique")) - require.NoError(t, err) - - err = c.Prune(context.TODO(), nil, PruneAll) - require.NoError(t, err) - - checkAllRemoved(t, c, sb) - - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: destDir, - ImportCache: []string{target}, - }, nil) - require.NoError(t, err) - - dt2, err := ioutil.ReadFile(filepath.Join(destDir, "const")) - require.NoError(t, err) - require.Equal(t, string(dt2), "foobar") - - dt2, err = ioutil.ReadFile(filepath.Join(destDir, "unique")) - require.NoError(t, err) - require.Equal(t, string(dt), string(dt2)) -} - -func testCachedMounts(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("busybox:latest") - // setup base for one of the cache sources - st := busybox.Run(llb.Shlex(`sh -c "echo -n base > baz"`), llb.Dir("/wd")) - base := st.AddMount("/wd", llb.Scratch()) - - st = busybox.Run(llb.Shlex(`sh -c "echo -n first > foo"`), llb.Dir("/wd")) - st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) - st = st.Run(llb.Shlex(`sh -c "cat foo && echo -n second > /wd2/bar"`), llb.Dir("/wd")) - st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) - st.AddMount("/wd2", base, llb.AsPersistentCacheDir("mycache2", llb.CacheMountShared)) - - def, err := st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.NoError(t, err) - - // repeat to make sure cache works - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.NoError(t, err) - - // second build using cache directories - st = busybox.Run(llb.Shlex(`sh -c "cp /src0/foo . && cp /src1/bar . 
&& cp /src1/baz ."`), llb.Dir("/wd")) - out := st.AddMount("/wd", llb.Scratch()) - st.AddMount("/src0", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) - st.AddMount("/src1", base, llb.AsPersistentCacheDir("mycache2", llb.CacheMountShared)) - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - def, err = out.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: destDir, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) - require.NoError(t, err) - require.Equal(t, string(dt), "first") - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "bar")) - require.NoError(t, err) - require.Equal(t, string(dt), "second") - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "baz")) - require.NoError(t, err) - require.Equal(t, string(dt), "base") - - checkAllReleasable(t, c, sb, true) -} - -func testSharedCacheMounts(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("busybox:latest") - st := busybox.Run(llb.Shlex(`sh -e -c "touch one; while [[ ! -f two ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd")) - st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) - - st2 := busybox.Run(llb.Shlex(`sh -e -c "touch two; while [[ ! -f one ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd")) - st2.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) - - out := busybox.Run(llb.Shlex("true")) - out.AddMount("/m1", st.Root()) - out.AddMount("/m2", st2.Root()) - - def, err := out.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.NoError(t, err) -} - -func testLockedCacheMounts(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("busybox:latest") - st := busybox.Run(llb.Shlex(`sh -e -c "touch one; if [[ -f two ]]; then exit 0; fi; for i in $(seq 10); do if [[ -f two ]]; then exit 1; fi; usleep 200000; done"`), llb.Dir("/wd")) - st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) - - st2 := busybox.Run(llb.Shlex(`sh -e -c "touch two; if [[ -f one ]]; then exit 0; fi; for i in $(seq 10); do if [[ -f one ]]; then exit 1; fi; usleep 200000; done"`), llb.Dir("/wd")) - st2.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) - - out := busybox.Run(llb.Shlex("true")) - out.AddMount("/m1", st.Root()) - out.AddMount("/m2", st2.Root()) - - def, err := out.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.NoError(t, err) -} - -func testDuplicateCacheMount(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("busybox:latest") - - out := busybox.Run(llb.Shlex(`sh -e -c "[[ ! 
-f /m2/foo ]]; touch /m1/foo; [[ -f /m2/foo ]];"`)) - out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) - out.AddMount("/m2", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) - - def, err := out.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.NoError(t, err) -} - -// containerd/containerd#2119 -func testDuplicateWhiteouts(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("busybox:latest") - st := llb.Scratch() - - run := func(cmd string) { - st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) - } - - run(`sh -e -c "mkdir -p d0 d1; echo -n first > d1/bar;"`) - run(`sh -c "rm -rf d0 d1"`) - - def, err := st.Marshal() - require.NoError(t, err) - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - out := filepath.Join(destDir, "out.tar") - outW, err := os.Create(out) - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterOCI, - ExporterOutput: outW, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(out) - require.NoError(t, err) - - m, err := testutil.ReadTarToMap(dt, false) - require.NoError(t, err) - - var index ocispec.Index - err = json.Unmarshal(m["index.json"].Data, &index) - require.NoError(t, err) - - var mfst ocispec.Manifest - err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst) - require.NoError(t, err) - - lastLayer := mfst.Layers[len(mfst.Layers)-1] - - layer, ok := m["blobs/sha256/"+lastLayer.Digest.Hex()] - require.True(t, ok) - - m, err = testutil.ReadTarToMap(layer.Data, true) - require.NoError(t, err) - - _, ok = m[".wh.d0"] - require.True(t, ok) - - _, ok = m[".wh.d1"] - require.True(t, ok) - - // check for a bug that added whiteout for subfile - _, ok = m["d1/.wh.bar"] - require.True(t, !ok) -} - -// #276 -func testWhiteoutParentDir(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("busybox:latest") - st := llb.Scratch() - - run := func(cmd string) { - st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) - } - - run(`sh -c "mkdir -p foo; echo -n first > foo/bar;"`) - run(`rm foo/bar`) - - def, err := st.Marshal() - require.NoError(t, err) - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - out := filepath.Join(destDir, "out.tar") - outW, err := os.Create(out) - require.NoError(t, err) - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterOCI, - ExporterOutput: outW, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(out) - require.NoError(t, err) - - m, err := testutil.ReadTarToMap(dt, false) - require.NoError(t, err) - - var index ocispec.Index - err = json.Unmarshal(m["index.json"].Data, &index) - require.NoError(t, err) - - var mfst ocispec.Manifest - err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst) - require.NoError(t, err) - - lastLayer := mfst.Layers[len(mfst.Layers)-1] - - layer, ok := m["blobs/sha256/"+lastLayer.Digest.Hex()] - require.True(t, ok) - - m, err = testutil.ReadTarToMap(layer.Data, true) - require.NoError(t, err) - - _, ok = m["foo/.wh.bar"] - require.True(t, ok) - - _, ok = 
m["foo/"] - require.True(t, ok) -} - -// #296 -func testSchema1Image(t *testing.T, sb integration.Sandbox) { - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - st := llb.Image("gcr.io/google_containers/pause:3.0@sha256:0d093c962a6c2dd8bb8727b661e2b5f13e9df884af9945b4cc7088d9350cd3ee") - - def, err := st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.NoError(t, err) - - checkAllReleasable(t, c, sb, true) -} - -// #319 -func testMountWithNoSource(t *testing.T, sb integration.Sandbox) { - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("docker.io/library/busybox:latest") - st := llb.Scratch() - - var nilState llb.State - - // This should never actually be run, but we want to succeed - // if it was, because we expect an error below, or a daemon - // panic if the issue has regressed. - run := busybox.Run( - llb.Args([]string{"/bin/true"}), - llb.AddMount("/nil", nilState, llb.SourcePath("/"), llb.Readonly)) - - st = run.AddMount("/mnt", st) - - def, err := st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.NoError(t, err) - - checkAllReleasable(t, c, sb, true) -} - -// #324 -func testReadonlyRootFS(t *testing.T, sb integration.Sandbox) { - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("docker.io/library/busybox:latest") - st := llb.Scratch() - - // The path /foo should be unwriteable. - run := busybox.Run( - llb.ReadonlyRootFS(), - llb.Args([]string{"/bin/touch", "/foo"})) - st = run.AddMount("/mnt", st) - - def, err := st.Marshal() - require.NoError(t, err) - - _, err = c.Solve(context.TODO(), def, SolveOpt{}, nil) - require.Error(t, err) - // Would prefer to detect more specifically "Read-only file - // system" but that isn't exposed here (it is on the stdio - // which we don't see). 
- require.Contains(t, err.Error(), "executor failed running [/bin/touch /foo]:") - - checkAllReleasable(t, c, sb, true) -} - -func testProxyEnv(t *testing.T, sb integration.Sandbox) { - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - base := llb.Image("docker.io/library/busybox:latest").Dir("/out") - cmd := `sh -c "echo -n $HTTP_PROXY-$HTTPS_PROXY-$NO_PROXY-$no_proxy > env"` - - st := base.Run(llb.Shlex(cmd), llb.WithProxy(llb.ProxyEnv{ - HttpProxy: "httpvalue", - HttpsProxy: "httpsvalue", - NoProxy: "noproxyvalue", - })) - out := st.AddMount("/out", llb.Scratch()) - - def, err := out.Marshal() - require.NoError(t, err) - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: destDir, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "env")) - require.NoError(t, err) - require.Equal(t, string(dt), "httpvalue-httpsvalue-noproxyvalue-noproxyvalue") - - // repeat to make sure proxy doesn't change cache - st = base.Run(llb.Shlex(cmd), llb.WithProxy(llb.ProxyEnv{ - HttpsProxy: "httpsvalue2", - NoProxy: "noproxyvalue2", - })) - out = st.AddMount("/out", llb.Scratch()) - - def, err = out.Marshal() - require.NoError(t, err) - - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: destDir, - }, nil) - require.NoError(t, err) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "env")) - require.NoError(t, err) - require.Equal(t, string(dt), "httpvalue-httpsvalue-noproxyvalue-noproxyvalue") -} - -func requiresLinux(t *testing.T) { - if runtime.GOOS != "linux" { - t.Skipf("unsupported GOOS: %s", runtime.GOOS) - } -} - -func checkAllRemoved(t *testing.T, c *Client, sb integration.Sandbox) { - retries := 0 - for { - require.True(t, 20 > retries) - retries++ - du, err := c.DiskUsage(context.TODO()) - require.NoError(t, err) - if len(du) > 0 { - time.Sleep(500 * time.Millisecond) - continue - } - break - } -} - -func checkAllReleasable(t *testing.T, c *Client, sb integration.Sandbox, checkContent bool) { - retries := 0 -loop0: - for { - require.True(t, 20 > retries) - retries++ - du, err := c.DiskUsage(context.TODO()) - require.NoError(t, err) - for _, d := range du { - if d.InUse { - time.Sleep(500 * time.Millisecond) - continue loop0 - } - } - break - } - - err := c.Prune(context.TODO(), nil, PruneAll) - require.NoError(t, err) - - du, err := c.DiskUsage(context.TODO()) - require.NoError(t, err) - require.Equal(t, 0, len(du)) - - // examine contents of exported tars (requires containerd) - var cdAddress string - if cd, ok := sb.(interface { - ContainerdAddress() string - }); !ok { - return - } else { - cdAddress = cd.ContainerdAddress() - } - - // TODO: make public pull helper function so this can be checked for standalone as well - - client, err := newContainerd(cdAddress) - require.NoError(t, err) - defer client.Close() - - ctx := namespaces.WithNamespace(context.Background(), "buildkit") - snapshotService := client.SnapshotService("overlayfs") - - retries = 0 - for { - count := 0 - err = snapshotService.Walk(ctx, func(context.Context, snapshots.Info) error { - count++ - return nil - }) - require.NoError(t, err) - if count == 0 { - break - } - require.True(t, 20 > retries) - retries++ - time.Sleep(500 * time.Millisecond) - } 
- - if !checkContent { - return - } - - retries = 0 - for { - count := 0 - err = client.ContentStore().Walk(ctx, func(content.Info) error { - count++ - return nil - }) - require.NoError(t, err) - if count == 0 { - break - } - require.True(t, 20 > retries) - retries++ - time.Sleep(500 * time.Millisecond) - } -} - -func testInvalidExporter(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - def, err := llb.Image("busybox:latest").Marshal() - require.NoError(t, err) - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - target := "example.com/buildkit/testoci:latest" - attrs := map[string]string{ - "name": target, - } - for _, exp := range []string{ExporterOCI, ExporterDocker} { - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: exp, - ExporterAttrs: attrs, - }, nil) - // output file writer is required - require.Error(t, err) - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: exp, - ExporterAttrs: attrs, - ExporterOutputDir: destDir, - }, nil) - // output directory is not supported - require.Error(t, err) - } - - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterAttrs: attrs, - }, nil) - // output directory is required - require.Error(t, err) - - f, err := os.Create(filepath.Join(destDir, "a")) - require.NoError(t, err) - defer f.Close() - _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterAttrs: attrs, - ExporterOutput: f, - }, nil) - // output file writer is not supported - require.Error(t, err) - - checkAllReleasable(t, c, sb, true) -} - -// moby/buildkit#492 -func testParallelLocalBuilds(t *testing.T, sb integration.Sandbox) { - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - c, err := New(ctx, sb.Address()) - require.NoError(t, err) - defer c.Close() - - eg, ctx := errgroup.WithContext(ctx) - - for i := 0; i < 3; i++ { - func(i int) { - eg.Go(func() error { - fn := fmt.Sprintf("test%d", i) - srcDir, err := tmpdir( - fstest.CreateFile(fn, []byte("contents"), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(srcDir) - - def, err := llb.Local("source").Marshal() - require.NoError(t, err) - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = c.Solve(ctx, def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - "source": srcDir, - }, - }, nil) - require.NoError(t, err) - - act, err := ioutil.ReadFile(filepath.Join(destDir, fn)) - require.NoError(t, err) - - require.Equal(t, "contents", string(act)) - return nil - }) - }(i) - } - - err = eg.Wait() - require.NoError(t, err) -} - -func tmpdir(appliers ...fstest.Applier) (string, error) { - tmpdir, err := ioutil.TempDir("", "buildkit-client") - if err != nil { - return "", err - } - if err := fstest.Apply(appliers...).Apply(tmpdir); err != nil { - return "", err - } - return tmpdir, nil -} - -func makeSSHAgentSock(agent agent.Agent) (p string, cleanup func() error, err error) { - tmpDir, err := ioutil.TempDir("", "buildkit") - if err != nil { - return "", nil, err - } - defer func() { - if err != nil { - os.RemoveAll(tmpDir) - } - }() - - sockPath := filepath.Join(tmpDir, "ssh_auth_sock") - - l, err := net.Listen("unix", sockPath) - if err != nil { - return "", nil, err - } - - s := &server{l: l} - go s.run(agent) - - return sockPath, func() error { - 
l.Close()
-		return os.RemoveAll(tmpDir)
-	}, nil
-}
-
-type server struct {
-	l net.Listener
-}
-
-func (s *server) run(a agent.Agent) error {
-	for {
-		c, err := s.l.Accept()
-		if err != nil {
-			return err
-		}
-
-		go agent.ServeAgent(a, c)
-	}
-}
diff --git a/vendor/github.com/moby/buildkit/client/client_unix.go b/vendor/github.com/moby/buildkit/client/client_unix.go
deleted file mode 100644
index 93afb956f1b0..000000000000
--- a/vendor/github.com/moby/buildkit/client/client_unix.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build !windows
-
-package client
-
-import (
-	"net"
-	"strings"
-	"time"
-
-	"github.com/pkg/errors"
-)
-
-func dialer(address string, timeout time.Duration) (net.Conn, error) {
-	addrParts := strings.SplitN(address, "://", 2)
-	if len(addrParts) != 2 {
-		return nil, errors.Errorf("invalid address %s", address)
-	}
-	return net.DialTimeout(addrParts[0], addrParts[1], timeout)
-}
diff --git a/vendor/github.com/moby/buildkit/client/client_windows.go b/vendor/github.com/moby/buildkit/client/client_windows.go
deleted file mode 100644
index 75905f520b18..000000000000
--- a/vendor/github.com/moby/buildkit/client/client_windows.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package client
-
-import (
-	"net"
-	"strings"
-	"time"
-
-	"github.com/Microsoft/go-winio"
-	"github.com/pkg/errors"
-)
-
-func dialer(address string, timeout time.Duration) (net.Conn, error) {
-	addrParts := strings.SplitN(address, "://", 2)
-	if len(addrParts) != 2 {
-		return nil, errors.Errorf("invalid address %s", address)
-	}
-	switch addrParts[0] {
-	case "npipe":
-		// n=-1 replaces every "/" with "\" in the pipe path; n=0 would be a no-op
-		address = strings.Replace(addrParts[1], "/", "\\", -1)
-		return winio.DialPipe(address, &timeout)
-	default:
-		return net.DialTimeout(addrParts[0], addrParts[1], timeout)
-	}
-}
diff --git a/vendor/github.com/moby/buildkit/client/diskusage.go b/vendor/github.com/moby/buildkit/client/diskusage.go
deleted file mode 100644
index 8034f977c173..000000000000
--- a/vendor/github.com/moby/buildkit/client/diskusage.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package client
-
-import (
-	"context"
-	"sort"
-	"time"
-
-	controlapi "github.com/moby/buildkit/api/services/control"
-	"github.com/pkg/errors"
-)
-
-type UsageInfo struct {
-	ID      string
-	Mutable bool
-	InUse   bool
-	Size    int64
-
-	CreatedAt   time.Time
-	LastUsedAt  *time.Time
-	UsageCount  int
-	Parent      string
-	Description string
-	RecordType  UsageRecordType
-	Shared      bool
-}
-
-func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*UsageInfo, error) {
-	info := &DiskUsageInfo{}
-	for _, o := range opts {
-		o.SetDiskUsageOption(info)
-	}
-
-	req := &controlapi.DiskUsageRequest{Filter: info.Filter}
-	resp, err := c.controlClient().DiskUsage(ctx, req)
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to call diskusage")
-	}
-
-	var du []*UsageInfo
-
-	for _, d := range resp.Record {
-		du = append(du, &UsageInfo{
-			ID:          d.ID,
-			Mutable:     d.Mutable,
-			InUse:       d.InUse,
-			Size:        d.Size_,
-			Parent:      d.Parent,
-			CreatedAt:   d.CreatedAt,
-			Description: d.Description,
-			UsageCount:  int(d.UsageCount),
-			LastUsedAt:  d.LastUsedAt,
-			RecordType:  UsageRecordType(d.RecordType),
-			Shared:      d.Shared,
-		})
-	}
-
-	sort.Slice(du, func(i, j int) bool {
-		if du[i].Size == du[j].Size {
-			return du[i].ID > du[j].ID
-		}
-		return du[i].Size > du[j].Size
-	})
-
-	return du, nil
-}
-
-type DiskUsageOption interface {
-	SetDiskUsageOption(*DiskUsageInfo)
-}
-
-type DiskUsageInfo struct {
-	Filter []string
-}
-
-type UsageRecordType string
-
-const (
-	UsageRecordTypeInternal UsageRecordType = "internal"
-	
UsageRecordTypeFrontend UsageRecordType = "frontend" - UsageRecordTypeLocalSource UsageRecordType = "source.local" - UsageRecordTypeGitCheckout UsageRecordType = "source.git.checkout" - UsageRecordTypeCacheMount UsageRecordType = "exec.cachemount" - UsageRecordTypeRegular UsageRecordType = "regular" -) diff --git a/vendor/github.com/moby/buildkit/client/exporters.go b/vendor/github.com/moby/buildkit/client/exporters.go deleted file mode 100644 index 4160d92a73f3..000000000000 --- a/vendor/github.com/moby/buildkit/client/exporters.go +++ /dev/null @@ -1,8 +0,0 @@ -package client - -const ( - ExporterImage = "image" - ExporterLocal = "local" - ExporterOCI = "oci" - ExporterDocker = "docker" -) diff --git a/vendor/github.com/moby/buildkit/client/filter.go b/vendor/github.com/moby/buildkit/client/filter.go deleted file mode 100644 index b05fe59d088c..000000000000 --- a/vendor/github.com/moby/buildkit/client/filter.go +++ /dev/null @@ -1,19 +0,0 @@ -package client - -func WithFilter(f []string) Filter { - return Filter(f) -} - -type Filter []string - -func (f Filter) SetDiskUsageOption(di *DiskUsageInfo) { - di.Filter = f -} - -func (f Filter) SetPruneOption(pi *PruneInfo) { - pi.Filter = f -} - -func (f Filter) SetListWorkersOption(lwi *ListWorkersInfo) { - lwi.Filter = f -} diff --git a/vendor/github.com/moby/buildkit/client/graph.go b/vendor/github.com/moby/buildkit/client/graph.go deleted file mode 100644 index 141a393cf9f7..000000000000 --- a/vendor/github.com/moby/buildkit/client/graph.go +++ /dev/null @@ -1,45 +0,0 @@ -package client - -import ( - "time" - - digest "github.com/opencontainers/go-digest" -) - -type Vertex struct { - Digest digest.Digest - Inputs []digest.Digest - Name string - Started *time.Time - Completed *time.Time - Cached bool - Error string -} - -type VertexStatus struct { - ID string - Vertex digest.Digest - Name string - Total int64 - Current int64 - Timestamp time.Time - Started *time.Time - Completed *time.Time -} - -type VertexLog struct { - Vertex digest.Digest - Stream int - Data []byte - Timestamp time.Time -} - -type SolveStatus struct { - Vertexes []*Vertex - Statuses []*VertexStatus - Logs []*VertexLog -} - -type SolveResponse struct { - ExporterResponse map[string]string -} diff --git a/vendor/github.com/moby/buildkit/client/llb/exec.go b/vendor/github.com/moby/buildkit/client/llb/exec.go deleted file mode 100644 index 8233b076e78f..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/exec.go +++ /dev/null @@ -1,625 +0,0 @@ -package llb - -import ( - _ "crypto/sha256" - "fmt" - "net" - "sort" - - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/system" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -type Meta struct { - Args []string - Env EnvList - Cwd string - User string - ProxyEnv *ProxyEnv - ExtraHosts []HostIP - Network pb.NetMode -} - -func NewExecOp(root Output, meta Meta, readOnly bool, c Constraints) *ExecOp { - e := &ExecOp{meta: meta, constraints: c} - rootMount := &mount{ - target: pb.RootMount, - source: root, - readonly: readOnly, - } - e.mounts = append(e.mounts, rootMount) - if readOnly { - e.root = root - } else { - o := &output{vertex: e, getIndex: e.getMountIndexFn(rootMount)} - if p := c.Platform; p != nil { - o.platform = p - } - e.root = o - } - rootMount.output = e.root - return e -} - -type mount struct { - target string - readonly bool - source Output - output Output - selector string - cacheID string - tmpfs bool - cacheSharing CacheMountSharingMode - // hasOutput bool 
-} - -type ExecOp struct { - MarshalCache - root Output - mounts []*mount - meta Meta - constraints Constraints - isValidated bool - secrets []SecretInfo - ssh []SSHInfo -} - -func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Output { - m := &mount{ - target: target, - source: source, - } - for _, o := range opt { - o(m) - } - e.mounts = append(e.mounts, m) - if m.readonly { - m.output = source - } else if m.tmpfs { - m.output = &output{vertex: e, err: errors.Errorf("tmpfs mount for %s can't be used as a parent", target)} - } else { - o := &output{vertex: e, getIndex: e.getMountIndexFn(m)} - if p := e.constraints.Platform; p != nil { - o.platform = p - } - m.output = o - } - e.Store(nil, nil, nil) - e.isValidated = false - return m.output -} - -func (e *ExecOp) GetMount(target string) Output { - for _, m := range e.mounts { - if m.target == target { - return m.output - } - } - return nil -} - -func (e *ExecOp) Validate() error { - if e.isValidated { - return nil - } - if len(e.meta.Args) == 0 { - return errors.Errorf("arguments are required") - } - if e.meta.Cwd == "" { - return errors.Errorf("working directory is required") - } - for _, m := range e.mounts { - if m.source != nil { - if err := m.source.Vertex().Validate(); err != nil { - return err - } - } - } - e.isValidated = true - return nil -} - -func (e *ExecOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) { - if e.Cached(c) { - return e.Load() - } - if err := e.Validate(); err != nil { - return "", nil, nil, err - } - // make sure mounts are sorted - sort.Slice(e.mounts, func(i, j int) bool { - return e.mounts[i].target < e.mounts[j].target - }) - - if len(e.ssh) > 0 { - for i, s := range e.ssh { - if s.Target == "" { - e.ssh[i].Target = fmt.Sprintf("/run/buildkit/ssh_agent.%d", i) - } - } - if _, ok := e.meta.Env.Get("SSH_AUTH_SOCK"); !ok { - e.meta.Env = e.meta.Env.AddOrReplace("SSH_AUTH_SOCK", e.ssh[0].Target) - } - } - if c.Caps != nil { - if err := c.Caps.Supports(pb.CapExecMetaSetsDefaultPath); err != nil { - e.meta.Env = e.meta.Env.SetDefault("PATH", system.DefaultPathEnv) - } else { - addCap(&e.constraints, pb.CapExecMetaSetsDefaultPath) - } - } - - meta := &pb.Meta{ - Args: e.meta.Args, - Env: e.meta.Env.ToArray(), - Cwd: e.meta.Cwd, - User: e.meta.User, - } - if len(e.meta.ExtraHosts) > 0 { - hosts := make([]*pb.HostIP, len(e.meta.ExtraHosts)) - for i, h := range e.meta.ExtraHosts { - hosts[i] = &pb.HostIP{Host: h.Host, IP: h.IP.String()} - } - meta.ExtraHosts = hosts - } - - peo := &pb.ExecOp{ - Meta: meta, - Network: e.meta.Network, - } - if e.meta.Network != NetModeSandbox { - addCap(&e.constraints, pb.CapExecMetaNetwork) - } - - if p := e.meta.ProxyEnv; p != nil { - peo.Meta.ProxyEnv = &pb.ProxyEnv{ - HttpProxy: p.HttpProxy, - HttpsProxy: p.HttpsProxy, - FtpProxy: p.FtpProxy, - NoProxy: p.NoProxy, - } - addCap(&e.constraints, pb.CapExecMetaProxy) - } - - addCap(&e.constraints, pb.CapExecMetaBase) - - for _, m := range e.mounts { - if m.selector != "" { - addCap(&e.constraints, pb.CapExecMountSelector) - } - if m.cacheID != "" { - addCap(&e.constraints, pb.CapExecMountCache) - addCap(&e.constraints, pb.CapExecMountCacheSharing) - } else if m.tmpfs { - addCap(&e.constraints, pb.CapExecMountTmpfs) - } else if m.source != nil { - addCap(&e.constraints, pb.CapExecMountBind) - } - } - - if len(e.secrets) > 0 { - addCap(&e.constraints, pb.CapExecMountSecret) - } - - if len(e.ssh) > 0 { - addCap(&e.constraints, pb.CapExecMountSSH) - } - - pop, md := MarshalConstraints(c, 
&e.constraints) - pop.Op = &pb.Op_Exec{ - Exec: peo, - } - - outIndex := 0 - for _, m := range e.mounts { - inputIndex := pb.InputIndex(len(pop.Inputs)) - if m.source != nil { - if m.tmpfs { - return "", nil, nil, errors.Errorf("tmpfs mounts must use scratch") - } - inp, err := m.source.ToInput(c) - if err != nil { - return "", nil, nil, err - } - - newInput := true - - for i, inp2 := range pop.Inputs { - if *inp == *inp2 { - inputIndex = pb.InputIndex(i) - newInput = false - break - } - } - - if newInput { - pop.Inputs = append(pop.Inputs, inp) - } - } else { - inputIndex = pb.Empty - } - - outputIndex := pb.OutputIndex(-1) - if !m.readonly && m.cacheID == "" && !m.tmpfs { - outputIndex = pb.OutputIndex(outIndex) - outIndex++ - } - - pm := &pb.Mount{ - Input: inputIndex, - Dest: m.target, - Readonly: m.readonly, - Output: outputIndex, - Selector: m.selector, - } - if m.cacheID != "" { - pm.MountType = pb.MountType_CACHE - pm.CacheOpt = &pb.CacheOpt{ - ID: m.cacheID, - } - switch m.cacheSharing { - case CacheMountShared: - pm.CacheOpt.Sharing = pb.CacheSharingOpt_SHARED - case CacheMountPrivate: - pm.CacheOpt.Sharing = pb.CacheSharingOpt_PRIVATE - case CacheMountLocked: - pm.CacheOpt.Sharing = pb.CacheSharingOpt_LOCKED - } - } - if m.tmpfs { - pm.MountType = pb.MountType_TMPFS - } - peo.Mounts = append(peo.Mounts, pm) - } - - for _, s := range e.secrets { - pm := &pb.Mount{ - Dest: s.Target, - MountType: pb.MountType_SECRET, - SecretOpt: &pb.SecretOpt{ - ID: s.ID, - Uid: uint32(s.UID), - Gid: uint32(s.GID), - Optional: s.Optional, - Mode: uint32(s.Mode), - }, - } - peo.Mounts = append(peo.Mounts, pm) - } - - for _, s := range e.ssh { - pm := &pb.Mount{ - Dest: s.Target, - MountType: pb.MountType_SSH, - SSHOpt: &pb.SSHOpt{ - ID: s.ID, - Uid: uint32(s.UID), - Gid: uint32(s.GID), - Mode: uint32(s.Mode), - Optional: s.Optional, - }, - } - peo.Mounts = append(peo.Mounts, pm) - } - - dt, err := pop.Marshal() - if err != nil { - return "", nil, nil, err - } - e.Store(dt, md, c) - return e.Load() -} - -func (e *ExecOp) Output() Output { - return e.root -} - -func (e *ExecOp) Inputs() (inputs []Output) { - mm := map[Output]struct{}{} - for _, m := range e.mounts { - if m.source != nil { - mm[m.source] = struct{}{} - } - } - for o := range mm { - inputs = append(inputs, o) - } - return -} - -func (e *ExecOp) getMountIndexFn(m *mount) func() (pb.OutputIndex, error) { - return func() (pb.OutputIndex, error) { - // make sure mounts are sorted - sort.Slice(e.mounts, func(i, j int) bool { - return e.mounts[i].target < e.mounts[j].target - }) - - i := 0 - for _, m2 := range e.mounts { - if m2.readonly || m2.cacheID != "" { - continue - } - if m == m2 { - return pb.OutputIndex(i), nil - } - i++ - } - return pb.OutputIndex(0), errors.Errorf("invalid mount: %s", m.target) - } -} - -type ExecState struct { - State - exec *ExecOp -} - -func (e ExecState) AddMount(target string, source State, opt ...MountOption) State { - return source.WithOutput(e.exec.AddMount(target, source.Output(), opt...)) -} - -func (e ExecState) GetMount(target string) State { - return NewState(e.exec.GetMount(target)) -} - -func (e ExecState) Root() State { - return e.State -} - -type MountOption func(*mount) - -func Readonly(m *mount) { - m.readonly = true -} - -func SourcePath(src string) MountOption { - return func(m *mount) { - m.selector = src - } -} - -func AsPersistentCacheDir(id string, sharing CacheMountSharingMode) MountOption { - return func(m *mount) { - m.cacheID = id - m.cacheSharing = sharing - } -} - -func Tmpfs() 
MountOption { - return func(m *mount) { - m.tmpfs = true - } -} - -type RunOption interface { - SetRunOption(es *ExecInfo) -} - -type runOptionFunc func(*ExecInfo) - -func (fn runOptionFunc) SetRunOption(ei *ExecInfo) { - fn(ei) -} - -func Network(n pb.NetMode) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.State = network(n)(ei.State) - }) -} - -func Shlex(str string) RunOption { - return Shlexf(str) -} -func Shlexf(str string, v ...interface{}) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.State = shlexf(str, v...)(ei.State) - }) -} - -func Args(a []string) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.State = args(a...)(ei.State) - }) -} - -func AddEnv(key, value string) RunOption { - return AddEnvf(key, value) -} - -func AddEnvf(key, value string, v ...interface{}) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.State = ei.State.AddEnvf(key, value, v...) - }) -} - -func User(str string) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.State = ei.State.User(str) - }) -} - -func Dir(str string) RunOption { - return Dirf(str) -} -func Dirf(str string, v ...interface{}) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.State = ei.State.Dirf(str, v...) - }) -} - -func AddExtraHost(host string, ip net.IP) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.State = ei.State.AddExtraHost(host, ip) - }) -} - -func Reset(s State) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.State = ei.State.Reset(s) - }) -} - -func With(so ...StateOption) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.State = ei.State.With(so...) - }) -} - -func AddMount(dest string, mountState State, opts ...MountOption) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.Mounts = append(ei.Mounts, MountInfo{dest, mountState.Output(), opts}) - }) -} - -func AddSSHSocket(opts ...SSHOption) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - s := &SSHInfo{ - Mode: 0600, - } - for _, opt := range opts { - opt.SetSSHOption(s) - } - ei.SSH = append(ei.SSH, *s) - }) -} - -type SSHOption interface { - SetSSHOption(*SSHInfo) -} - -type sshOptionFunc func(*SSHInfo) - -func (fn sshOptionFunc) SetSSHOption(si *SSHInfo) { - fn(si) -} - -func SSHID(id string) SSHOption { - return sshOptionFunc(func(si *SSHInfo) { - si.ID = id - }) -} - -func SSHSocketTarget(target string) SSHOption { - return sshOptionFunc(func(si *SSHInfo) { - si.Target = target - }) -} - -func SSHSocketOpt(target string, uid, gid, mode int) SSHOption { - return sshOptionFunc(func(si *SSHInfo) { - si.Target = target - si.UID = uid - si.GID = gid - si.Mode = mode - }) -} - -var SSHOptional = sshOptionFunc(func(si *SSHInfo) { - si.Optional = true -}) - -type SSHInfo struct { - ID string - Target string - Mode int - UID int - GID int - Optional bool -} - -func AddSecret(dest string, opts ...SecretOption) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - s := &SecretInfo{ID: dest, Target: dest, Mode: 0400} - for _, opt := range opts { - opt.SetSecretOption(s) - } - ei.Secrets = append(ei.Secrets, *s) - }) -} - -type SecretOption interface { - SetSecretOption(*SecretInfo) -} - -type secretOptionFunc func(*SecretInfo) - -func (fn secretOptionFunc) SetSecretOption(si *SecretInfo) { - fn(si) -} - -type SecretInfo struct { - ID string - Target string - Mode int - UID int - GID int - Optional bool -} - -var SecretOptional = secretOptionFunc(func(si *SecretInfo) { - si.Optional = true -}) - -func SecretID(id string) SecretOption { - return 
secretOptionFunc(func(si *SecretInfo) { - si.ID = id - }) -} - -func SecretFileOpt(uid, gid, mode int) SecretOption { - return secretOptionFunc(func(si *SecretInfo) { - si.UID = uid - si.GID = gid - si.Mode = mode - }) -} - -func ReadonlyRootFS() RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.ReadonlyRootFS = true - }) -} - -func WithProxy(ps ProxyEnv) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.ProxyEnv = &ps - }) -} - -type ExecInfo struct { - constraintsWrapper - State State - Mounts []MountInfo - ReadonlyRootFS bool - ProxyEnv *ProxyEnv - Secrets []SecretInfo - SSH []SSHInfo -} - -type MountInfo struct { - Target string - Source Output - Opts []MountOption -} - -type ProxyEnv struct { - HttpProxy string - HttpsProxy string - FtpProxy string - NoProxy string -} - -type CacheMountSharingMode int - -const ( - CacheMountShared CacheMountSharingMode = iota - CacheMountPrivate - CacheMountLocked -) - -const ( - NetModeSandbox = pb.NetMode_UNSET - NetModeHost = pb.NetMode_HOST - NetModeNone = pb.NetMode_NONE -) diff --git a/vendor/github.com/moby/buildkit/client/llb/exec_test.go b/vendor/github.com/moby/buildkit/client/llb/exec_test.go deleted file mode 100644 index 280391f670e7..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/exec_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package llb - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestTmpfsMountError(t *testing.T) { - t.Parallel() - - st := Image("foo").Run(Shlex("args")).AddMount("/tmp", Scratch(), Tmpfs()) - _, err := st.Marshal() - - require.Error(t, err) - require.Contains(t, err.Error(), "can't be used as a parent") - - st = Image("foo").Run(Shlex("args"), AddMount("/tmp", Scratch(), Tmpfs())).Root() - _, err = st.Marshal() - require.NoError(t, err) - - st = Image("foo").Run(Shlex("args"), AddMount("/tmp", Image("bar"), Tmpfs())).Root() - _, err = st.Marshal() - require.Error(t, err) - require.Contains(t, err.Error(), "must use scratch") -} diff --git a/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go b/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go deleted file mode 100644 index e13a5780fb4e..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go +++ /dev/null @@ -1,110 +0,0 @@ -package imagemetaresolver - -import ( - "context" - "net/http" - "sync" - - "github.com/containerd/containerd/platforms" - "github.com/containerd/containerd/remotes" - "github.com/containerd/containerd/remotes/docker" - "github.com/docker/docker/pkg/locker" - "github.com/moby/buildkit/client/llb" - gw "github.com/moby/buildkit/frontend/gateway/client" - "github.com/moby/buildkit/util/contentutil" - "github.com/moby/buildkit/util/imageutil" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -var defaultImageMetaResolver llb.ImageMetaResolver -var defaultImageMetaResolverOnce sync.Once - -var WithDefault = imageOptionFunc(func(ii *llb.ImageInfo) { - llb.WithMetaResolver(Default()).SetImageOption(ii) -}) - -type imageMetaResolverOpts struct { - platform *specs.Platform -} - -type ImageMetaResolverOpt func(o *imageMetaResolverOpts) - -func WithDefaultPlatform(p *specs.Platform) ImageMetaResolverOpt { - return func(o *imageMetaResolverOpts) { - o.platform = p - } -} - -func New(with ...ImageMetaResolverOpt) llb.ImageMetaResolver { - var opts imageMetaResolverOpts - for _, f := range with { - f(&opts) - } - return &imageMetaResolver{ - resolver: 
docker.NewResolver(docker.ResolverOptions{ - Client: http.DefaultClient, - }), - platform: opts.platform, - buffer: contentutil.NewBuffer(), - cache: map[string]resolveResult{}, - locker: locker.New(), - } -} - -func Default() llb.ImageMetaResolver { - defaultImageMetaResolverOnce.Do(func() { - defaultImageMetaResolver = New() - }) - return defaultImageMetaResolver -} - -type imageMetaResolver struct { - resolver remotes.Resolver - buffer contentutil.Buffer - platform *specs.Platform - locker *locker.Locker - cache map[string]resolveResult -} - -type resolveResult struct { - config []byte - dgst digest.Digest -} - -func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error) { - imr.locker.Lock(ref) - defer imr.locker.Unlock(ref) - - platform := opt.Platform - if platform == nil { - platform = imr.platform - } - - k := imr.key(ref, platform) - - if res, ok := imr.cache[k]; ok { - return res.dgst, res.config, nil - } - - dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, platform) - if err != nil { - return "", nil, err - } - - imr.cache[k] = resolveResult{dgst: dgst, config: config} - return dgst, config, nil -} - -func (imr *imageMetaResolver) key(ref string, platform *specs.Platform) string { - if platform != nil { - ref += platforms.Format(*platform) - } - return ref -} - -type imageOptionFunc func(*llb.ImageInfo) - -func (fn imageOptionFunc) SetImageOption(ii *llb.ImageInfo) { - fn(ii) -} diff --git a/vendor/github.com/moby/buildkit/client/llb/llbbuild/llbbuild.go b/vendor/github.com/moby/buildkit/client/llb/llbbuild/llbbuild.go deleted file mode 100644 index 58ab685218b7..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/llbbuild/llbbuild.go +++ /dev/null @@ -1,115 +0,0 @@ -package llbbuild - -import ( - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - digest "github.com/opencontainers/go-digest" -) - -func Build(opt ...BuildOption) llb.StateOption { - return func(s llb.State) llb.State { - return s.WithOutput(NewBuildOp(s.Output(), opt...).Output()) - } -} - -func NewBuildOp(source llb.Output, opt ...BuildOption) llb.Vertex { - info := &BuildInfo{} - for _, o := range opt { - o(info) - } - return &build{source: source, info: info, constraints: info.Constraints} -} - -type build struct { - llb.MarshalCache - source llb.Output - info *BuildInfo - cachedPBDigest digest.Digest - cachedPB []byte - constraints llb.Constraints -} - -func (b *build) ToInput(c *llb.Constraints) (*pb.Input, error) { - dgst, _, _, err := b.Marshal(c) - if err != nil { - return nil, err - } - return &pb.Input{Digest: dgst, Index: pb.OutputIndex(0)}, nil -} - -func (b *build) Vertex() llb.Vertex { - return b -} - -func (b *build) Validate() error { - return nil -} - -func (b *build) Marshal(c *llb.Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) { - if b.Cached(c) { - return b.Load() - } - pbo := &pb.BuildOp{ - Builder: pb.LLBBuilder, - Inputs: map[string]*pb.BuildInput{ - pb.LLBDefinitionInput: {Input: pb.InputIndex(0)}}, - } - - pbo.Attrs = map[string]string{} - - if b.info.DefinitionFilename != "" { - pbo.Attrs[pb.AttrLLBDefinitionFilename] = b.info.DefinitionFilename - } - - if b.constraints.Metadata.Caps == nil { - b.constraints.Metadata.Caps = make(map[apicaps.CapID]bool) - } - b.constraints.Metadata.Caps[pb.CapBuildOpLLBFileName] = true - - pop, md := llb.MarshalConstraints(c, &b.constraints) - pop.Op = 
&pb.Op_Build{ - Build: pbo, - } - - inp, err := b.source.ToInput(c) - if err != nil { - return "", nil, nil, err - } - - pop.Inputs = append(pop.Inputs, inp) - - dt, err := pop.Marshal() - if err != nil { - return "", nil, nil, err - } - b.Store(dt, md, c) - return b.Load() -} - -func (b *build) Output() llb.Output { - return b -} - -func (b *build) Inputs() []llb.Output { - return []llb.Output{b.source} -} - -type BuildInfo struct { - llb.Constraints - DefinitionFilename string -} - -type BuildOption func(*BuildInfo) - -func WithFilename(fn string) BuildOption { - return func(b *BuildInfo) { - b.DefinitionFilename = fn - } -} - -func WithConstraints(co llb.ConstraintsOpt) BuildOption { - return func(b *BuildInfo) { - co.SetConstraintsOption(&b.Constraints) - } -} diff --git a/vendor/github.com/moby/buildkit/client/llb/llbbuild/llbbuild_test.go b/vendor/github.com/moby/buildkit/client/llb/llbbuild/llbbuild_test.go deleted file mode 100644 index 9e0c0493ebf5..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/llbbuild/llbbuild_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package llbbuild - -import ( - "testing" - - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/solver/pb" - digest "github.com/opencontainers/go-digest" - "github.com/stretchr/testify/require" -) - -func TestMarshal(t *testing.T) { - t.Parallel() - b := NewBuildOp(newDummyOutput("foobar"), WithFilename("myfilename")) - dgst, dt, opMeta, err := b.Marshal(&llb.Constraints{}) - _ = opMeta - require.NoError(t, err) - - require.Equal(t, dgst, digest.FromBytes(dt)) - - var op pb.Op - err = op.Unmarshal(dt) - require.NoError(t, err) - - buildop := op.GetBuild() - require.NotEqual(t, buildop, nil) - - require.Equal(t, len(op.Inputs), 1) - require.Equal(t, buildop.Builder, pb.LLBBuilder) - require.Equal(t, len(buildop.Inputs), 1) - require.Equal(t, buildop.Inputs[pb.LLBDefinitionInput], &pb.BuildInput{Input: pb.InputIndex(0)}) - - require.Equal(t, buildop.Attrs[pb.AttrLLBDefinitionFilename], "myfilename") -} - -func newDummyOutput(key string) llb.Output { - dgst := digest.FromBytes([]byte(key)) - return &dummyOutput{dgst: dgst} -} - -type dummyOutput struct { - dgst digest.Digest -} - -func (d *dummyOutput) ToInput(*llb.Constraints) (*pb.Input, error) { - return &pb.Input{ - Digest: d.dgst, - Index: pb.OutputIndex(7), // random constant - }, nil -} -func (d *dummyOutput) Vertex() llb.Vertex { - return nil -} diff --git a/vendor/github.com/moby/buildkit/client/llb/llbtest/platform_test.go b/vendor/github.com/moby/buildkit/client/llb/llbtest/platform_test.go deleted file mode 100644 index ae8399c62959..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/llbtest/platform_test.go +++ /dev/null @@ -1,233 +0,0 @@ -package llbtest - -import ( - "strings" - "testing" - - "github.com/containerd/containerd/platforms" - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" - "github.com/moby/buildkit/solver/pb" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/stretchr/testify/require" -) - -func TestCustomPlatform(t *testing.T) { - t.Parallel() - - s := llb.Image("foo", llb.LinuxArmhf). - Run(llb.Shlex("baz")). - Run(llb.Shlex("bar")). - Run(llb.Shlex("bax"), llb.Windows). 
- Run(llb.Shlex("bay")) - - def, err := s.Marshal() - require.NoError(t, err) - - e, err := llbsolver.Load(def.ToPB()) - require.NoError(t, err) - - require.Equal(t, depth(e), 5) - - expected := specs.Platform{OS: "windows", Architecture: "amd64"} - require.Equal(t, expected, platform(e)) - e = parent(e, 0) - require.Equal(t, expected, platform(e)) - e = parent(e, 0) - - expected = specs.Platform{OS: "linux", Architecture: "arm", Variant: "v7"} - require.Equal(t, expected, platform(e)) - e = parent(e, 0) - require.Equal(t, expected, platform(e)) - require.Equal(t, []string{"baz"}, args(e)) - e = parent(e, 0) - require.Equal(t, expected, platform(e)) - require.Equal(t, "docker-image://docker.io/library/foo:latest", id(e)) -} - -func TestDefaultPlatform(t *testing.T) { - t.Parallel() - - s := llb.Image("foo").Run(llb.Shlex("bar")) - - def, err := s.Marshal() - require.NoError(t, err) - - e, err := llbsolver.Load(def.ToPB()) - require.NoError(t, err) - - require.Equal(t, depth(e), 2) - - expected := platforms.DefaultSpec() - require.Equal(t, expected, platform(e)) - require.Equal(t, []string{"bar"}, args(e)) - e = parent(e, 0) - require.Equal(t, expected, platform(e)) - require.Equal(t, "docker-image://docker.io/library/foo:latest", id(e)) -} - -func TestPlatformOnMarshal(t *testing.T) { - t.Parallel() - - s := llb.Image("image1").Run(llb.Shlex("bar")) - - def, err := s.Marshal(llb.Windows) - require.NoError(t, err) - - e, err := llbsolver.Load(def.ToPB()) - require.NoError(t, err) - - expected := specs.Platform{OS: "windows", Architecture: "amd64"} - require.Equal(t, expected, platform(e)) - e = parent(e, 0) - require.Equal(t, expected, platform(e)) - require.Equal(t, "docker-image://docker.io/library/image1:latest", id(e)) -} - -func TestPlatformMixed(t *testing.T) { - t.Parallel() - - s1 := llb.Image("image1").Run(llb.Shlex("cmd-main")) - s2 := llb.Image("image2", llb.LinuxArmel).Run(llb.Shlex("cmd-sub")) - s1.AddMount("/mnt", s2.Root()) - - def, err := s1.Marshal(llb.LinuxAmd64) - require.NoError(t, err) - - e, err := llbsolver.Load(def.ToPB()) - require.NoError(t, err) - - require.Equal(t, depth(e), 4) - - expectedAmd := specs.Platform{OS: "linux", Architecture: "amd64"} - require.Equal(t, []string{"cmd-main"}, args(e)) - require.Equal(t, expectedAmd, platform(e)) - - e1 := mount(e, "/") - require.Equal(t, "docker-image://docker.io/library/image1:latest", id(e1)) - require.Equal(t, expectedAmd, platform(e1)) - - expectedArm := specs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"} - e2 := mount(e, "/mnt") - require.Equal(t, []string{"cmd-sub"}, args(e2)) - require.Equal(t, expectedArm, platform(e2)) - e2 = parent(e2, 0) - require.Equal(t, "docker-image://docker.io/library/image2:latest", id(e2)) - require.Equal(t, expectedArm, platform(e2)) -} - -func TestFallbackPath(t *testing.T) { - t.Parallel() - - // With no caps we expect no PATH but also no requirement for - // the cap. - def, err := llb.Scratch().Run(llb.Shlex("cmd")).Marshal(llb.LinuxAmd64) - require.NoError(t, err) - e, err := llbsolver.Load(def.ToPB()) - require.NoError(t, err) - require.False(t, def.Metadata[e.Vertex.Digest()].Caps[pb.CapExecMetaSetsDefaultPath]) - _, ok := getenv(e, "PATH") - require.False(t, ok) - - // For an empty capset we expect a default non-empty PATH, and - // no requirement for the cap. 
- cs := pb.Caps.CapSet(nil) - require.Error(t, cs.Supports(pb.CapExecMetaSetsDefaultPath)) - def, err = llb.Scratch().Run(llb.Shlex("cmd")).Marshal(llb.LinuxAmd64, llb.WithCaps(cs)) - require.NoError(t, err) - e, err = llbsolver.Load(def.ToPB()) - require.NoError(t, err) - require.False(t, def.Metadata[e.Vertex.Digest()].Caps[pb.CapExecMetaSetsDefaultPath]) - v, ok := getenv(e, "PATH") - require.True(t, ok) - require.NotEqual(t, "", v) - - // All capabilities, including pb.CapExecMetaSetsDefaultPath, - // so should get no PATH (not present at all, rather than - // present and empty), but also require the cap. - cs = pb.Caps.CapSet(pb.Caps.All()) - require.NoError(t, cs.Supports(pb.CapExecMetaSetsDefaultPath)) - def, err = llb.Scratch().Run(llb.Shlex("cmd")).Marshal(llb.LinuxAmd64, llb.WithCaps(cs)) - require.NoError(t, err) - e, err = llbsolver.Load(def.ToPB()) - require.NoError(t, err) - require.True(t, def.Metadata[e.Vertex.Digest()].Caps[pb.CapExecMetaSetsDefaultPath]) - _, ok = getenv(e, "PATH") - require.False(t, ok) - - // If we provide a path it should not be touched, no matter - // what caps we pass in. Whether the cap becomes required is - // irrelevant. - for _, cos := range [][]llb.ConstraintsOpt{ - nil, - {llb.WithCaps(pb.Caps.CapSet(nil))}, - {llb.WithCaps(pb.Caps.CapSet(pb.Caps.All()))}, - } { - def, err = llb.Scratch().AddEnv("PATH", "foo").Run(llb.Shlex("cmd")).Marshal(append(cos, llb.LinuxAmd64)...) - require.NoError(t, err) - e, err = llbsolver.Load(def.ToPB()) - require.NoError(t, err) - // pb.CapExecMetaSetsDefaultPath setting is irrelevant (and variable). - v, ok = getenv(e, "PATH") - require.True(t, ok) - require.Equal(t, "foo", v) - } -} - -func toOp(e solver.Edge) *pb.Op { - return e.Vertex.Sys().(*pb.Op) -} - -func platform(e solver.Edge) specs.Platform { - op := toOp(e) - p := *op.Platform - return specs.Platform{ - OS: p.OS, - Architecture: p.Architecture, - Variant: p.Variant, - OSVersion: p.OSVersion, - OSFeatures: p.OSFeatures, - } -} - -func depth(e solver.Edge) int { - i := 1 - for _, inp := range e.Vertex.Inputs() { - i += depth(inp) - } - return i -} - -func parent(e solver.Edge, i int) solver.Edge { - return e.Vertex.Inputs()[i] -} - -func id(e solver.Edge) string { - return toOp(e).GetSource().Identifier -} - -func args(e solver.Edge) []string { - return toOp(e).GetExec().Meta.Args -} - -func mount(e solver.Edge, target string) solver.Edge { - op := toOp(e).GetExec() - for _, m := range op.Mounts { - if m.Dest == target { - return e.Vertex.Inputs()[int(m.Input)] - } - } - panic("could not find mount " + target) -} - -func getenv(e solver.Edge, k string) (string, bool) { - env := toOp(e).GetExec().Meta.Env - k = k + "=" - for _, e := range env { - if strings.HasPrefix(e, k) { - return strings.TrimPrefix(e, k), true - } - } - return "", false -} diff --git a/vendor/github.com/moby/buildkit/client/llb/marshal.go b/vendor/github.com/moby/buildkit/client/llb/marshal.go deleted file mode 100644 index 65a352fae89c..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/marshal.go +++ /dev/null @@ -1,112 +0,0 @@ -package llb - -import ( - "io" - "io/ioutil" - - "github.com/containerd/containerd/platforms" - "github.com/moby/buildkit/solver/pb" - digest "github.com/opencontainers/go-digest" -) - -// Definition is the LLB definition structure with per-vertex metadata entries -// Corresponds to the Definition structure defined in solver/pb.Definition. 
-type Definition struct { - Def [][]byte - Metadata map[digest.Digest]pb.OpMetadata -} - -func (def *Definition) ToPB() *pb.Definition { - md := make(map[digest.Digest]pb.OpMetadata) - for k, v := range def.Metadata { - md[k] = v - } - return &pb.Definition{ - Def: def.Def, - Metadata: md, - } -} - -func (def *Definition) FromPB(x *pb.Definition) { - def.Def = x.Def - def.Metadata = make(map[digest.Digest]pb.OpMetadata) - for k, v := range x.Metadata { - def.Metadata[k] = v - } -} - -func WriteTo(def *Definition, w io.Writer) error { - b, err := def.ToPB().Marshal() - if err != nil { - return err - } - _, err = w.Write(b) - return err -} - -func ReadFrom(r io.Reader) (*Definition, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - var pbDef pb.Definition - if err := pbDef.Unmarshal(b); err != nil { - return nil, err - } - var def Definition - def.FromPB(&pbDef) - return &def, nil -} - -func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) { - c := *base - c.WorkerConstraints = append([]string{}, c.WorkerConstraints...) - - if p := override.Platform; p != nil { - c.Platform = p - } - - for _, wc := range override.WorkerConstraints { - c.WorkerConstraints = append(c.WorkerConstraints, wc) - } - - c.Metadata = mergeMetadata(c.Metadata, override.Metadata) - - if c.Platform == nil { - defaultPlatform := platforms.Normalize(platforms.DefaultSpec()) - c.Platform = &defaultPlatform - } - - return &pb.Op{ - Platform: &pb.Platform{ - OS: c.Platform.OS, - Architecture: c.Platform.Architecture, - Variant: c.Platform.Variant, - OSVersion: c.Platform.OSVersion, - OSFeatures: c.Platform.OSFeatures, - }, - Constraints: &pb.WorkerConstraints{ - Filter: c.WorkerConstraints, - }, - }, &c.Metadata -} - -type MarshalCache struct { - digest digest.Digest - dt []byte - md *pb.OpMetadata - constraints *Constraints -} - -func (mc *MarshalCache) Cached(c *Constraints) bool { - return mc.dt != nil && mc.constraints == c -} -func (mc *MarshalCache) Load() (digest.Digest, []byte, *pb.OpMetadata, error) { - return mc.digest, mc.dt, mc.md, nil -} -func (mc *MarshalCache) Store(dt []byte, md *pb.OpMetadata, c *Constraints) { - mc.digest = digest.FromBytes(dt) - mc.dt = dt - mc.md = md - mc.constraints = c -} diff --git a/vendor/github.com/moby/buildkit/client/llb/meta.go b/vendor/github.com/moby/buildkit/client/llb/meta.go deleted file mode 100644 index 702ea145651f..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/meta.go +++ /dev/null @@ -1,211 +0,0 @@ -package llb - -import ( - "fmt" - "net" - "path" - - "github.com/containerd/containerd/platforms" - "github.com/google/shlex" - "github.com/moby/buildkit/solver/pb" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -type contextKeyT string - -var ( - keyArgs = contextKeyT("llb.exec.args") - keyDir = contextKeyT("llb.exec.dir") - keyEnv = contextKeyT("llb.exec.env") - keyUser = contextKeyT("llb.exec.user") - keyExtraHost = contextKeyT("llb.exec.extrahost") - keyPlatform = contextKeyT("llb.platform") - keyNetwork = contextKeyT("llb.network") -) - -func addEnvf(key, value string, v ...interface{}) StateOption { - return func(s State) State { - return s.WithValue(keyEnv, getEnv(s).AddOrReplace(key, fmt.Sprintf(value, v...))) - } -} - -func dir(str string) StateOption { - return dirf(str) -} - -func dirf(str string, v ...interface{}) StateOption { - return func(s State) State { - value := fmt.Sprintf(str, v...) 
- if !path.IsAbs(value) { - prev := getDir(s) - if prev == "" { - prev = "/" - } - value = path.Join(prev, value) - } - return s.WithValue(keyDir, value) - } -} - -func user(str string) StateOption { - return func(s State) State { - return s.WithValue(keyUser, str) - } -} - -func reset(s_ State) StateOption { - return func(s State) State { - s = NewState(s.Output()) - s.ctx = s_.ctx - return s - } -} - -func getEnv(s State) EnvList { - v := s.Value(keyEnv) - if v != nil { - return v.(EnvList) - } - return EnvList{} -} - -func getDir(s State) string { - v := s.Value(keyDir) - if v != nil { - return v.(string) - } - return "" -} - -func getArgs(s State) []string { - v := s.Value(keyArgs) - if v != nil { - return v.([]string) - } - return nil -} - -func getUser(s State) string { - v := s.Value(keyUser) - if v != nil { - return v.(string) - } - return "" -} - -func args(args ...string) StateOption { - return func(s State) State { - return s.WithValue(keyArgs, args) - } -} - -func shlexf(str string, v ...interface{}) StateOption { - return func(s State) State { - arg, err := shlex.Split(fmt.Sprintf(str, v...)) - if err != nil { - // TODO: handle error - } - return args(arg...)(s) - } -} - -func platform(p specs.Platform) StateOption { - return func(s State) State { - return s.WithValue(keyPlatform, platforms.Normalize(p)) - } -} - -func getPlatform(s State) *specs.Platform { - v := s.Value(keyPlatform) - if v != nil { - p := v.(specs.Platform) - return &p - } - return nil -} - -func extraHost(host string, ip net.IP) StateOption { - return func(s State) State { - return s.WithValue(keyExtraHost, append(getExtraHosts(s), HostIP{Host: host, IP: ip})) - } -} - -func getExtraHosts(s State) []HostIP { - v := s.Value(keyExtraHost) - if v != nil { - return v.([]HostIP) - } - return nil -} - -type HostIP struct { - Host string - IP net.IP -} - -func network(v pb.NetMode) StateOption { - return func(s State) State { - return s.WithValue(keyNetwork, v) - } -} - -func getNetwork(s State) pb.NetMode { - v := s.Value(keyNetwork) - if v != nil { - n := v.(pb.NetMode) - return n - } - return NetModeSandbox -} - -type EnvList []KeyValue - -type KeyValue struct { - key string - value string -} - -func (e EnvList) AddOrReplace(k, v string) EnvList { - e = e.Delete(k) - e = append(e, KeyValue{key: k, value: v}) - return e -} - -func (e EnvList) SetDefault(k, v string) EnvList { - if _, ok := e.Get(k); !ok { - e = append(e, KeyValue{key: k, value: v}) - } - return e -} - -func (e EnvList) Delete(k string) EnvList { - e = append([]KeyValue(nil), e...) - if i, ok := e.Index(k); ok { - return append(e[:i], e[i+1:]...) 
- } - return e -} - -func (e EnvList) Get(k string) (string, bool) { - if index, ok := e.Index(k); ok { - return e[index].value, true - } - return "", false -} - -func (e EnvList) Index(k string) (int, bool) { - for i, kv := range e { - if kv.key == k { - return i, true - } - } - return -1, false -} - -func (e EnvList) ToArray() []string { - out := make([]string, 0, len(e)) - for _, kv := range e { - out = append(out, kv.key+"="+kv.value) - } - return out -} diff --git a/vendor/github.com/moby/buildkit/client/llb/meta_test.go b/vendor/github.com/moby/buildkit/client/llb/meta_test.go deleted file mode 100644 index 5118a9cc7744..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/meta_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package llb - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestRelativeWd(t *testing.T) { - st := Scratch().Dir("foo") - require.Equal(t, st.GetDir(), "/foo") - - st = st.Dir("bar") - require.Equal(t, st.GetDir(), "/foo/bar") - - st = st.Dir("..") - require.Equal(t, st.GetDir(), "/foo") - - st = st.Dir("/baz") - require.Equal(t, st.GetDir(), "/baz") - - st = st.Dir("../../..") - require.Equal(t, st.GetDir(), "/") -} diff --git a/vendor/github.com/moby/buildkit/client/llb/resolver.go b/vendor/github.com/moby/buildkit/client/llb/resolver.go deleted file mode 100644 index d670a991888f..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/resolver.go +++ /dev/null @@ -1,20 +0,0 @@ -package llb - -import ( - "context" - - gw "github.com/moby/buildkit/frontend/gateway/client" - digest "github.com/opencontainers/go-digest" -) - -// WithMetaResolver adds a metadata resolver to an image -func WithMetaResolver(mr ImageMetaResolver) ImageOption { - return imageOptionFunc(func(ii *ImageInfo) { - ii.metaResolver = mr - }) -} - -// ImageMetaResolver can resolve image config metadata from a reference -type ImageMetaResolver interface { - ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error) -} diff --git a/vendor/github.com/moby/buildkit/client/llb/source.go b/vendor/github.com/moby/buildkit/client/llb/source.go deleted file mode 100644 index 52d40be8b747..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/source.go +++ /dev/null @@ -1,429 +0,0 @@ -package llb - -import ( - "context" - _ "crypto/sha256" - "encoding/json" - "os" - "strconv" - "strings" - - "github.com/docker/distribution/reference" - gw "github.com/moby/buildkit/frontend/gateway/client" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -type SourceOp struct { - MarshalCache - id string - attrs map[string]string - output Output - constraints Constraints - err error -} - -func NewSource(id string, attrs map[string]string, c Constraints) *SourceOp { - s := &SourceOp{ - id: id, - attrs: attrs, - constraints: c, - } - s.output = &output{vertex: s, platform: c.Platform} - return s -} - -func (s *SourceOp) Validate() error { - if s.err != nil { - return s.err - } - if s.id == "" { - return errors.Errorf("source identifier can't be empty") - } - return nil -} - -func (s *SourceOp) Marshal(constraints *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) { - if s.Cached(constraints) { - return s.Load() - } - if err := s.Validate(); err != nil { - return "", nil, nil, err - } - - if strings.HasPrefix(s.id, "local://") { - if _, hasSession := s.attrs[pb.AttrLocalSessionID]; !hasSession { - uid := 
s.constraints.LocalUniqueID - if uid == "" { - uid = constraints.LocalUniqueID - } - s.attrs[pb.AttrLocalUniqueID] = uid - addCap(&s.constraints, pb.CapSourceLocalUnique) - } - } - proto, md := MarshalConstraints(constraints, &s.constraints) - - proto.Op = &pb.Op_Source{ - Source: &pb.SourceOp{Identifier: s.id, Attrs: s.attrs}, - } - - if !platformSpecificSource(s.id) { - proto.Platform = nil - } - - dt, err := proto.Marshal() - if err != nil { - return "", nil, nil, err - } - - s.Store(dt, md, constraints) - return s.Load() -} - -func (s *SourceOp) Output() Output { - return s.output -} - -func (s *SourceOp) Inputs() []Output { - return nil -} - -func Image(ref string, opts ...ImageOption) State { - r, err := reference.ParseNormalizedNamed(ref) - if err == nil { - ref = reference.TagNameOnly(r).String() - } - var info ImageInfo - for _, opt := range opts { - opt.SetImageOption(&info) - } - - addCap(&info.Constraints, pb.CapSourceImage) - - attrs := map[string]string{} - if info.resolveMode != 0 { - attrs[pb.AttrImageResolveMode] = info.resolveMode.String() - if info.resolveMode == ResolveModeForcePull { - addCap(&info.Constraints, pb.CapSourceImageResolveMode) // only require cap for security enforced mode - } - } - - if info.RecordType != "" { - attrs[pb.AttrImageRecordType] = info.RecordType - } - - src := NewSource("docker-image://"+ref, attrs, info.Constraints) // controversial - if err != nil { - src.err = err - } - if info.metaResolver != nil { - _, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, gw.ResolveImageConfigOpt{ - Platform: info.Constraints.Platform, - ResolveMode: info.resolveMode.String(), - }) - if err != nil { - src.err = err - } else { - st, err := NewState(src.Output()).WithImageConfig(dt) - if err == nil { - return st - } - src.err = err - } - } - return NewState(src.Output()) -} - -type ImageOption interface { - SetImageOption(*ImageInfo) -} - -type imageOptionFunc func(*ImageInfo) - -func (fn imageOptionFunc) SetImageOption(ii *ImageInfo) { - fn(ii) -} - -var MarkImageInternal = imageOptionFunc(func(ii *ImageInfo) { - ii.RecordType = "internal" -}) - -type ResolveMode int - -const ( - ResolveModeDefault ResolveMode = iota - ResolveModeForcePull - ResolveModePreferLocal -) - -func (r ResolveMode) SetImageOption(ii *ImageInfo) { - ii.resolveMode = r -} - -func (r ResolveMode) String() string { - switch r { - case ResolveModeDefault: - return pb.AttrImageResolveModeDefault - case ResolveModeForcePull: - return pb.AttrImageResolveModeForcePull - case ResolveModePreferLocal: - return pb.AttrImageResolveModePreferLocal - default: - return "" - } -} - -type ImageInfo struct { - constraintsWrapper - metaResolver ImageMetaResolver - resolveMode ResolveMode - RecordType string -} - -func Git(remote, ref string, opts ...GitOption) State { - url := "" - - for _, prefix := range []string{ - "http://", "https://", "git://", "git@", - } { - if strings.HasPrefix(remote, prefix) { - url = strings.Split(remote, "#")[0] - remote = strings.TrimPrefix(remote, prefix) - } - } - - id := remote - - if ref != "" { - id += "#" + ref - } - - gi := &GitInfo{} - for _, o := range opts { - o.SetGitOption(gi) - } - attrs := map[string]string{} - if gi.KeepGitDir { - attrs[pb.AttrKeepGitDir] = "true" - addCap(&gi.Constraints, pb.CapSourceGitKeepDir) - } - if url != "" { - attrs[pb.AttrFullRemoteURL] = url - addCap(&gi.Constraints, pb.CapSourceGitFullURL) - } - - addCap(&gi.Constraints, pb.CapSourceGit) - - source := NewSource("git://"+id, attrs, gi.Constraints) - return 
NewState(source.Output()) -} - -type GitOption interface { - SetGitOption(*GitInfo) -} -type gitOptionFunc func(*GitInfo) - -func (fn gitOptionFunc) SetGitOption(gi *GitInfo) { - fn(gi) -} - -type GitInfo struct { - constraintsWrapper - KeepGitDir bool -} - -func KeepGitDir() GitOption { - return gitOptionFunc(func(gi *GitInfo) { - gi.KeepGitDir = true - }) -} - -func Scratch() State { - return NewState(nil) -} - -func Local(name string, opts ...LocalOption) State { - gi := &LocalInfo{} - - for _, o := range opts { - o.SetLocalOption(gi) - } - attrs := map[string]string{} - if gi.SessionID != "" { - attrs[pb.AttrLocalSessionID] = gi.SessionID - addCap(&gi.Constraints, pb.CapSourceLocalSessionID) - } - if gi.IncludePatterns != "" { - attrs[pb.AttrIncludePatterns] = gi.IncludePatterns - addCap(&gi.Constraints, pb.CapSourceLocalIncludePatterns) - } - if gi.FollowPaths != "" { - attrs[pb.AttrFollowPaths] = gi.FollowPaths - addCap(&gi.Constraints, pb.CapSourceLocalFollowPaths) - } - if gi.ExcludePatterns != "" { - attrs[pb.AttrExcludePatterns] = gi.ExcludePatterns - addCap(&gi.Constraints, pb.CapSourceLocalExcludePatterns) - } - if gi.SharedKeyHint != "" { - attrs[pb.AttrSharedKeyHint] = gi.SharedKeyHint - addCap(&gi.Constraints, pb.CapSourceLocalSharedKeyHint) - } - - addCap(&gi.Constraints, pb.CapSourceLocal) - - source := NewSource("local://"+name, attrs, gi.Constraints) - return NewState(source.Output()) -} - -type LocalOption interface { - SetLocalOption(*LocalInfo) -} - -type localOptionFunc func(*LocalInfo) - -func (fn localOptionFunc) SetLocalOption(li *LocalInfo) { - fn(li) -} - -func SessionID(id string) LocalOption { - return localOptionFunc(func(li *LocalInfo) { - li.SessionID = id - }) -} - -func IncludePatterns(p []string) LocalOption { - return localOptionFunc(func(li *LocalInfo) { - if len(p) == 0 { - li.IncludePatterns = "" - return - } - dt, _ := json.Marshal(p) // empty on error - li.IncludePatterns = string(dt) - }) -} - -func FollowPaths(p []string) LocalOption { - return localOptionFunc(func(li *LocalInfo) { - if len(p) == 0 { - li.FollowPaths = "" - return - } - dt, _ := json.Marshal(p) // empty on error - li.FollowPaths = string(dt) - }) -} - -func ExcludePatterns(p []string) LocalOption { - return localOptionFunc(func(li *LocalInfo) { - if len(p) == 0 { - li.ExcludePatterns = "" - return - } - dt, _ := json.Marshal(p) // empty on error - li.ExcludePatterns = string(dt) - }) -} - -func SharedKeyHint(h string) LocalOption { - return localOptionFunc(func(li *LocalInfo) { - li.SharedKeyHint = h - }) -} - -type LocalInfo struct { - constraintsWrapper - SessionID string - IncludePatterns string - ExcludePatterns string - FollowPaths string - SharedKeyHint string -} - -func HTTP(url string, opts ...HTTPOption) State { - hi := &HTTPInfo{} - for _, o := range opts { - o.SetHTTPOption(hi) - } - attrs := map[string]string{} - if hi.Checksum != "" { - attrs[pb.AttrHTTPChecksum] = hi.Checksum.String() - addCap(&hi.Constraints, pb.CapSourceHTTPChecksum) - } - if hi.Filename != "" { - attrs[pb.AttrHTTPFilename] = hi.Filename - } - if hi.Perm != 0 { - attrs[pb.AttrHTTPPerm] = "0" + strconv.FormatInt(int64(hi.Perm), 8) - addCap(&hi.Constraints, pb.CapSourceHTTPPerm) - } - if hi.UID != 0 { - attrs[pb.AttrHTTPUID] = strconv.Itoa(hi.UID) - addCap(&hi.Constraints, pb.CapSourceHTTPUIDGID) - } - if hi.GID != 0 { - attrs[pb.AttrHTTPGID] = strconv.Itoa(hi.GID) - addCap(&hi.Constraints, pb.CapSourceHTTPUIDGID) - } - - addCap(&hi.Constraints, pb.CapSourceHTTP) - source := NewSource(url, attrs, 
hi.Constraints) - return NewState(source.Output()) -} - -type HTTPInfo struct { - constraintsWrapper - Checksum digest.Digest - Filename string - Perm int - UID int - GID int -} - -type HTTPOption interface { - SetHTTPOption(*HTTPInfo) -} - -type httpOptionFunc func(*HTTPInfo) - -func (fn httpOptionFunc) SetHTTPOption(hi *HTTPInfo) { - fn(hi) -} - -func Checksum(dgst digest.Digest) HTTPOption { - return httpOptionFunc(func(hi *HTTPInfo) { - hi.Checksum = dgst - }) -} - -func Chmod(perm os.FileMode) HTTPOption { - return httpOptionFunc(func(hi *HTTPInfo) { - hi.Perm = int(perm) & 0777 - }) -} - -func Filename(name string) HTTPOption { - return httpOptionFunc(func(hi *HTTPInfo) { - hi.Filename = name - }) -} - -func Chown(uid, gid int) HTTPOption { - return httpOptionFunc(func(hi *HTTPInfo) { - hi.UID = uid - hi.GID = gid - }) -} - -func platformSpecificSource(id string) bool { - return strings.HasPrefix(id, "docker-image://") -} - -func addCap(c *Constraints, id apicaps.CapID) { - if c.Metadata.Caps == nil { - c.Metadata.Caps = make(map[apicaps.CapID]bool) - } - c.Metadata.Caps[id] = true -} diff --git a/vendor/github.com/moby/buildkit/client/llb/state.go b/vendor/github.com/moby/buildkit/client/llb/state.go deleted file mode 100644 index 24e3949bf8fe..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/state.go +++ /dev/null @@ -1,492 +0,0 @@ -package llb - -import ( - "context" - "encoding/json" - "fmt" - "net" - "strings" - - "github.com/containerd/containerd/platforms" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -type StateOption func(State) State - -type Output interface { - ToInput(*Constraints) (*pb.Input, error) - Vertex() Vertex -} - -type Vertex interface { - Validate() error - Marshal(*Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) - Output() Output - Inputs() []Output -} - -func NewState(o Output) State { - s := State{ - out: o, - ctx: context.Background(), - } - s = dir("/")(s) - s = s.ensurePlatform() - return s -} - -type State struct { - out Output - ctx context.Context - opts []ConstraintsOpt -} - -func (s State) ensurePlatform() State { - if o, ok := s.out.(interface { - Platform() *specs.Platform - }); ok { - if p := o.Platform(); p != nil { - s = platform(*p)(s) - } - } - return s -} - -func (s State) WithValue(k, v interface{}) State { - return State{ - out: s.out, - ctx: context.WithValue(s.ctx, k, v), - } -} - -func (s State) Value(k interface{}) interface{} { - return s.ctx.Value(k) -} - -func (s State) SetMarshalDefaults(co ...ConstraintsOpt) State { - s.opts = co - return s -} - -func (s State) Marshal(co ...ConstraintsOpt) (*Definition, error) { - def := &Definition{ - Metadata: make(map[digest.Digest]pb.OpMetadata, 0), - } - if s.Output() == nil { - return def, nil - } - - defaultPlatform := platforms.Normalize(platforms.DefaultSpec()) - c := &Constraints{ - Platform: &defaultPlatform, - LocalUniqueID: identity.NewID(), - } - for _, o := range append(s.opts, co...) 
{ - o.SetConstraintsOption(c) - } - - def, err := marshal(s.Output().Vertex(), def, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, c) - if err != nil { - return def, err - } - inp, err := s.Output().ToInput(c) - if err != nil { - return def, err - } - proto := &pb.Op{Inputs: []*pb.Input{inp}} - dt, err := proto.Marshal() - if err != nil { - return def, err - } - def.Def = append(def.Def, dt) - - dgst := digest.FromBytes(dt) - md := def.Metadata[dgst] - md.Caps = map[apicaps.CapID]bool{ - pb.CapConstraints: true, - pb.CapPlatform: true, - } - - for _, m := range def.Metadata { - if m.IgnoreCache { - md.Caps[pb.CapMetaIgnoreCache] = true - } - if m.Description != nil { - md.Caps[pb.CapMetaDescription] = true - } - if m.ExportCache != nil { - md.Caps[pb.CapMetaExportCache] = true - } - } - - def.Metadata[dgst] = md - - return def, nil -} - -func marshal(v Vertex, def *Definition, cache map[digest.Digest]struct{}, vertexCache map[Vertex]struct{}, c *Constraints) (*Definition, error) { - if _, ok := vertexCache[v]; ok { - return def, nil - } - for _, inp := range v.Inputs() { - var err error - def, err = marshal(inp.Vertex(), def, cache, vertexCache, c) - if err != nil { - return def, err - } - } - - dgst, dt, opMeta, err := v.Marshal(c) - if err != nil { - return def, err - } - vertexCache[v] = struct{}{} - if opMeta != nil { - def.Metadata[dgst] = mergeMetadata(def.Metadata[dgst], *opMeta) - } - if _, ok := cache[dgst]; ok { - return def, nil - } - def.Def = append(def.Def, dt) - cache[dgst] = struct{}{} - return def, nil -} - -func (s State) Validate() error { - return s.Output().Vertex().Validate() -} - -func (s State) Output() Output { - return s.out -} - -func (s State) WithOutput(o Output) State { - s = State{ - out: o, - ctx: s.ctx, - } - s = s.ensurePlatform() - return s -} - -func (s State) WithImageConfig(c []byte) (State, error) { - var img struct { - Config struct { - Env []string `json:"Env,omitempty"` - WorkingDir string `json:"WorkingDir,omitempty"` - User string `json:"User,omitempty"` - } `json:"config,omitempty"` - } - if err := json.Unmarshal(c, &img); err != nil { - return State{}, err - } - for _, env := range img.Config.Env { - parts := strings.SplitN(env, "=", 2) - if len(parts[0]) > 0 { - var v string - if len(parts) > 1 { - v = parts[1] - } - s = s.AddEnv(parts[0], v) - } - } - s = s.Dir(img.Config.WorkingDir) - return s, nil -} - -func (s State) Run(ro ...RunOption) ExecState { - ei := &ExecInfo{State: s} - if p := s.GetPlatform(); p != nil { - ei.Constraints.Platform = p - } - for _, o := range ro { - o.SetRunOption(ei) - } - meta := Meta{ - Args: getArgs(ei.State), - Cwd: getDir(ei.State), - Env: getEnv(ei.State), - User: getUser(ei.State), - ProxyEnv: ei.ProxyEnv, - ExtraHosts: getExtraHosts(ei.State), - Network: getNetwork(ei.State), - } - - exec := NewExecOp(s.Output(), meta, ei.ReadonlyRootFS, ei.Constraints) - for _, m := range ei.Mounts { - exec.AddMount(m.Target, m.Source, m.Opts...) 
- } - exec.secrets = ei.Secrets - exec.ssh = ei.SSH - - return ExecState{ - State: s.WithOutput(exec.Output()), - exec: exec, - } -} - -func (s State) AddEnv(key, value string) State { - return s.AddEnvf(key, value) -} - -func (s State) AddEnvf(key, value string, v ...interface{}) State { - return addEnvf(key, value, v...)(s) -} - -func (s State) Dir(str string) State { - return s.Dirf(str) -} -func (s State) Dirf(str string, v ...interface{}) State { - return dirf(str, v...)(s) -} - -func (s State) GetEnv(key string) (string, bool) { - return getEnv(s).Get(key) -} - -func (s State) Env() []string { - return getEnv(s).ToArray() -} - -func (s State) GetDir() string { - return getDir(s) -} - -func (s State) GetArgs() []string { - return getArgs(s) -} - -func (s State) Reset(s2 State) State { - return reset(s2)(s) -} - -func (s State) User(v string) State { - return user(v)(s) -} - -func (s State) Platform(p specs.Platform) State { - return platform(p)(s) -} - -func (s State) GetPlatform() *specs.Platform { - return getPlatform(s) -} - -func (s State) Network(n pb.NetMode) State { - return network(n)(s) -} - -func (s State) GetNetwork() pb.NetMode { - return getNetwork(s) -} - -func (s State) With(so ...StateOption) State { - for _, o := range so { - s = o(s) - } - return s -} - -func (s State) AddExtraHost(host string, ip net.IP) State { - return extraHost(host, ip)(s) -} - -type output struct { - vertex Vertex - getIndex func() (pb.OutputIndex, error) - err error - platform *specs.Platform -} - -func (o *output) ToInput(c *Constraints) (*pb.Input, error) { - if o.err != nil { - return nil, o.err - } - var index pb.OutputIndex - if o.getIndex != nil { - var err error - index, err = o.getIndex() - if err != nil { - return nil, err - } - } - dgst, _, _, err := o.vertex.Marshal(c) - if err != nil { - return nil, err - } - return &pb.Input{Digest: dgst, Index: index}, nil -} - -func (o *output) Vertex() Vertex { - return o.vertex -} - -func (o *output) Platform() *specs.Platform { - return o.platform -} - -type ConstraintsOpt interface { - SetConstraintsOption(*Constraints) - RunOption - LocalOption - HTTPOption - ImageOption - GitOption -} - -type constraintsOptFunc func(m *Constraints) - -func (fn constraintsOptFunc) SetConstraintsOption(m *Constraints) { - fn(m) -} - -func (fn constraintsOptFunc) SetRunOption(ei *ExecInfo) { - ei.applyConstraints(fn) -} - -func (fn constraintsOptFunc) SetLocalOption(li *LocalInfo) { - li.applyConstraints(fn) -} - -func (fn constraintsOptFunc) SetHTTPOption(hi *HTTPInfo) { - hi.applyConstraints(fn) -} - -func (fn constraintsOptFunc) SetImageOption(ii *ImageInfo) { - ii.applyConstraints(fn) -} - -func (fn constraintsOptFunc) SetGitOption(gi *GitInfo) { - gi.applyConstraints(fn) -} - -func mergeMetadata(m1, m2 pb.OpMetadata) pb.OpMetadata { - if m2.IgnoreCache { - m1.IgnoreCache = true - } - if len(m2.Description) > 0 { - if m1.Description == nil { - m1.Description = make(map[string]string) - } - for k, v := range m2.Description { - m1.Description[k] = v - } - } - if m2.ExportCache != nil { - m1.ExportCache = m2.ExportCache - } - - for k := range m2.Caps { - if m1.Caps == nil { - m1.Caps = make(map[apicaps.CapID]bool, len(m2.Caps)) - } - m1.Caps[k] = true - } - - return m1 -} - -var IgnoreCache = constraintsOptFunc(func(c *Constraints) { - c.Metadata.IgnoreCache = true -}) - -func WithDescription(m map[string]string) ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - if c.Metadata.Description == nil { - c.Metadata.Description = 
map[string]string{} - } - for k, v := range m { - c.Metadata.Description[k] = v - } - }) -} - -func WithCustomName(name string, a ...interface{}) ConstraintsOpt { - return WithDescription(map[string]string{ - "llb.customname": fmt.Sprintf(name, a...), - }) -} - -// WithExportCache forces results for this vertex to be exported with the cache -func WithExportCache() ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - c.Metadata.ExportCache = &pb.ExportCache{Value: true} - }) -} - -// WithoutExportCache sets results for this vertex to be not exported with -// the cache -func WithoutExportCache() ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - // ExportCache with value false means to disable exporting - c.Metadata.ExportCache = &pb.ExportCache{Value: false} - }) -} - -// WithoutDefaultExportCache resets the cache export for the vertex to use -// the default defined by the build configuration. -func WithoutDefaultExportCache() ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - // nil means no vertex based config has been set - c.Metadata.ExportCache = nil - }) -} - -// WithCaps exposes supported LLB caps to the marshaler -func WithCaps(caps apicaps.CapSet) ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - c.Caps = &caps - }) -} - -type constraintsWrapper struct { - Constraints -} - -func (cw *constraintsWrapper) applyConstraints(f func(c *Constraints)) { - f(&cw.Constraints) -} - -type Constraints struct { - Platform *specs.Platform - WorkerConstraints []string - Metadata pb.OpMetadata - LocalUniqueID string - Caps *apicaps.CapSet -} - -func Platform(p specs.Platform) ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - c.Platform = &p - }) -} - -func LocalUniqueID(v string) ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - c.LocalUniqueID = v - }) -} - -var ( - LinuxAmd64 = Platform(specs.Platform{OS: "linux", Architecture: "amd64"}) - LinuxArmhf = Platform(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v7"}) - LinuxArm = LinuxArmhf - LinuxArmel = Platform(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"}) - LinuxArm64 = Platform(specs.Platform{OS: "linux", Architecture: "arm64"}) - LinuxS390x = Platform(specs.Platform{OS: "linux", Architecture: "s390x"}) - LinuxPpc64le = Platform(specs.Platform{OS: "linux", Architecture: "ppc64le"}) - Darwin = Platform(specs.Platform{OS: "darwin", Architecture: "amd64"}) - Windows = Platform(specs.Platform{OS: "windows", Architecture: "amd64"}) -) - -func Require(filters ...string) ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - for _, f := range filters { - c.WorkerConstraints = append(c.WorkerConstraints, f) - } - }) -} diff --git a/vendor/github.com/moby/buildkit/client/llb/state_test.go b/vendor/github.com/moby/buildkit/client/llb/state_test.go deleted file mode 100644 index aea7598ba3d8..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/state_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package llb - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestStateMeta(t *testing.T) { - t.Parallel() - - s := Image("foo") - s = s.AddEnv("BAR", "abc").Dir("/foo/bar") - - v, ok := s.GetEnv("BAR") - assert.True(t, ok) - assert.Equal(t, "abc", v) - - assert.Equal(t, "/foo/bar", s.GetDir()) - - s2 := Image("foo2") - s2 = s2.AddEnv("BAZ", "def").Reset(s) - - _, ok = s2.GetEnv("BAZ") - assert.False(t, ok) - - v, ok = s2.GetEnv("BAR") - assert.True(t, ok) - assert.Equal(t, 
"abc", v) -} diff --git a/vendor/github.com/moby/buildkit/client/prune.go b/vendor/github.com/moby/buildkit/client/prune.go deleted file mode 100644 index 27fe5dd8cdbc..000000000000 --- a/vendor/github.com/moby/buildkit/client/prune.go +++ /dev/null @@ -1,83 +0,0 @@ -package client - -import ( - "context" - "io" - "time" - - controlapi "github.com/moby/buildkit/api/services/control" - "github.com/pkg/errors" -) - -func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOption) error { - info := &PruneInfo{} - for _, o := range opts { - o.SetPruneOption(info) - } - - req := &controlapi.PruneRequest{ - Filter: info.Filter, - KeepDuration: int64(info.KeepDuration), - KeepBytes: int64(info.KeepBytes), - } - if info.All { - req.All = true - } - cl, err := c.controlClient().Prune(ctx, req) - if err != nil { - return errors.Wrap(err, "failed to call prune") - } - - for { - d, err := cl.Recv() - if err != nil { - if err == io.EOF { - return nil - } - return err - } - if ch != nil { - ch <- UsageInfo{ - ID: d.ID, - Mutable: d.Mutable, - InUse: d.InUse, - Size: d.Size_, - Parent: d.Parent, - CreatedAt: d.CreatedAt, - Description: d.Description, - UsageCount: int(d.UsageCount), - LastUsedAt: d.LastUsedAt, - RecordType: UsageRecordType(d.RecordType), - Shared: d.Shared, - } - } - } -} - -type PruneOption interface { - SetPruneOption(*PruneInfo) -} - -type PruneInfo struct { - Filter []string - All bool - KeepDuration time.Duration - KeepBytes int64 -} - -type pruneOptionFunc func(*PruneInfo) - -func (f pruneOptionFunc) SetPruneOption(pi *PruneInfo) { - f(pi) -} - -var PruneAll = pruneOptionFunc(func(pi *PruneInfo) { - pi.All = true -}) - -func WithKeepOpt(duration time.Duration, bytes int64) PruneOption { - return pruneOptionFunc(func(pi *PruneInfo) { - pi.KeepDuration = duration - pi.KeepBytes = bytes - }) -} diff --git a/vendor/github.com/moby/buildkit/client/solve.go b/vendor/github.com/moby/buildkit/client/solve.go deleted file mode 100644 index b8da06972116..000000000000 --- a/vendor/github.com/moby/buildkit/client/solve.go +++ /dev/null @@ -1,297 +0,0 @@ -package client - -import ( - "context" - "io" - "os" - "path/filepath" - "strings" - "time" - - controlapi "github.com/moby/buildkit/api/services/control" - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/filesync" - "github.com/moby/buildkit/session/grpchijack" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/entitlements" - opentracing "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - fstypes "github.com/tonistiigi/fsutil/types" - "golang.org/x/sync/errgroup" -) - -type SolveOpt struct { - Exporter string - ExporterAttrs map[string]string - ExporterOutput io.WriteCloser // for ExporterOCI and ExporterDocker - ExporterOutputDir string // for ExporterLocal - LocalDirs map[string]string - SharedKey string - Frontend string - FrontendAttrs map[string]string - ExportCache string - ExportCacheAttrs map[string]string - ImportCache []string - Session []session.Attachable - AllowedEntitlements []entitlements.Entitlement -} - -// Solve calls Solve on the controller. -// def must be nil if (and only if) opt.Frontend is set. 
-func (c *Client) Solve(ctx context.Context, def *llb.Definition, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) { - defer func() { - if statusChan != nil { - close(statusChan) - } - }() - - if opt.Frontend == "" && def == nil { - return nil, errors.New("invalid empty definition") - } - if opt.Frontend != "" && def != nil { - return nil, errors.Errorf("invalid definition for frontend %s", opt.Frontend) - } - - return c.solve(ctx, def, nil, opt, statusChan) -} - -type runGatewayCB func(ref string, s *session.Session) error - -func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runGatewayCB, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) { - if def != nil && runGateway != nil { - return nil, errors.New("invalid with def and cb") - } - - syncedDirs, err := prepareSyncedDirs(def, opt.LocalDirs) - if err != nil { - return nil, err - } - - ref := identity.NewID() - eg, ctx := errgroup.WithContext(ctx) - - statusContext, cancelStatus := context.WithCancel(context.Background()) - defer cancelStatus() - - if span := opentracing.SpanFromContext(ctx); span != nil { - statusContext = opentracing.ContextWithSpan(statusContext, span) - } - - s, err := session.NewSession(statusContext, defaultSessionName(), opt.SharedKey) - if err != nil { - return nil, errors.Wrap(err, "failed to create session") - } - - if len(syncedDirs) > 0 { - s.Allow(filesync.NewFSSyncProvider(syncedDirs)) - } - - for _, a := range opt.Session { - s.Allow(a) - } - - switch opt.Exporter { - case ExporterLocal: - if opt.ExporterOutput != nil { - return nil, errors.New("output file writer is not supported by local exporter") - } - if opt.ExporterOutputDir == "" { - return nil, errors.New("output directory is required for local exporter") - } - s.Allow(filesync.NewFSSyncTargetDir(opt.ExporterOutputDir)) - case ExporterOCI, ExporterDocker: - if opt.ExporterOutputDir != "" { - return nil, errors.Errorf("output directory %s is not supported by %s exporter", opt.ExporterOutputDir, opt.Exporter) - } - if opt.ExporterOutput == nil { - return nil, errors.Errorf("output file writer is required for %s exporter", opt.Exporter) - } - s.Allow(filesync.NewFSSyncTarget(opt.ExporterOutput)) - default: - if opt.ExporterOutput != nil { - return nil, errors.Errorf("output file writer is not supported by %s exporter", opt.Exporter) - } - if opt.ExporterOutputDir != "" { - return nil, errors.Errorf("output directory %s is not supported by %s exporter", opt.ExporterOutputDir, opt.Exporter) - } - } - - eg.Go(func() error { - return s.Run(statusContext, grpchijack.Dialer(c.controlClient())) - }) - - solveCtx, cancelSolve := context.WithCancel(ctx) - var res *SolveResponse - eg.Go(func() error { - ctx := solveCtx - defer cancelSolve() - - defer func() { // make sure the Status ends cleanly on build errors - go func() { - <-time.After(3 * time.Second) - cancelStatus() - }() - logrus.Debugf("stopping session") - s.Close() - }() - var pbd *pb.Definition - if def != nil { - pbd = def.ToPB() - } - resp, err := c.controlClient().Solve(ctx, &controlapi.SolveRequest{ - Ref: ref, - Definition: pbd, - Exporter: opt.Exporter, - ExporterAttrs: opt.ExporterAttrs, - Session: s.ID(), - Frontend: opt.Frontend, - FrontendAttrs: opt.FrontendAttrs, - Cache: controlapi.CacheOptions{ - ExportRef: opt.ExportCache, - ImportRefs: opt.ImportCache, - ExportAttrs: opt.ExportCacheAttrs, - }, - Entitlements: opt.AllowedEntitlements, - }) - if err != nil { - return errors.Wrap(err, "failed to solve") - } - res = 
&SolveResponse{ - ExporterResponse: resp.ExporterResponse, - } - return nil - }) - - if runGateway != nil { - eg.Go(func() error { - err := runGateway(ref, s) - if err == nil { - return nil - } - - // If the callback failed then the main - // `Solve` (called above) should error as - // well. However as a fallback we wait up to - // 5s for that to happen before failing this - // goroutine. - select { - case <-solveCtx.Done(): - case <-time.After(5 * time.Second): - cancelSolve() - } - - return err - }) - } - - eg.Go(func() error { - stream, err := c.controlClient().Status(statusContext, &controlapi.StatusRequest{ - Ref: ref, - }) - if err != nil { - return errors.Wrap(err, "failed to get status") - } - for { - resp, err := stream.Recv() - if err != nil { - if err == io.EOF { - return nil - } - return errors.Wrap(err, "failed to receive status") - } - s := SolveStatus{} - for _, v := range resp.Vertexes { - s.Vertexes = append(s.Vertexes, &Vertex{ - Digest: v.Digest, - Inputs: v.Inputs, - Name: v.Name, - Started: v.Started, - Completed: v.Completed, - Error: v.Error, - Cached: v.Cached, - }) - } - for _, v := range resp.Statuses { - s.Statuses = append(s.Statuses, &VertexStatus{ - ID: v.ID, - Vertex: v.Vertex, - Name: v.Name, - Total: v.Total, - Current: v.Current, - Timestamp: v.Timestamp, - Started: v.Started, - Completed: v.Completed, - }) - } - for _, v := range resp.Logs { - s.Logs = append(s.Logs, &VertexLog{ - Vertex: v.Vertex, - Stream: int(v.Stream), - Data: v.Msg, - Timestamp: v.Timestamp, - }) - } - if statusChan != nil { - statusChan <- &s - } - } - }) - - if err := eg.Wait(); err != nil { - return nil, err - } - return res, nil -} - -func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) { - for _, d := range localDirs { - fi, err := os.Stat(d) - if err != nil { - return nil, errors.Wrapf(err, "could not find %s", d) - } - if !fi.IsDir() { - return nil, errors.Errorf("%s not a directory", d) - } - } - resetUIDAndGID := func(st *fstypes.Stat) bool { - st.Uid = 0 - st.Gid = 0 - return true - } - - dirs := make([]filesync.SyncedDir, 0, len(localDirs)) - if def == nil { - for name, d := range localDirs { - dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID}) - } - } else { - for _, dt := range def.Def { - var op pb.Op - if err := (&op).Unmarshal(dt); err != nil { - return nil, errors.Wrap(err, "failed to parse llb proto op") - } - if src := op.GetSource(); src != nil { - if strings.HasPrefix(src.Identifier, "local://") { // TODO: just make a type property - name := strings.TrimPrefix(src.Identifier, "local://") - d, ok := localDirs[name] - if !ok { - return nil, errors.Errorf("local directory %s not enabled", name) - } - dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID}) // TODO: excludes - } - } - } - } - return dirs, nil -} - -func defaultSessionName() string { - wd, err := os.Getwd() - if err != nil { - return "unknown" - } - return filepath.Base(wd) -} diff --git a/vendor/github.com/moby/buildkit/client/workers.go b/vendor/github.com/moby/buildkit/client/workers.go deleted file mode 100644 index b011ee2efdbf..000000000000 --- a/vendor/github.com/moby/buildkit/client/workers.go +++ /dev/null @@ -1,70 +0,0 @@ -package client - -import ( - "context" - "time" - - controlapi "github.com/moby/buildkit/api/services/control" - apitypes "github.com/moby/buildkit/api/types" - "github.com/moby/buildkit/solver/pb" - specs "github.com/opencontainers/image-spec/specs-go/v1" - 
"github.com/pkg/errors" -) - -// WorkerInfo contains information about a worker -type WorkerInfo struct { - ID string - Labels map[string]string - Platforms []specs.Platform - GCPolicy []PruneInfo -} - -// ListWorkers lists all active workers -func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]*WorkerInfo, error) { - info := &ListWorkersInfo{} - for _, o := range opts { - o.SetListWorkersOption(info) - } - - req := &controlapi.ListWorkersRequest{Filter: info.Filter} - resp, err := c.controlClient().ListWorkers(ctx, req) - if err != nil { - return nil, errors.Wrap(err, "failed to list workers") - } - - var wi []*WorkerInfo - - for _, w := range resp.Record { - wi = append(wi, &WorkerInfo{ - ID: w.ID, - Labels: w.Labels, - Platforms: pb.ToSpecPlatforms(w.Platforms), - GCPolicy: fromAPIGCPolicy(w.GCPolicy), - }) - } - - return wi, nil -} - -// ListWorkersOption is an option for a worker list query -type ListWorkersOption interface { - SetListWorkersOption(*ListWorkersInfo) -} - -// ListWorkersInfo is a payload for worker list query -type ListWorkersInfo struct { - Filter []string -} - -func fromAPIGCPolicy(in []*apitypes.GCPolicy) []PruneInfo { - out := make([]PruneInfo, 0, len(in)) - for _, p := range in { - out = append(out, PruneInfo{ - All: p.All, - Filter: p.Filters, - KeepDuration: time.Duration(p.KeepDuration), - KeepBytes: p.KeepBytes, - }) - } - return out -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildctl/build.go b/vendor/github.com/moby/buildkit/cmd/buildctl/build.go deleted file mode 100644 index 0b246ea39ce0..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildctl/build.go +++ /dev/null @@ -1,393 +0,0 @@ -package main - -import ( - "context" - "encoding/csv" - "encoding/json" - "io" - "os" - "strings" - - "github.com/containerd/console" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/auth/authprovider" - "github.com/moby/buildkit/session/secrets/secretsprovider" - "github.com/moby/buildkit/session/sshforward/sshprovider" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/entitlements" - "github.com/moby/buildkit/util/progress/progressui" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" - "golang.org/x/sync/errgroup" -) - -var buildCommand = cli.Command{ - Name: "build", - Usage: "build", - Action: build, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "exporter", - Usage: "Define exporter for build result", - }, - cli.StringSliceFlag{ - Name: "exporter-opt", - Usage: "Define custom options for exporter", - }, - cli.StringFlag{ - Name: "progress", - Usage: "Set type of progress (auto, plain, tty). Use plain to show container output", - Value: "auto", - }, - cli.StringFlag{ - Name: "trace", - Usage: "Path to trace file. 
Defaults to no tracing.", - }, - cli.StringSliceFlag{ - Name: "local", - Usage: "Allow build access to the local directory", - }, - cli.StringFlag{ - Name: "frontend", - Usage: "Define frontend used for build", - }, - cli.StringSliceFlag{ - Name: "frontend-opt", - Usage: "Define custom options for frontend", - }, - cli.BoolFlag{ - Name: "no-cache", - Usage: "Disable cache for all the vertices", - }, - cli.StringFlag{ - Name: "export-cache", - Usage: "Reference to export build cache to", - }, - cli.StringSliceFlag{ - Name: "export-cache-opt", - Usage: "Define custom options for cache exporting", - }, - cli.StringSliceFlag{ - Name: "import-cache", - Usage: "Reference to import build cache from", - }, - cli.StringSliceFlag{ - Name: "secret", - Usage: "Secret value exposed to the build. Format id=secretname,src=filepath", - }, - cli.StringSliceFlag{ - Name: "allow", - Usage: "Allow extra privileged entitlement, e.g. network.host, security.unconfined", - }, - cli.StringSliceFlag{ - Name: "ssh", - Usage: "Allow forwarding SSH agent to the builder. Format default|<id>[=<socket>|<key>[,<key>]]", - }, - }, -} - -func read(r io.Reader, clicontext *cli.Context) (*llb.Definition, error) { - def, err := llb.ReadFrom(r) - if err != nil { - return nil, errors.Wrap(err, "failed to parse input") - } - if clicontext.Bool("no-cache") { - for _, dt := range def.Def { - var op pb.Op - if err := (&op).Unmarshal(dt); err != nil { - return nil, errors.Wrap(err, "failed to parse llb proto op") - } - dgst := digest.FromBytes(dt) - opMetadata, ok := def.Metadata[dgst] - if !ok { - opMetadata = pb.OpMetadata{} - } - c := llb.Constraints{Metadata: opMetadata} - llb.IgnoreCache(&c) - def.Metadata[dgst] = c.Metadata - } - } - return def, nil -} - -func openTraceFile(clicontext *cli.Context) (*os.File, error) { - if traceFileName := clicontext.String("trace"); traceFileName != "" { - return os.OpenFile(traceFileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600) - } - return nil, nil -} - -func build(clicontext *cli.Context) error { - c, err := resolveClient(clicontext) - if err != nil { - return err - } - - traceFile, err := openTraceFile(clicontext) - if err != nil { - return err - } - var traceEnc *json.Encoder - if traceFile != nil { - defer traceFile.Close() - traceEnc = json.NewEncoder(traceFile) - - logrus.Infof("tracing logs to %s", traceFile.Name()) - } - - attachable := []session.Attachable{authprovider.NewDockerAuthProvider()} - - if ssh := clicontext.StringSlice("ssh"); len(ssh) > 0 { - configs, err := parseSSHSpecs(ssh) - if err != nil { - return err - } - sp, err := sshprovider.NewSSHAgentProvider(configs) - if err != nil { - return err - } - attachable = append(attachable, sp) - } - - if secrets := clicontext.StringSlice("secret"); len(secrets) > 0 { - secretProvider, err := parseSecretSpecs(secrets) - if err != nil { - return err - } - attachable = append(attachable, secretProvider) - } - - allowed, err := parseEntitlements(clicontext.StringSlice("allow")) - if err != nil { - return err - } - - ch := make(chan *client.SolveStatus) - eg, ctx := errgroup.WithContext(commandContext(clicontext)) - - solveOpt := client.SolveOpt{ - Exporter: clicontext.String("exporter"), - // ExporterAttrs is set later - // LocalDirs is set later - Frontend: clicontext.String("frontend"), - // FrontendAttrs is set later - ExportCache: clicontext.String("export-cache"), - ImportCache: clicontext.StringSlice("import-cache"), - Session: attachable, - AllowedEntitlements: allowed, - } - solveOpt.ExporterAttrs, err = 
attrMap(clicontext.StringSlice("exporter-opt")) - if err != nil { - return errors.Wrap(err, "invalid exporter-opt") - } - solveOpt.ExporterOutput, solveOpt.ExporterOutputDir, err = resolveExporterOutput(solveOpt.Exporter, solveOpt.ExporterAttrs["output"]) - if err != nil { - return errors.Wrap(err, "invalid exporter-opt: output") - } - if solveOpt.ExporterOutput != nil || solveOpt.ExporterOutputDir != "" { - delete(solveOpt.ExporterAttrs, "output") - } - - solveOpt.FrontendAttrs, err = attrMap(clicontext.StringSlice("frontend-opt")) - if err != nil { - return errors.Wrap(err, "invalid frontend-opt") - } - - exportCacheAttrs, err := attrMap(clicontext.StringSlice("export-cache-opt")) - if err != nil { - return errors.Wrap(err, "invalid export-cache-opt") - } - if len(exportCacheAttrs) == 0 { - exportCacheAttrs = map[string]string{"mode": "min"} - } - solveOpt.ExportCacheAttrs = exportCacheAttrs - - solveOpt.LocalDirs, err = attrMap(clicontext.StringSlice("local")) - if err != nil { - return errors.Wrap(err, "invalid local") - } - - var def *llb.Definition - if clicontext.String("frontend") == "" { - def, err = read(os.Stdin, clicontext) - if err != nil { - return err - } - } else { - solveOpt.FrontendAttrs["no-cache"] = "" - } - - eg.Go(func() error { - resp, err := c.Solve(ctx, def, solveOpt, ch) - if err != nil { - return err - } - for k, v := range resp.ExporterResponse { - logrus.Debugf("solve response: %s=%s", k, v) - } - return err - }) - - displayCh := ch - if traceEnc != nil { - displayCh = make(chan *client.SolveStatus) - eg.Go(func() error { - defer close(displayCh) - for s := range ch { - if err := traceEnc.Encode(s); err != nil { - logrus.Error(err) - } - displayCh <- s - } - return nil - }) - } - - eg.Go(func() error { - var c console.Console - progressOpt := clicontext.String("progress") - - switch progressOpt { - case "auto", "tty": - cf, err := console.ConsoleFromFile(os.Stderr) - if err != nil && progressOpt == "tty" { - return err - } - c = cf - case "plain": - default: - return errors.Errorf("invalid progress value : %s", progressOpt) - } - - // not using shared context to not disrupt display but let is finish reporting errors - return progressui.DisplaySolveStatus(context.TODO(), "", c, os.Stdout, displayCh) - }) - - return eg.Wait() -} - -func attrMap(sl []string) (map[string]string, error) { - m := map[string]string{} - for _, v := range sl { - parts := strings.SplitN(v, "=", 2) - if len(parts) != 2 { - return nil, errors.Errorf("invalid value %s", v) - } - m[parts[0]] = parts[1] - } - return m, nil -} - -func parseSecretSpecs(sl []string) (session.Attachable, error) { - fs := make([]secretsprovider.FileSource, 0, len(sl)) - for _, v := range sl { - s, err := parseSecret(v) - if err != nil { - return nil, err - } - fs = append(fs, *s) - } - store, err := secretsprovider.NewFileStore(fs) - if err != nil { - return nil, err - } - return secretsprovider.NewSecretProvider(store), nil -} - -func parseSecret(value string) (*secretsprovider.FileSource, error) { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return nil, errors.Wrap(err, "failed to parse csv secret") - } - - fs := secretsprovider.FileSource{} - - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) != 2 { - return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "type": - if value != "file" { - return nil, 
errors.Errorf("unsupported secret type %q", value) - } - case "id": - fs.ID = value - case "source", "src": - fs.FilePath = value - default: - return nil, errors.Errorf("unexpected key '%s' in '%s'", key, field) - } - } - return &fs, nil -} - -// resolveExporterOutput returns at most either one of io.WriteCloser (single file) or a string (directory path). -func resolveExporterOutput(exporter, output string) (io.WriteCloser, string, error) { - switch exporter { - case client.ExporterLocal: - if output == "" { - return nil, "", errors.New("output directory is required for local exporter") - } - return nil, output, nil - case client.ExporterOCI, client.ExporterDocker: - if output != "" { - fi, err := os.Stat(output) - if err != nil && !os.IsNotExist(err) { - return nil, "", errors.Wrapf(err, "invalid destination file: %s", output) - } - if err == nil && fi.IsDir() { - return nil, "", errors.Errorf("destination file is a directory") - } - w, err := os.Create(output) - return w, "", err - } - // if no output file is specified, use stdout - if _, err := console.ConsoleFromFile(os.Stdout); err == nil { - return nil, "", errors.Errorf("output file is required for %s exporter. refusing to write to console", exporter) - } - return os.Stdout, "", nil - default: // e.g. client.ExporterImage - if output != "" { - return nil, "", errors.Errorf("output %s is not supported by %s exporter", output, exporter) - } - return nil, "", nil - } -} - -func parseEntitlements(inp []string) ([]entitlements.Entitlement, error) { - ent := make([]entitlements.Entitlement, 0, len(inp)) - for _, v := range inp { - e, err := entitlements.Parse(v) - if err != nil { - return nil, err - } - ent = append(ent, e) - } - return ent, nil -} - -func parseSSHSpecs(inp []string) ([]sshprovider.AgentConfig, error) { - configs := make([]sshprovider.AgentConfig, 0, len(inp)) - for _, v := range inp { - parts := strings.SplitN(v, "=", 2) - cfg := sshprovider.AgentConfig{ - ID: parts[0], - } - if len(parts) > 1 { - cfg.Paths = strings.Split(parts[1], ",") - } - configs = append(configs, cfg) - } - return configs, nil -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildctl/build_test.go b/vendor/github.com/moby/buildkit/cmd/buildctl/build_test.go deleted file mode 100644 index f54312bdb023..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildctl/build_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package main - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "testing" - "time" - - "github.com/containerd/containerd" - "github.com/containerd/containerd/namespaces" - "github.com/containerd/continuity/fs/fstest" - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/util/testutil/integration" - "github.com/stretchr/testify/require" -) - -func testBuildWithLocalFiles(t *testing.T, sb integration.Sandbox) { - dir, err := tmpdir( - fstest.CreateFile("foo", []byte("bar"), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - st := llb.Image("busybox"). - Run(llb.Shlex("sh -c 'echo -n bar > foo2'")). - Run(llb.Shlex("cmp -s /mnt/foo foo2")) - - st.AddMount("/mnt", llb.Local("src"), llb.Readonly) - - rdr, err := marshal(st.Root()) - require.NoError(t, err) - - cmd := sb.Cmd(fmt.Sprintf("build --progress=plain --local src=%s", dir)) - cmd.Stdin = rdr - - err = cmd.Run() - require.NoError(t, err) -} - -func testBuildLocalExporter(t *testing.T, sb integration.Sandbox) { - st := llb.Image("busybox"). 
- Run(llb.Shlex("sh -c 'echo -n bar > /out/foo'")) - - out := st.AddMount("/out", llb.Scratch()) - - rdr, err := marshal(out) - require.NoError(t, err) - - tmpdir, err := ioutil.TempDir("", "buildkit-buildctl") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - cmd := sb.Cmd(fmt.Sprintf("build --progress=plain --exporter=local --exporter-opt output=%s", tmpdir)) - cmd.Stdin = rdr - err = cmd.Run() - - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(tmpdir, "foo")) - require.NoError(t, err) - require.Equal(t, string(dt), "bar") -} - -func testBuildContainerdExporter(t *testing.T, sb integration.Sandbox) { - var cdAddress string - if cd, ok := sb.(interface { - ContainerdAddress() string - }); !ok { - t.Skip("only for containerd worker") - } else { - cdAddress = cd.ContainerdAddress() - } - - st := llb.Image("busybox"). - Run(llb.Shlex("sh -c 'echo -n bar > /foo'")) - - rdr, err := marshal(st.Root()) - require.NoError(t, err) - - cmd := sb.Cmd("build --progress=plain --exporter=image --exporter-opt name=example.com/moby/imageexporter:test") - cmd.Stdin = rdr - err = cmd.Run() - require.NoError(t, err) - - client, err := containerd.New(cdAddress, containerd.WithTimeout(60*time.Second)) - require.NoError(t, err) - defer client.Close() - - ctx := namespaces.WithNamespace(context.Background(), "buildkit") - - _, err = client.ImageService().Get(ctx, "example.com/moby/imageexporter:test") - require.NoError(t, err) -} - -func marshal(st llb.State) (io.Reader, error) { - def, err := st.Marshal() - if err != nil { - return nil, err - } - dt, err := def.ToPB().Marshal() - if err != nil { - return nil, err - } - return bytes.NewBuffer(dt), nil -} - -func tmpdir(appliers ...fstest.Applier) (string, error) { - tmpdir, err := ioutil.TempDir("", "buildkit-buildctl") - if err != nil { - return "", err - } - if err := fstest.Apply(appliers...).Apply(tmpdir); err != nil { - return "", err - } - return tmpdir, nil -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildctl/buildctl_test.go b/vendor/github.com/moby/buildkit/cmd/buildctl/buildctl_test.go deleted file mode 100644 index 6e9aa672afc4..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildctl/buildctl_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package main - -import ( - "testing" - - "github.com/moby/buildkit/util/testutil/integration" - "github.com/stretchr/testify/require" -) - -func TestCLIIntegration(t *testing.T) { - integration.Run(t, []integration.Test{ - testDiskUsage, - testBuildWithLocalFiles, - testBuildLocalExporter, - testBuildContainerdExporter, - testPrune, - testUsage, - }, - integration.WithMirroredImages(integration.OfficialImages("busybox:latest")), - ) -} - -func testUsage(t *testing.T, sb integration.Sandbox) { - require.NoError(t, sb.Cmd().Run()) - - require.NoError(t, sb.Cmd("--help").Run()) -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildctl/debug.go b/vendor/github.com/moby/buildkit/cmd/buildctl/debug.go deleted file mode 100644 index a6a33b6f8882..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildctl/debug.go +++ /dev/null @@ -1,16 +0,0 @@ -package main - -import ( - "github.com/moby/buildkit/cmd/buildctl/debug" - "github.com/urfave/cli" -) - -var debugCommand = cli.Command{ - Name: "debug", - Usage: "debug utilities", - Subcommands: []cli.Command{ - debug.DumpLLBCommand, - debug.DumpMetadataCommand, - debug.WorkersCommand, - }, -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildctl/debug/dumpllb.go b/vendor/github.com/moby/buildkit/cmd/buildctl/debug/dumpllb.go 
deleted file mode 100644 index be5c4af92ea6..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildctl/debug/dumpllb.go +++ /dev/null @@ -1,117 +0,0 @@ -package debug - -import ( - "encoding/json" - "fmt" - "io" - "os" - "strings" - - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/solver/pb" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var DumpLLBCommand = cli.Command{ - Name: "dump-llb", - Usage: "dump LLB in human-readable format. LLB can be also passed via stdin. This command does not require the daemon to be running.", - ArgsUsage: "<llbfile>", - Action: dumpLLB, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "dot", - Usage: "Output dot format", - }, - }, -} - -func dumpLLB(clicontext *cli.Context) error { - var r io.Reader - if llbFile := clicontext.Args().First(); llbFile != "" && llbFile != "-" { - f, err := os.Open(llbFile) - if err != nil { - return err - } - defer f.Close() - r = f - } else { - r = os.Stdin - } - ops, err := loadLLB(r) - if err != nil { - return err - } - if clicontext.Bool("dot") { - writeDot(ops, os.Stdout) - } else { - enc := json.NewEncoder(os.Stdout) - for _, op := range ops { - if err := enc.Encode(op); err != nil { - return err - } - } - } - return nil -} - -type llbOp struct { - Op pb.Op - Digest digest.Digest - OpMetadata pb.OpMetadata -} - -func loadLLB(r io.Reader) ([]llbOp, error) { - def, err := llb.ReadFrom(r) - if err != nil { - return nil, err - } - var ops []llbOp - for _, dt := range def.Def { - var op pb.Op - if err := (&op).Unmarshal(dt); err != nil { - return nil, errors.Wrap(err, "failed to parse op") - } - dgst := digest.FromBytes(dt) - ent := llbOp{Op: op, Digest: dgst, OpMetadata: def.Metadata[dgst]} - ops = append(ops, ent) - } - return ops, nil -} - -func writeDot(ops []llbOp, w io.Writer) { - // TODO: print OpMetadata - fmt.Fprintln(w, "digraph {") - defer fmt.Fprintln(w, "}") - for _, op := range ops { - name, shape := attr(op.Digest, op.Op) - fmt.Fprintf(w, " %q [label=%q shape=%q];\n", op.Digest, name, shape) - } - for _, op := range ops { - for i, inp := range op.Op.Inputs { - label := "" - if eo, ok := op.Op.Op.(*pb.Op_Exec); ok { - for _, m := range eo.Exec.Mounts { - if int(m.Input) == i && m.Dest != "/" { - label = m.Dest - } - } - } - fmt.Fprintf(w, " %q -> %q [label=%q];\n", inp.Digest, op.Digest, label) - } - } -} - -func attr(dgst digest.Digest, op pb.Op) (string, string) { - switch op := op.Op.(type) { - case *pb.Op_Source: - return op.Source.Identifier, "ellipse" - case *pb.Op_Exec: - return strings.Join(op.Exec.Meta.Args, " "), "box" - case *pb.Op_Build: - return "build", "box3d" - default: - return dgst.String(), "plaintext" - } -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildctl/debug/dumpmetadata.go b/vendor/github.com/moby/buildkit/cmd/buildctl/debug/dumpmetadata.go deleted file mode 100644 index 67270c6c879c..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildctl/debug/dumpmetadata.go +++ /dev/null @@ -1,95 +0,0 @@ -package debug - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/moby/buildkit/util/appdefaults" - "github.com/urfave/cli" - bolt "go.etcd.io/bbolt" -) - -var DumpMetadataCommand = cli.Command{ - Name: "dump-metadata", - Usage: "dump the meta in human-readable format. 
This command requires the daemon NOT to be running.", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "root", - Usage: "path to state directory", - Value: appdefaults.Root, - }, - }, - Action: func(clicontext *cli.Context) error { - dbFiles, err := findMetadataDBFiles(clicontext.String("root")) - if err != nil { - return err - } - for _, dbFile := range dbFiles { - fmt.Printf("===== %s =====\n", dbFile) - if err := dumpBolt(dbFile, func(k, v []byte) string { - return fmt.Sprintf("%q: %s", string(k), string(v)) - }); err != nil { - return err - } - } - return nil - }, -} - -func findMetadataDBFiles(root string) ([]string, error) { - dirs, err := ioutil.ReadDir(root) - if err != nil { - return nil, err - } - var files []string - for _, dir := range dirs { - if !dir.IsDir() { - continue - } - p := filepath.Join(root, dir.Name(), "metadata.db") - _, err := os.Stat(p) - if err == nil { - files = append(files, p) - } else if !os.IsNotExist(err) { - return nil, err - } - } - return files, nil -} - -func dumpBolt(dbFile string, stringifier func(k, v []byte) string) error { - if dbFile == "" { - return errors.New("dbfile not specified") - } - if dbFile == "-" { - // user could still specify "/dev/stdin" but unlikely to work - return errors.New("stdin unsupported") - } - db, err := bolt.Open(dbFile, 0400, &bolt.Options{ReadOnly: true, Timeout: 3 * time.Second}) - if err != nil { - return err - } - defer db.Close() - return db.View(func(tx *bolt.Tx) error { - // TODO: JSON format? - return tx.ForEach(func(name []byte, b *bolt.Bucket) error { - return dumpBucket(name, b, "", stringifier) - }) - }) -} - -func dumpBucket(name []byte, b *bolt.Bucket, indent string, stringifier func(k, v []byte) string) error { - fmt.Printf("%sbucket %q:\n", indent, string(name)) - childrenIndent := indent + " " - return b.ForEach(func(k, v []byte) error { - if bb := b.Bucket(k); bb != nil { - return dumpBucket(k, bb, childrenIndent, stringifier) - } - fmt.Printf("%s%s\n", childrenIndent, stringifier(k, v)) - return nil - }) -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildctl/debug/workers.go b/vendor/github.com/moby/buildkit/cmd/buildctl/debug/workers.go deleted file mode 100644 index 4e55956e54a2..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildctl/debug/workers.go +++ /dev/null @@ -1,118 +0,0 @@ -package debug - -import ( - "context" - "fmt" - "os" - "sort" - "strings" - "text/tabwriter" - - "github.com/containerd/containerd/platforms" - "github.com/moby/buildkit/client" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/tonistiigi/units" - "github.com/urfave/cli" -) - -var WorkersCommand = cli.Command{ - Name: "workers", - Usage: "list workers", - Action: listWorkers, - Flags: []cli.Flag{ - cli.StringSliceFlag{ - Name: "filter, f", - Usage: "containerd-style filter string slice", - }, - cli.BoolFlag{ - Name: "verbose, v", - Usage: "Verbose output", - }, - }, -} - -func resolveClient(c *cli.Context) (*client.Client, error) { - return client.New(commandContext(c), c.GlobalString("addr"), client.WithFailFast()) -} - -func listWorkers(clicontext *cli.Context) error { - c, err := resolveClient(clicontext) - if err != nil { - return err - } - - workers, err := c.ListWorkers(commandContext(clicontext), client.WithFilter(clicontext.StringSlice("filter"))) - if err != nil { - return err - } - tw := tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0) - - if clicontext.Bool("verbose") { - printWorkersVerbose(tw, workers) - } else { - printWorkersTable(tw, workers) - } - return nil -} - 
-func printWorkersVerbose(tw *tabwriter.Writer, winfo []*client.WorkerInfo) { - for _, wi := range winfo { - fmt.Fprintf(tw, "ID:\t%s\n", wi.ID) - fmt.Fprintf(tw, "Platforms:\t%s\n", joinPlatforms(wi.Platforms)) - fmt.Fprintf(tw, "Labels:\n") - for _, k := range sortedKeys(wi.Labels) { - v := wi.Labels[k] - fmt.Fprintf(tw, "\t%s:\t%s\n", k, v) - } - for i, rule := range wi.GCPolicy { - fmt.Fprintf(tw, "GC Policy rule#%d:\n", i) - fmt.Fprintf(tw, "\tAll:\t%v\n", rule.All) - if len(rule.Filter) > 0 { - fmt.Fprintf(tw, "\tFilters:\t%s\n", strings.Join(rule.Filter, " ")) - } - if rule.KeepDuration > 0 { - fmt.Fprintf(tw, "\tKeep Duration:\t%v\n", rule.KeepDuration.String()) - } - if rule.KeepBytes > 0 { - fmt.Fprintf(tw, "\tKeep Bytes:\t%g\n", units.Bytes(rule.KeepBytes)) - } - } - fmt.Fprintf(tw, "\n") - } - - tw.Flush() -} - -func printWorkersTable(tw *tabwriter.Writer, winfo []*client.WorkerInfo) { - fmt.Fprintln(tw, "ID\tPLATFORMS") - - for _, wi := range winfo { - id := wi.ID - fmt.Fprintf(tw, "%s\t%s\n", id, joinPlatforms(wi.Platforms)) - } - - tw.Flush() -} - -func sortedKeys(m map[string]string) []string { - s := make([]string, len(m)) - i := 0 - for k := range m { - s[i] = k - i++ - } - sort.Strings(s) - return s -} - -func commandContext(c *cli.Context) context.Context { - return c.App.Metadata["context"].(context.Context) -} - -func joinPlatforms(p []specs.Platform) string { - str := make([]string, 0, len(p)) - for _, pp := range p { - str = append(str, platforms.Format(platforms.Normalize(pp))) - } - return strings.Join(str, ",") -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildctl/diskusage.go b/vendor/github.com/moby/buildkit/cmd/buildctl/diskusage.go deleted file mode 100644 index 56a747903413..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildctl/diskusage.go +++ /dev/null @@ -1,141 +0,0 @@ -package main - -import ( - "fmt" - "io" - "os" - "text/tabwriter" - - "github.com/moby/buildkit/client" - "github.com/tonistiigi/units" - "github.com/urfave/cli" -) - -var diskUsageCommand = cli.Command{ - Name: "du", - Usage: "disk usage", - Action: diskUsage, - Flags: []cli.Flag{ - cli.StringSliceFlag{ - Name: "filter, f", - Usage: "Filter records", - }, - cli.BoolFlag{ - Name: "verbose, v", - Usage: "Verbose output", - }, - }, -} - -func diskUsage(clicontext *cli.Context) error { - c, err := resolveClient(clicontext) - if err != nil { - return err - } - - du, err := c.DiskUsage(commandContext(clicontext), client.WithFilter(clicontext.StringSlice("filter"))) - if err != nil { - return err - } - - tw := tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0) - - if clicontext.Bool("verbose") { - printVerbose(tw, du) - } else { - printTable(tw, du) - } - - if len(clicontext.StringSlice("filter")) == 0 { - printSummary(tw, du) - } - - return nil -} - -func printKV(w io.Writer, k string, v interface{}) { - fmt.Fprintf(w, "%s:\t%v\n", k, v) -} - -func printVerbose(tw *tabwriter.Writer, du []*client.UsageInfo) { - for _, di := range du { - printKV(tw, "ID", di.ID) - if di.Parent != "" { - printKV(tw, "Parent", di.Parent) - } - printKV(tw, "Created at", di.CreatedAt) - printKV(tw, "Mutable", di.Mutable) - printKV(tw, "Reclaimable", !di.InUse) - printKV(tw, "Shared", di.Shared) - printKV(tw, "Size", fmt.Sprintf("%.2f", units.Bytes(di.Size))) - if di.Description != "" { - printKV(tw, "Description", di.Description) - } - printKV(tw, "Usage count", di.UsageCount) - if di.LastUsedAt != nil { - printKV(tw, "Last used", di.LastUsedAt) - } - if di.RecordType != "" { - printKV(tw, "Type", 
di.RecordType) - } - - fmt.Fprintf(tw, "\n") - } - - tw.Flush() -} - -func printTable(tw *tabwriter.Writer, du []*client.UsageInfo) { - printTableHeader(tw) - - for _, di := range du { - printTableRow(tw, di) - } - - tw.Flush() -} - -func printTableHeader(tw *tabwriter.Writer) { - fmt.Fprintln(tw, "ID\tRECLAIMABLE\tSIZE\tLAST ACCESSED") -} - -func printTableRow(tw *tabwriter.Writer, di *client.UsageInfo) { - id := di.ID - if di.Mutable { - id += "*" - } - size := fmt.Sprintf("%.2f", units.Bytes(di.Size)) - if di.Shared { - size += "*" - } - fmt.Fprintf(tw, "%-71s\t%-11v\t%s\t\n", id, !di.InUse, size) -} - -func printSummary(tw *tabwriter.Writer, du []*client.UsageInfo) { - total := int64(0) - reclaimable := int64(0) - shared := int64(0) - - for _, di := range du { - if di.Size > 0 { - total += di.Size - if !di.InUse { - reclaimable += di.Size - } - } - if di.Shared { - shared += di.Size - } - } - - tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0) - - if shared > 0 { - fmt.Fprintf(tw, "Shared:\t%.2f\n", units.Bytes(shared)) - fmt.Fprintf(tw, "Private:\t%.2f\n", units.Bytes(total-shared)) - } - - fmt.Fprintf(tw, "Reclaimable:\t%.2f\n", units.Bytes(reclaimable)) - fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total)) - tw.Flush() -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildctl/diskusage_test.go b/vendor/github.com/moby/buildkit/cmd/buildctl/diskusage_test.go deleted file mode 100644 index 68f845919d67..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildctl/diskusage_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package main - -import ( - "testing" - - "github.com/moby/buildkit/util/testutil/integration" - "github.com/stretchr/testify/assert" -) - -func testDiskUsage(t *testing.T, sb integration.Sandbox) { - cmd := sb.Cmd("du") - err := cmd.Run() - assert.NoError(t, err) -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildctl/main.go b/vendor/github.com/moby/buildkit/cmd/buildctl/main.go deleted file mode 100644 index 5f1a6d93d370..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildctl/main.go +++ /dev/null @@ -1,137 +0,0 @@ -package main - -import ( - "context" - "fmt" - "net/url" - "os" - "time" - - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/util/apicaps" - "github.com/moby/buildkit/util/appdefaults" - "github.com/moby/buildkit/util/profiler" - "github.com/moby/buildkit/version" - opentracing "github.com/opentracing/opentracing-go" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" -) - -func init() { - apicaps.ExportedProduct = "buildkit" -} - -func main() { - cli.VersionPrinter = func(c *cli.Context) { - fmt.Println(c.App.Name, version.Package, c.App.Version, version.Revision) - } - app := cli.NewApp() - app.Name = "buildctl" - app.Usage = "build utility" - app.Version = version.Version - - defaultAddress := os.Getenv("BUILDKIT_HOST") - if defaultAddress == "" { - defaultAddress = appdefaults.Address - } - - app.Flags = []cli.Flag{ - cli.BoolFlag{ - Name: "debug", - Usage: "enable debug output in logs", - }, - cli.StringFlag{ - Name: "addr", - Usage: "buildkitd address", - Value: defaultAddress, - }, - cli.StringFlag{ - Name: "tlsservername", - Usage: "buildkitd server name for certificate validation", - Value: "", - }, - cli.StringFlag{ - Name: "tlscacert", - Usage: "CA certificate for validation", - Value: "", - }, - cli.StringFlag{ - Name: "tlscert", - Usage: "client certificate", - Value: "", - }, - cli.StringFlag{ - Name: "tlskey", - Usage: "client key", - Value: "", - }, - cli.IntFlag{ - Name: "timeout", - Usage: "timeout 
backend connection after value seconds", - Value: 5, - }, - } - - app.Commands = []cli.Command{ - diskUsageCommand, - pruneCommand, - buildCommand, - debugCommand, - } - - var debugEnabled bool - - app.Before = func(context *cli.Context) error { - debugEnabled = context.GlobalBool("debug") - if debugEnabled { - logrus.SetLevel(logrus.DebugLevel) - } - return nil - } - - attachAppContext(app) - - profiler.Attach(app) - - if err := app.Run(os.Args); err != nil { - if debugEnabled { - fmt.Fprintf(os.Stderr, "error: %+v\n", err) - } else { - fmt.Fprintf(os.Stderr, "error: %v\n", err) - } - os.Exit(1) - } -} - -func resolveClient(c *cli.Context) (*client.Client, error) { - serverName := c.GlobalString("tlsservername") - if serverName == "" { - // guess servername as hostname of target address - uri, err := url.Parse(c.GlobalString("addr")) - if err != nil { - return nil, err - } - serverName = uri.Hostname() - } - caCert := c.GlobalString("tlscacert") - cert := c.GlobalString("tlscert") - key := c.GlobalString("tlskey") - - opts := []client.ClientOpt{client.WithFailFast()} - - ctx := commandContext(c) - - if span := opentracing.SpanFromContext(ctx); span != nil { - opts = append(opts, client.WithTracer(span.Tracer())) - } - - if caCert != "" || cert != "" || key != "" { - opts = append(opts, client.WithCredentials(serverName, caCert, cert, key)) - } - - timeout := time.Duration(c.GlobalInt("timeout")) - ctx, cancel := context.WithTimeout(ctx, timeout*time.Second) - defer cancel() - - return client.New(ctx, c.GlobalString("addr"), opts...) -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildctl/prune.go b/vendor/github.com/moby/buildkit/cmd/buildctl/prune.go deleted file mode 100644 index 4c43cc4157f9..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildctl/prune.go +++ /dev/null @@ -1,92 +0,0 @@ -package main - -import ( - "fmt" - "os" - "text/tabwriter" - - "github.com/moby/buildkit/client" - "github.com/tonistiigi/units" - "github.com/urfave/cli" -) - -var pruneCommand = cli.Command{ - Name: "prune", - Usage: "clean up build cache", - Action: prune, - Flags: []cli.Flag{ - cli.DurationFlag{ - Name: "keep-duration", - Usage: "Keep data newer than this limit", - }, - cli.Float64Flag{ - Name: "keep-storage", - Usage: "Keep data below this limit (in MB)", - }, - cli.StringSliceFlag{ - Name: "filter, f", - Usage: "Filter records", - }, - cli.BoolFlag{ - Name: "all", - Usage: "Include internal/frontend references", - }, - cli.BoolFlag{ - Name: "verbose, v", - Usage: "Verbose output", - }, - }, -} - -func prune(clicontext *cli.Context) error { - c, err := resolveClient(clicontext) - if err != nil { - return err - } - - ch := make(chan client.UsageInfo) - printed := make(chan struct{}) - - tw := tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0) - first := true - total := int64(0) - - go func() { - defer close(printed) - for du := range ch { - total += du.Size - if clicontext.Bool("verbose") { - printVerbose(tw, []*client.UsageInfo{&du}) - } else { - if first { - printTableHeader(tw) - first = false - } - printTableRow(tw, &du) - tw.Flush() - } - } - }() - - opts := []client.PruneOption{ - client.WithFilter(clicontext.StringSlice("filter")), - client.WithKeepOpt(clicontext.Duration("keep-duration"), int64(clicontext.Float64("keep-storage")*1e6)), - } - - if clicontext.Bool("all") { - opts = append(opts, client.PruneAll) - } - - err = c.Prune(commandContext(clicontext), ch, opts...) 
- close(ch) - <-printed - if err != nil { - return err - } - - tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0) - fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total)) - tw.Flush() - - return nil -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildctl/prune_test.go b/vendor/github.com/moby/buildkit/cmd/buildctl/prune_test.go deleted file mode 100644 index 647650670181..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildctl/prune_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package main - -import ( - "testing" - - "github.com/moby/buildkit/util/testutil/integration" - "github.com/stretchr/testify/assert" -) - -func testPrune(t *testing.T, sb integration.Sandbox) { - cmd := sb.Cmd("prune") - err := cmd.Run() - assert.NoError(t, err) -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildctl/trace.go b/vendor/github.com/moby/buildkit/cmd/buildctl/trace.go deleted file mode 100644 index bb7040e5b75f..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildctl/trace.go +++ /dev/null @@ -1,94 +0,0 @@ -package main - -import ( - "context" - "io" - "os" - "strings" - - "github.com/moby/buildkit/util/appcontext" - opentracing "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" - jaeger "github.com/uber/jaeger-client-go" - "github.com/urfave/cli" -) - -func getTracer() (opentracing.Tracer, io.Closer) { - if traceAddr := os.Getenv("JAEGER_TRACE"); traceAddr != "" { - tr, err := jaeger.NewUDPTransport(traceAddr, 0) - if err != nil { - panic(err) - } - - // metricsFactory := prometheus.New() - return jaeger.NewTracer( - "buildctl", - jaeger.NewConstSampler(true), - jaeger.NewRemoteReporter(tr), - ) - } - - return opentracing.NoopTracer{}, &nopCloser{} -} - -func attachAppContext(app *cli.App) { - ctx := appcontext.Context() - - tracer, closer := getTracer() - - var span opentracing.Span - - for i, cmd := range app.Commands { - func(before cli.BeforeFunc) { - name := cmd.Name - app.Commands[i].Before = func(clicontext *cli.Context) error { - if before != nil { - if err := before(clicontext); err != nil { - return err - } - } - - span = tracer.StartSpan(name) - span.LogFields(log.String("command", strings.Join(os.Args, " "))) - - ctx = opentracing.ContextWithSpan(ctx, span) - - clicontext.App.Metadata["context"] = ctx - return nil - } - }(cmd.Before) - } - - app.ExitErrHandler = func(clicontext *cli.Context, err error) { - if span != nil { - ext.Error.Set(span, true) - } - cli.HandleExitCoder(err) - } - - after := app.After - app.After = func(clicontext *cli.Context) error { - if after != nil { - if err := after(clicontext); err != nil { - return err - } - } - if span != nil { - span.Finish() - } - return closer.Close() - } - -} - -func commandContext(c *cli.Context) context.Context { - return c.App.Metadata["context"].(context.Context) -} - -type nopCloser struct { -} - -func (*nopCloser) Close() error { - return nil -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go deleted file mode 100644 index 758fa62b5c62..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go +++ /dev/null @@ -1,95 +0,0 @@ -package config - -import ( - "io" - "os" - - "github.com/BurntSushi/toml" - "github.com/pkg/errors" -) - -// Config provides containerd configuration data for the server -type Config struct { - Debug bool `toml:"debug"` - - // Root is the path to a directory where buildkit will store persistent 
data - Root string `toml:"root"` - - // GRPC configuration settings - GRPC GRPCConfig `toml:"grpc"` - - Workers struct { - OCI OCIConfig `toml:"oci"` - Containerd ContainerdConfig `toml:"containerd"` - } `toml:"worker"` - - Registries map[string]RegistryConfig `toml:"registry"` -} - -type GRPCConfig struct { - Address []string `toml:"address"` - DebugAddress string `toml:"debugAddress"` - UID int `toml:"uid"` - GID int `toml:"gid"` - - TLS TLSConfig `toml:"tls"` - // MaxRecvMsgSize int `toml:"max_recv_message_size"` - // MaxSendMsgSize int `toml:"max_send_message_size"` -} - -type RegistryConfig struct { - Mirrors []string `toml:"mirrors"` - PlainHTTP bool `toml:"http"` -} - -type TLSConfig struct { - Cert string `toml:"cert"` - Key string `toml:"key"` - CA string `toml:"ca"` -} - -type OCIConfig struct { - Enabled *bool `toml:"enabled"` - Labels map[string]string `toml:"labels"` - Platforms []string `toml:"platforms"` - Snapshotter string `toml:"snapshotter"` - Rootless bool `toml:"rootless"` - GCPolicy []GCPolicy `toml:"gcpolicy"` -} - -type ContainerdConfig struct { - Address string `toml:"address"` - Enabled *bool `toml:"enabled"` - Labels map[string]string `toml:"labels"` - Platforms []string `toml:"platforms"` - GCPolicy []GCPolicy `toml:"gcpolicy"` - Namespace string `toml:"namespace"` -} - -type GCPolicy struct { - All bool `toml:"all"` - KeepBytes int64 `toml:"keepBytes"` - KeepDuration int64 `toml:"keepDuration"` - Filters []string `toml:"filters"` -} - -func Load(r io.Reader) (Config, *toml.MetaData, error) { - var c Config - md, err := toml.DecodeReader(r, &c) - if err != nil { - return c, nil, errors.Wrap(err, "failed to parse config") - } - return c, &md, nil -} - -func LoadFile(fp string) (Config, *toml.MetaData, error) { - f, err := os.Open(fp) - if err != nil { - if os.IsNotExist(err) { - return Config{}, nil, nil - } - return Config{}, nil, errors.Wrapf(err, "failed to load config from %s", fp) - } - defer f.Close() - return Load(f) -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config_test.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config_test.go deleted file mode 100644 index 2fe9365ec369..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package config - -import ( - "bytes" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestConfig(t *testing.T) { - - const testConfig = ` -root = "/foo/bar" -debug=true - -[grpc] -address=["buildkit.sock"] -debugAddress="debug.sock" -gid=1234 -[grpc.tls] -cert="mycert.pem" - -[worker.oci] -enabled=true -snapshotter="overlay" -rootless=true -[worker.oci.labels] -foo="bar" -"aa.bb.cc"="baz" - -[worker.containerd] -namespace="non-default" -platforms=["linux/amd64"] -address="containerd.sock" -[[worker.containerd.gcpolicy]] -all=true -filters=["foo==bar"] -keepBytes=20 -keepDuration=3600 -[[worker.containerd.gcpolicy]] -keepBytes=40 -keepDuration=7200 - -[registry."docker.io"] -mirrors=["hub.docker.io"] -http=true -` - - cfg, md, err := Load(bytes.NewBuffer([]byte(testConfig))) - require.NoError(t, err) - - require.Equal(t, "/foo/bar", cfg.Root) - require.Equal(t, true, cfg.Debug) - - require.Equal(t, "buildkit.sock", cfg.GRPC.Address[0]) - require.Equal(t, "debug.sock", cfg.GRPC.DebugAddress) - require.Equal(t, 1234, cfg.GRPC.GID) - require.Equal(t, "mycert.pem", cfg.GRPC.TLS.Cert) - - require.True(t, md.IsDefined("grpc", "gid")) - require.False(t, md.IsDefined("grpc", "uid")) - - require.NotNil(t, 
cfg.Workers.OCI.Enabled) - require.Equal(t, true, *cfg.Workers.OCI.Enabled) - require.Equal(t, "overlay", cfg.Workers.OCI.Snapshotter) - require.Equal(t, true, cfg.Workers.OCI.Rootless) - - require.Equal(t, "bar", cfg.Workers.OCI.Labels["foo"]) - require.Equal(t, "baz", cfg.Workers.OCI.Labels["aa.bb.cc"]) - - require.Nil(t, cfg.Workers.Containerd.Enabled) - require.Equal(t, 1, len(cfg.Workers.Containerd.Platforms)) - require.Equal(t, "containerd.sock", cfg.Workers.Containerd.Address) - - require.Equal(t, 0, len(cfg.Workers.OCI.GCPolicy)) - require.Equal(t, "non-default", cfg.Workers.Containerd.Namespace) - require.Equal(t, 2, len(cfg.Workers.Containerd.GCPolicy)) - - require.Equal(t, true, cfg.Workers.Containerd.GCPolicy[0].All) - require.Equal(t, false, cfg.Workers.Containerd.GCPolicy[1].All) - require.Equal(t, int64(20), cfg.Workers.Containerd.GCPolicy[0].KeepBytes) - require.Equal(t, int64(40), cfg.Workers.Containerd.GCPolicy[1].KeepBytes) - require.Equal(t, int64(3600), cfg.Workers.Containerd.GCPolicy[0].KeepDuration) - require.Equal(t, int64(7200), cfg.Workers.Containerd.GCPolicy[1].KeepDuration) - require.Equal(t, 1, len(cfg.Workers.Containerd.GCPolicy[0].Filters)) - require.Equal(t, 0, len(cfg.Workers.Containerd.GCPolicy[1].Filters)) - - require.Equal(t, cfg.Registries["docker.io"].PlainHTTP, true) - require.Equal(t, cfg.Registries["docker.io"].Mirrors[0], "hub.docker.io") -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go deleted file mode 100644 index 09b121fc7417..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go +++ /dev/null @@ -1,29 +0,0 @@ -package config - -const defaultCap int64 = 2e9 // 2GB - -func DefaultGCPolicy(p string) []GCPolicy { - keep := detectDefaultGCCap(p) - return []GCPolicy{ - // if build cache uses more than 512MB delete the most easily reproducible data after it has not been used for 2 days - { - Filters: []string{"type==source.local,type==exec.cachemount,type==source.git.checkout"}, - KeepDuration: 48 * 3600, // 48h - KeepBytes: 512 * 1e6, // 512MB - }, - // remove any data not used for 60 days - { - KeepDuration: 60 * 24 * 3600, // 60d - KeepBytes: keep, - }, - // keep the unshared build cache under cap - { - KeepBytes: keep, - }, - // if previous policies were insufficient start deleting internal data to keep build cache under cap - { - All: true, - KeepBytes: keep, - }, - } -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go deleted file mode 100644 index 8ae71825ab15..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !windows - -package config - -import ( - "syscall" -) - -func detectDefaultGCCap(root string) int64 { - var st syscall.Statfs_t - if err := syscall.Statfs(root, &st); err != nil { - return defaultCap - } - diskSize := int64(st.Bsize) * int64(st.Blocks) - avail := diskSize / 10 - return (avail/(1<<30) + 1) * 1e9 // round up -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go deleted file mode 100644 index 2f6ca0dc2bcc..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build windows - -package config - -func detectDefaultGCCap(root string) 
int64 { - return defaultCap -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/debug.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/debug.go deleted file mode 100644 index 908a8ab2845e..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/debug.go +++ /dev/null @@ -1,37 +0,0 @@ -package main - -import ( - "expvar" - "net" - "net/http" - "net/http/pprof" - - "github.com/sirupsen/logrus" - "golang.org/x/net/trace" -) - -func setupDebugHandlers(addr string) error { - m := http.NewServeMux() - m.Handle("/debug/vars", expvar.Handler()) - m.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index)) - m.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline)) - m.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile)) - m.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol)) - m.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace)) - // TODO: reenable after golang.org/x/net update - // m.Handle("/debug/requests", http.HandlerFunc(trace.Traces)) - // m.Handle("/debug/events", http.HandlerFunc(trace.Events)) - - // setting debugaddr is opt-in. permission is defined by listener address - trace.AuthRequest = func(_ *http.Request) (bool, bool) { - return true, true - } - - l, err := net.Listen("tcp", addr) - if err != nil { - return err - } - logrus.Debugf("debug handlers listening at %s", addr) - go http.Serve(l, m) - return nil -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/main.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/main.go deleted file mode 100644 index d714a2a554d3..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/main.go +++ /dev/null @@ -1,607 +0,0 @@ -package main - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net" - "os" - "os/user" - "path/filepath" - "sort" - "strconv" - "strings" - "time" - - "github.com/BurntSushi/toml" - "github.com/containerd/containerd/pkg/seed" - "github.com/containerd/containerd/platforms" - "github.com/containerd/containerd/sys" - "github.com/docker/go-connections/sockets" - "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - registryremotecache "github.com/moby/buildkit/cache/remotecache/registry" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/cmd/buildkitd/config" - "github.com/moby/buildkit/control" - "github.com/moby/buildkit/frontend" - dockerfile "github.com/moby/buildkit/frontend/dockerfile/builder" - "github.com/moby/buildkit/frontend/gateway" - "github.com/moby/buildkit/frontend/gateway/forwarder" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/solver/bboltcachestorage" - "github.com/moby/buildkit/util/apicaps" - "github.com/moby/buildkit/util/appcontext" - "github.com/moby/buildkit/util/appdefaults" - "github.com/moby/buildkit/util/profiler" - "github.com/moby/buildkit/util/resolver" - "github.com/moby/buildkit/version" - "github.com/moby/buildkit/worker" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/opencontainers/runc/libcontainer/system" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" -) - -func init() { - apicaps.ExportedProduct = "buildkit" - seed.WithTimeAndRand() -} - -type workerInitializerOpt struct { - sessionManager *session.Manager - config *config.Config -} - -type workerInitializer struct { - fn func(c *cli.Context, common workerInitializerOpt) ([]worker.Worker, error) - // less priority number, more preferred - priority 
int -} - -var ( - appFlags []cli.Flag - workerInitializers []workerInitializer -) - -func registerWorkerInitializer(wi workerInitializer, flags ...cli.Flag) { - workerInitializers = append(workerInitializers, wi) - sort.Slice(workerInitializers, - func(i, j int) bool { - return workerInitializers[i].priority < workerInitializers[j].priority - }) - appFlags = append(appFlags, flags...) -} - -func main() { - cli.VersionPrinter = func(c *cli.Context) { - fmt.Println(c.App.Name, version.Package, c.App.Version, version.Revision) - } - app := cli.NewApp() - app.Name = "buildkitd" - app.Usage = "build daemon" - app.Version = version.Version - - defaultConf, md := defaultConf() - - rootlessUsage := "set all the default options to be compatible with rootless containers" - if system.RunningInUserNS() { - app.Flags = append(app.Flags, cli.BoolTFlag{ - Name: "rootless", - Usage: rootlessUsage + " (default: true)", - }) - } else { - app.Flags = append(app.Flags, cli.BoolFlag{ - Name: "rootless", - Usage: rootlessUsage, - }) - } - - groupValue := func(gid int) string { - if md == nil || !md.IsDefined("grpc", "gid") { - return "" - } - return strconv.Itoa(gid) - } - - app.Flags = append(app.Flags, - cli.StringFlag{ - Name: "config", - Usage: "path to config file", - Value: defaultConfigPath(), - }, - cli.BoolFlag{ - Name: "debug", - Usage: "enable debug output in logs", - }, - cli.StringFlag{ - Name: "root", - Usage: "path to state directory", - Value: defaultConf.Root, - }, - cli.StringSliceFlag{ - Name: "addr", - Usage: "listening address (socket or tcp)", - Value: &cli.StringSlice{defaultConf.GRPC.Address[0]}, - }, - cli.StringFlag{ - Name: "group", - Usage: "group (name or gid) which will own all Unix socket listening addresses", - Value: groupValue(defaultConf.GRPC.GID), - }, - cli.StringFlag{ - Name: "debugaddr", - Usage: "debugging address (eg. 0.0.0.0:6060)", - Value: defaultConf.GRPC.DebugAddress, - }, - cli.StringFlag{ - Name: "tlscert", - Usage: "certificate file to use", - Value: defaultConf.GRPC.TLS.Cert, - }, - cli.StringFlag{ - Name: "tlskey", - Usage: "key file to use", - Value: defaultConf.GRPC.TLS.Key, - }, - cli.StringFlag{ - Name: "tlscacert", - Usage: "ca certificate to verify clients", - Value: defaultConf.GRPC.TLS.CA, - }, - ) - app.Flags = append(app.Flags, appFlags...) - - app.Action = func(c *cli.Context) error { - if os.Geteuid() != 0 { - return errors.New("rootless mode requires to be executed as the mapped root in a user namespace; you may use RootlessKit for setting up the namespace") - } - ctx, cancel := context.WithCancel(appcontext.Context()) - defer cancel() - - cfg, md, err := config.LoadFile(c.GlobalString("config")) - if err != nil { - return err - } - - setDefaultConfig(&cfg) - if err := applyMainFlags(c, &cfg, md); err != nil { - return err - } - - if cfg.Debug { - logrus.SetLevel(logrus.DebugLevel) - } - - if cfg.GRPC.DebugAddress != "" { - if err := setupDebugHandlers(cfg.GRPC.DebugAddress); err != nil { - return err - } - } - opts := []grpc.ServerOption{unaryInterceptor(ctx), grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer))} - creds, err := serverCredentials(cfg.GRPC.TLS) - if err != nil { - return err - } - if creds != nil { - opts = append(opts, creds) - } - server := grpc.NewServer(opts...) 
- - // relative path does not work with nightlyone/lockfile - root, err := filepath.Abs(cfg.Root) - if err != nil { - return err - } - cfg.Root = root - - if err := os.MkdirAll(root, 0700); err != nil { - return errors.Wrapf(err, "failed to create %s", root) - } - - controller, err := newController(c, &cfg) - if err != nil { - return err - } - - controller.Register(server) - - errCh := make(chan error, 1) - if err := serveGRPC(cfg.GRPC, server, errCh); err != nil { - return err - } - - select { - case serverErr := <-errCh: - err = serverErr - cancel() - case <-ctx.Done(): - err = ctx.Err() - } - - logrus.Infof("stopping server") - server.GracefulStop() - - return err - } - - app.After = func(context *cli.Context) error { - if closeTracer != nil { - return closeTracer.Close() - } - return nil - } - - profiler.Attach(app) - - if err := app.Run(os.Args); err != nil { - fmt.Fprintf(os.Stderr, "buildkitd: %s\n", err) - os.Exit(1) - } -} - -func serveGRPC(cfg config.GRPCConfig, server *grpc.Server, errCh chan error) error { - addrs := cfg.Address - if len(addrs) == 0 { - return errors.New("--addr cannot be empty") - } - eg, _ := errgroup.WithContext(context.Background()) - listeners := make([]net.Listener, 0, len(addrs)) - for _, addr := range addrs { - l, err := getListener(cfg, addr) - if err != nil { - for _, l := range listeners { - l.Close() - } - return err - } - listeners = append(listeners, l) - } - for _, l := range listeners { - func(l net.Listener) { - eg.Go(func() error { - defer l.Close() - logrus.Infof("running server on %s", l.Addr()) - return server.Serve(l) - }) - }(l) - } - go func() { - errCh <- eg.Wait() - }() - return nil -} - -func defaultConfigPath() string { - if system.RunningInUserNS() { - return filepath.Join(appdefaults.UserConfigDir(), "buildkitd.toml") - } - return filepath.Join(appdefaults.ConfigDir, "buildkitd.toml") -} - -func defaultConf() (config.Config, *toml.MetaData) { - cfg, md, err := config.LoadFile(defaultConfigPath()) - if err != nil { - return cfg, nil - } - setDefaultConfig(&cfg) - - return cfg, md -} - -func setDefaultConfig(cfg *config.Config) { - orig := *cfg - - if cfg.Root == "" { - cfg.Root = appdefaults.Root - } - - if len(cfg.GRPC.Address) == 0 { - cfg.GRPC.Address = []string{appdefaults.Address} - } - - if system.RunningInUserNS() { - // if buildkitd is being executed as the mapped-root (not only EUID==0 but also $USER==root) - // in a user namespace, we need to enable the rootless mode but - // we don't want to honor $HOME for setting up default paths. 
- if u := os.Getenv("USER"); u != "" && u != "root" { - if orig.Root == "" { - cfg.Root = appdefaults.UserRoot() - } - if len(orig.GRPC.Address) == 0 { - cfg.GRPC.Address = []string{appdefaults.UserAddress()} - } - appdefaults.EnsureUserAddressDir() - } - } -} - -func applyMainFlags(c *cli.Context, cfg *config.Config, md *toml.MetaData) error { - if c.IsSet("debug") { - cfg.Debug = c.Bool("debug") - } - if c.IsSet("root") { - cfg.Root = c.String("root") - } - - if c.IsSet("addr") || len(cfg.GRPC.Address) == 0 { - addrs := c.StringSlice("addr") - if len(addrs) > 1 { - addrs = addrs[1:] // https://github.com/urfave/cli/issues/160 - } - - cfg.GRPC.Address = make([]string, 0, len(addrs)) - for _, v := range addrs { - cfg.GRPC.Address = append(cfg.GRPC.Address, v) - } - } - - if c.IsSet("debugaddr") { - cfg.GRPC.DebugAddress = c.String("debugaddr") - } - - if md == nil || !md.IsDefined("grpc", "uid") { - cfg.GRPC.UID = os.Getuid() - } - - if md == nil || !md.IsDefined("grpc", "gid") { - cfg.GRPC.GID = os.Getgid() - } - - if group := c.String("group"); group != "" { - gid, err := groupToGid(group) - if err != nil { - return err - } - cfg.GRPC.GID = gid - } - - if tlscert := c.String("tlscert"); tlscert != "" { - cfg.GRPC.TLS.Cert = tlscert - } - if tlskey := c.String("tlskey"); tlskey != "" { - cfg.GRPC.TLS.Key = tlskey - } - if tlsca := c.String("tlsca"); tlsca != "" { - cfg.GRPC.TLS.CA = tlsca - } - return nil -} - -// Convert a string containing either a group name or a stringified gid into a numeric id) -func groupToGid(group string) (int, error) { - if group == "" { - return os.Getgid(), nil - } - - var ( - err error - id int - ) - - // Try and parse as a number, if the error is ErrSyntax - // (i.e. its not a number) then we carry on and try it as a - // name. 
- if id, err = strconv.Atoi(group); err == nil { - return id, nil - } else if err.(*strconv.NumError).Err != strconv.ErrSyntax { - return 0, err - } - - ginfo, err := user.LookupGroup(group) - if err != nil { - return 0, err - } - group = ginfo.Gid - - if id, err = strconv.Atoi(group); err != nil { - return 0, err - } - - return id, nil -} - -func getListener(cfg config.GRPCConfig, addr string) (net.Listener, error) { - addrSlice := strings.SplitN(addr, "://", 2) - if len(addrSlice) < 2 { - return nil, errors.Errorf("address %s does not contain proto, you meant unix://%s ?", - addr, addr) - } - proto := addrSlice[0] - listenAddr := addrSlice[1] - switch proto { - case "unix", "npipe": - return sys.GetLocalListener(listenAddr, cfg.UID, cfg.GID) - case "tcp": - return sockets.NewTCPSocket(listenAddr, nil) - default: - return nil, errors.Errorf("addr %s not supported", addr) - } -} - -func unaryInterceptor(globalCtx context.Context) grpc.ServerOption { - withTrace := otgrpc.OpenTracingServerInterceptor(tracer, otgrpc.LogPayloads()) - - return grpc.UnaryInterceptor(func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - go func() { - select { - case <-ctx.Done(): - case <-globalCtx.Done(): - cancel() - } - }() - - resp, err = withTrace(ctx, req, info, handler) - if err != nil { - logrus.Errorf("%s returned error: %+v", info.FullMethod, err) - } - return - }) -} - -func serverCredentials(cfg config.TLSConfig) (grpc.ServerOption, error) { - certFile := cfg.Cert - keyFile := cfg.Key - caFile := cfg.CA - if certFile == "" && keyFile == "" { - return nil, nil - } - err := errors.New("you must specify key and cert file if one is specified") - if certFile == "" { - return nil, err - } - if keyFile == "" { - return nil, err - } - certificate, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return nil, errors.Wrap(err, "could not load server key pair") - } - tlsConf := &tls.Config{ - Certificates: []tls.Certificate{certificate}, - } - if caFile != "" { - certPool := x509.NewCertPool() - ca, err := ioutil.ReadFile(caFile) - if err != nil { - return nil, errors.Wrap(err, "could not read ca certificate") - } - // Append the client certificates from the CA - if ok := certPool.AppendCertsFromPEM(ca); !ok { - return nil, errors.New("failed to append ca cert") - } - tlsConf.ClientAuth = tls.RequireAndVerifyClientCert - tlsConf.ClientCAs = certPool - } - creds := grpc.Creds(credentials.NewTLS(tlsConf)) - return creds, nil -} - -func newController(c *cli.Context, cfg *config.Config) (*control.Controller, error) { - sessionManager, err := session.NewManager() - if err != nil { - return nil, err - } - wc, err := newWorkerController(c, workerInitializerOpt{ - sessionManager: sessionManager, - config: cfg, - }) - if err != nil { - return nil, err - } - frontends := map[string]frontend.Frontend{} - frontends["dockerfile.v0"] = forwarder.NewGatewayForwarder(wc, dockerfile.Build) - frontends["gateway.v0"] = gateway.NewGatewayFrontend(wc) - - cacheStorage, err := bboltcachestorage.NewStore(filepath.Join(cfg.Root, "cache.db")) - if err != nil { - return nil, err - } - - resolverFn := resolverFunc(cfg) - - return control.NewController(control.Opt{ - SessionManager: sessionManager, - WorkerController: wc, - Frontends: frontends, - // TODO: support non-registry remote cache - ResolveCacheExporterFunc: registryremotecache.ResolveCacheExporterFunc(sessionManager, 
resolverFn), - ResolveCacheImporterFunc: registryremotecache.ResolveCacheImporterFunc(sessionManager, resolverFn), - CacheKeyStorage: cacheStorage, - }) -} - -func resolverFunc(cfg *config.Config) resolver.ResolveOptionsFunc { - m := map[string]resolver.RegistryConf{} - for k, v := range cfg.Registries { - m[k] = resolver.RegistryConf{ - Mirrors: v.Mirrors, - PlainHTTP: v.PlainHTTP, - } - } - return resolver.NewResolveOptionsFunc(m) -} - -func newWorkerController(c *cli.Context, wiOpt workerInitializerOpt) (*worker.Controller, error) { - wc := &worker.Controller{} - nWorkers := 0 - for _, wi := range workerInitializers { - ws, err := wi.fn(c, wiOpt) - if err != nil { - return nil, err - } - for _, w := range ws { - logrus.Infof("found worker %q, labels=%v, platforms=%v", w.ID(), w.Labels(), formatPlatforms(w.Platforms())) - if err = wc.Add(w); err != nil { - return nil, err - } - nWorkers++ - } - } - if nWorkers == 0 { - return nil, errors.New("no worker found, rebuild the buildkit daemon?") - } - defaultWorker, err := wc.GetDefault() - if err != nil { - return nil, err - } - logrus.Infof("found %d workers, default=%q", nWorkers, defaultWorker.ID()) - logrus.Warn("currently, only the default worker can be used.") - return wc, nil -} - -func attrMap(sl []string) (map[string]string, error) { - m := map[string]string{} - for _, v := range sl { - parts := strings.SplitN(v, "=", 2) - if len(parts) != 2 { - return nil, errors.Errorf("invalid value %s", v) - } - m[parts[0]] = parts[1] - } - return m, nil -} - -func formatPlatforms(p []specs.Platform) []string { - str := make([]string, 0, len(p)) - for _, pp := range p { - str = append(str, platforms.Format(platforms.Normalize(pp))) - } - return str -} - -func parsePlatforms(platformsStr []string) ([]specs.Platform, error) { - out := make([]specs.Platform, 0, len(platformsStr)) - for _, s := range platformsStr { - p, err := platforms.Parse(s) - if err != nil { - return nil, err - } - out = append(out, platforms.Normalize(p)) - } - return out, nil -} - -func getGCPolicy(rules []config.GCPolicy, root string) []client.PruneInfo { - if len(rules) == 0 { - rules = config.DefaultGCPolicy(root) - } - out := make([]client.PruneInfo, 0, len(rules)) - for _, rule := range rules { - out = append(out, client.PruneInfo{ - Filter: rule.Filters, - All: rule.All, - KeepBytes: rule.KeepBytes, - KeepDuration: time.Duration(rule.KeepDuration) * time.Second, - }) - } - return out -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/main_containerd_worker.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/main_containerd_worker.go deleted file mode 100644 index b72246cc9644..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/main_containerd_worker.go +++ /dev/null @@ -1,174 +0,0 @@ -// +build linux,!no_containerd_worker - -package main - -import ( - "os" - "strconv" - "strings" - "time" - - ctd "github.com/containerd/containerd" - "github.com/moby/buildkit/cmd/buildkitd/config" - "github.com/moby/buildkit/worker" - "github.com/moby/buildkit/worker/base" - "github.com/moby/buildkit/worker/containerd" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" -) - -const ( - defaultContainerdAddress = "/run/containerd/containerd.sock" - defaultContainerdNamespace = "buildkit" -) - -func init() { - defaultConf, _ := defaultConf() - - enabledValue := func(b *bool) string { - if b == nil { - return "auto" - } - return strconv.FormatBool(*b) - } - - if defaultConf.Workers.Containerd.Address == "" { - 
defaultConf.Workers.Containerd.Address = defaultContainerdAddress - } - - if defaultConf.Workers.Containerd.Namespace == "" { - defaultConf.Workers.Containerd.Namespace = defaultContainerdNamespace - } - - registerWorkerInitializer( - workerInitializer{ - fn: containerdWorkerInitializer, - // 1 is less preferred than 0 (runcCtor) - priority: 1, - }, - cli.StringFlag{ - Name: "containerd-worker", - Usage: "enable containerd workers (true/false/auto)", - Value: enabledValue(defaultConf.Workers.Containerd.Enabled), - }, - cli.StringFlag{ - Name: "containerd-worker-addr", - Usage: "containerd socket", - Value: defaultConf.Workers.Containerd.Address, - }, - cli.StringSliceFlag{ - Name: "containerd-worker-labels", - Usage: "user-specific annotation labels (com.example.foo=bar)", - }, - // TODO: containerd-worker-platform should be replaced by ability - // to set these from containerd configuration - cli.StringSliceFlag{ - Name: "containerd-worker-platform", - Usage: "override supported platforms for worker", - Hidden: true, - }, - cli.StringFlag{ - Name: "containerd-worker-namespace", - Usage: "override containerd namespace", - Value: defaultConf.Workers.Containerd.Namespace, - Hidden: true, - }, - ) - // TODO(AkihiroSuda): allow using multiple snapshotters. should be useful for some applications that does not work with the default overlay snapshotter. e.g. mysql (docker/for-linux#72)", -} - -func applyContainerdFlags(c *cli.Context, cfg *config.Config) error { - if cfg.Workers.Containerd.Address == "" { - cfg.Workers.Containerd.Address = defaultContainerdAddress - } - - if c.GlobalIsSet("containerd-worker") { - boolOrAuto, err := parseBoolOrAuto(c.GlobalString("containerd-worker")) - if err != nil { - return err - } - cfg.Workers.Containerd.Enabled = boolOrAuto - } - - // GlobalBool works for BoolT as well - rootless := c.GlobalBool("rootless") - if rootless { - logrus.Warn("rootless mode is not supported for containerd workers. 
disabling containerd worker.") - b := false - cfg.Workers.Containerd.Enabled = &b - return nil - } - - labels, err := attrMap(c.GlobalStringSlice("containerd-worker-labels")) - if err != nil { - return err - } - if cfg.Workers.Containerd.Labels == nil { - cfg.Workers.Containerd.Labels = make(map[string]string) - } - for k, v := range labels { - cfg.Workers.Containerd.Labels[k] = v - } - if c.GlobalIsSet("containerd-worker-addr") { - cfg.Workers.Containerd.Address = c.GlobalString("containerd-worker-addr") - } - - if platforms := c.GlobalStringSlice("containerd-worker-platform"); len(platforms) != 0 { - cfg.Workers.Containerd.Platforms = platforms - } - - if c.GlobalIsSet("containerd-worker-namespace") || cfg.Workers.Containerd.Namespace == "" { - cfg.Workers.Containerd.Namespace = c.GlobalString("containerd-worker-namespace") - } - - return nil -} - -func containerdWorkerInitializer(c *cli.Context, common workerInitializerOpt) ([]worker.Worker, error) { - if err := applyContainerdFlags(c, common.config); err != nil { - return nil, err - } - - cfg := common.config.Workers.Containerd - - if (cfg.Enabled == nil && !validContainerdSocket(cfg.Address)) || (cfg.Enabled != nil && !*cfg.Enabled) { - return nil, nil - } - - opt, err := containerd.NewWorkerOpt(common.config.Root, cfg.Address, ctd.DefaultSnapshotter, cfg.Namespace, cfg.Labels, ctd.WithTimeout(60*time.Second)) - if err != nil { - return nil, err - } - opt.SessionManager = common.sessionManager - opt.GCPolicy = getGCPolicy(cfg.GCPolicy, common.config.Root) - opt.ResolveOptionsFunc = resolverFunc(common.config) - - if platformsStr := cfg.Platforms; len(platformsStr) != 0 { - platforms, err := parsePlatforms(platformsStr) - if err != nil { - return nil, errors.Wrap(err, "invalid platforms") - } - opt.Platforms = platforms - } - w, err := base.NewWorker(opt) - if err != nil { - return nil, err - } - return []worker.Worker{w}, nil -} - -func validContainerdSocket(socket string) bool { - if strings.HasPrefix(socket, "tcp://") { - // FIXME(AkihiroSuda): prohibit tcp? 
- return true - } - socketPath := strings.TrimPrefix(socket, "unix://") - if _, err := os.Stat(socketPath); os.IsNotExist(err) { - // FIXME(AkihiroSuda): add more conditions - logrus.Warnf("skipping containerd worker, as %q does not exist", socketPath) - return false - } - // TODO: actually dial and call introspection API - return true -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/main_oci_worker.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/main_oci_worker.go deleted file mode 100644 index 68cd1c2e8bac..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/main_oci_worker.go +++ /dev/null @@ -1,195 +0,0 @@ -// +build linux,!no_oci_worker - -package main - -import ( - "os/exec" - "strconv" - - ctdsnapshot "github.com/containerd/containerd/snapshots" - "github.com/containerd/containerd/snapshots/native" - "github.com/containerd/containerd/snapshots/overlay" - "github.com/moby/buildkit/cmd/buildkitd/config" - "github.com/moby/buildkit/worker" - "github.com/moby/buildkit/worker/base" - "github.com/moby/buildkit/worker/runc" - "github.com/opencontainers/runc/libcontainer/system" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" -) - -func init() { - defaultConf, _ := defaultConf() - - enabledValue := func(b *bool) string { - if b == nil { - return "auto" - } - return strconv.FormatBool(*b) - } - - if defaultConf.Workers.OCI.Snapshotter == "" { - defaultConf.Workers.OCI.Snapshotter = "auto" - } - - flags := []cli.Flag{ - cli.StringFlag{ - Name: "oci-worker", - Usage: "enable oci workers (true/false/auto)", - Value: enabledValue(defaultConf.Workers.OCI.Enabled), - }, - cli.StringSliceFlag{ - Name: "oci-worker-labels", - Usage: "user-specific annotation labels (com.example.foo=bar)", - }, - cli.StringFlag{ - Name: "oci-worker-snapshotter", - Usage: "name of snapshotter (overlayfs or native)", - Value: defaultConf.Workers.OCI.Snapshotter, - }, - cli.StringSliceFlag{ - Name: "oci-worker-platform", - Usage: "override supported platforms for worker", - }, - } - n := "oci-worker-rootless" - u := "enable rootless mode" - if system.RunningInUserNS() { - flags = append(flags, cli.BoolTFlag{ - Name: n, - Usage: u, - }) - } else { - flags = append(flags, cli.BoolFlag{ - Name: n, - Usage: u, - }) - } - registerWorkerInitializer( - workerInitializer{ - fn: ociWorkerInitializer, - priority: 0, - }, - flags..., - ) - // TODO: allow multiple oci runtimes -} - -func applyOCIFlags(c *cli.Context, cfg *config.Config) error { - if cfg.Workers.OCI.Snapshotter == "" { - cfg.Workers.OCI.Snapshotter = "auto" - } - - if c.GlobalIsSet("oci-worker") { - boolOrAuto, err := parseBoolOrAuto(c.GlobalString("oci-worker")) - if err != nil { - return err - } - cfg.Workers.OCI.Enabled = boolOrAuto - } - - labels, err := attrMap(c.GlobalStringSlice("oci-worker-labels")) - if err != nil { - return err - } - if cfg.Workers.OCI.Labels == nil { - cfg.Workers.OCI.Labels = make(map[string]string) - } - for k, v := range labels { - cfg.Workers.OCI.Labels[k] = v - } - if c.GlobalIsSet("oci-worker-snapshotter") { - cfg.Workers.OCI.Snapshotter = c.GlobalString("oci-worker-snapshotter") - } - - if c.GlobalIsSet("rootless") || c.GlobalBool("rootless") { - cfg.Workers.OCI.Rootless = c.GlobalBool("rootless") - } - if c.GlobalIsSet("oci-worker-rootless") { - cfg.Workers.OCI.Rootless = c.GlobalBool("oci-worker-rootless") - } - - if platforms := c.GlobalStringSlice("oci-worker-platform"); len(platforms) != 0 { - cfg.Workers.OCI.Platforms = platforms - } - - return nil -} - -func 
ociWorkerInitializer(c *cli.Context, common workerInitializerOpt) ([]worker.Worker, error) { - if err := applyOCIFlags(c, common.config); err != nil { - return nil, err - } - - cfg := common.config.Workers.OCI - - if (cfg.Enabled == nil && !validOCIBinary()) || (cfg.Enabled != nil && !*cfg.Enabled) { - return nil, nil - } - - snFactory, err := snapshotterFactory(common.config.Root, cfg.Snapshotter) - if err != nil { - return nil, err - } - - if cfg.Rootless { - logrus.Debugf("running in rootless mode") - } - opt, err := runc.NewWorkerOpt(common.config.Root, snFactory, cfg.Rootless, cfg.Labels) - if err != nil { - return nil, err - } - opt.SessionManager = common.sessionManager - opt.GCPolicy = getGCPolicy(cfg.GCPolicy, common.config.Root) - opt.ResolveOptionsFunc = resolverFunc(common.config) - - if platformsStr := cfg.Platforms; len(platformsStr) != 0 { - platforms, err := parsePlatforms(platformsStr) - if err != nil { - return nil, errors.Wrap(err, "invalid platforms") - } - opt.Platforms = platforms - } - w, err := base.NewWorker(opt) - if err != nil { - return nil, err - } - return []worker.Worker{w}, nil -} - -func snapshotterFactory(commonRoot, name string) (runc.SnapshotterFactory, error) { - if name == "auto" { - if err := overlay.Supported(commonRoot); err == nil { - logrus.Debug("auto snapshotter: using overlayfs") - name = "overlayfs" - } else { - logrus.Debugf("auto snapshotter: using native, because overlayfs is not available for %s: %v", commonRoot, err) - name = "native" - } - } - snFactory := runc.SnapshotterFactory{ - Name: name, - } - switch name { - case "native": - snFactory.New = native.NewSnapshotter - case "overlayfs": // not "overlay", for consistency with containerd snapshotter plugin ID. - snFactory.New = func(root string) (ctdsnapshot.Snapshotter, error) { - return overlay.NewSnapshotter(root) - } - default: - return snFactory, errors.Errorf("unknown snapshotter name: %q", name) - } - return snFactory, nil -} - -func validOCIBinary() bool { - _, err := exec.LookPath("runc") - _, err1 := exec.LookPath("buildkit-runc") - if err != nil && err1 != nil { - logrus.Warnf("skipping oci worker, as runc does not exist") - return false - } - return true -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/trace.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/trace.go deleted file mode 100644 index 3d6fed83a82e..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/trace.go +++ /dev/null @@ -1,31 +0,0 @@ -package main - -import ( - "io" - "os" - - opentracing "github.com/opentracing/opentracing-go" - jaeger "github.com/uber/jaeger-client-go" -) - -var tracer opentracing.Tracer -var closeTracer io.Closer - -func init() { - - tracer = opentracing.NoopTracer{} - - if traceAddr := os.Getenv("JAEGER_TRACE"); traceAddr != "" { - tr, err := jaeger.NewUDPTransport(traceAddr, 0) - if err != nil { - panic(err) - } - - tracer, closeTracer = jaeger.NewTracer( - "buildkitd", - jaeger.NewConstSampler(true), - jaeger.NewRemoteReporter(tr), - ) - } - -} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/util.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/util.go deleted file mode 100644 index 5d462bd23203..000000000000 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/util.go +++ /dev/null @@ -1,15 +0,0 @@ -package main - -import ( - "strconv" - "strings" -) - -// parseBoolOrAuto returns (nil, nil) if s is "auto" -func parseBoolOrAuto(s string) (*bool, error) { - if s == "" || strings.ToLower(s) == "auto" { - return nil, nil - } - b, err := 
strconv.ParseBool(s) - return &b, err -} diff --git a/vendor/github.com/moby/buildkit/control/control.go b/vendor/github.com/moby/buildkit/control/control.go deleted file mode 100644 index 96862ed680c1..000000000000 --- a/vendor/github.com/moby/buildkit/control/control.go +++ /dev/null @@ -1,409 +0,0 @@ -package control - -import ( - "context" - "sync" - "time" - - "github.com/docker/distribution/reference" - controlapi "github.com/moby/buildkit/api/services/control" - apitypes "github.com/moby/buildkit/api/types" - "github.com/moby/buildkit/cache/remotecache" - "github.com/moby/buildkit/client" - controlgateway "github.com/moby/buildkit/control/gateway" - "github.com/moby/buildkit/exporter" - "github.com/moby/buildkit/frontend" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/grpchijack" - "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/throttle" - "github.com/moby/buildkit/worker" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc" -) - -type ResolveCacheExporterFunc func(ctx context.Context, typ, target string) (remotecache.Exporter, error) - -type Opt struct { - SessionManager *session.Manager - WorkerController *worker.Controller - Frontends map[string]frontend.Frontend - CacheKeyStorage solver.CacheKeyStorage - ResolveCacheExporterFunc remotecache.ResolveCacheExporterFunc - ResolveCacheImporterFunc remotecache.ResolveCacheImporterFunc -} - -type Controller struct { // TODO: ControlService - opt Opt - solver *llbsolver.Solver - cache solver.CacheManager - gatewayForwarder *controlgateway.GatewayForwarder - throttledGC func() - gcmu sync.Mutex -} - -func NewController(opt Opt) (*Controller, error) { - cache := solver.NewCacheManager("local", opt.CacheKeyStorage, worker.NewCacheResultStorage(opt.WorkerController)) - - gatewayForwarder := controlgateway.NewGatewayForwarder() - - solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, cache, opt.ResolveCacheImporterFunc, gatewayForwarder) - if err != nil { - return nil, errors.Wrap(err, "failed to create solver") - } - - c := &Controller{ - opt: opt, - solver: solver, - cache: cache, - gatewayForwarder: gatewayForwarder, - } - c.throttledGC = throttle.ThrottleAfter(time.Minute, c.gc) - - defer func() { - time.AfterFunc(time.Second, c.throttledGC) - }() - - return c, nil -} - -func (c *Controller) Register(server *grpc.Server) error { - controlapi.RegisterControlServer(server, c) - c.gatewayForwarder.Register(server) - return nil -} - -func (c *Controller) DiskUsage(ctx context.Context, r *controlapi.DiskUsageRequest) (*controlapi.DiskUsageResponse, error) { - resp := &controlapi.DiskUsageResponse{} - workers, err := c.opt.WorkerController.List() - if err != nil { - return nil, err - } - for _, w := range workers { - du, err := w.DiskUsage(ctx, client.DiskUsageInfo{ - Filter: r.Filter, - }) - if err != nil { - return nil, err - } - - for _, r := range du { - resp.Record = append(resp.Record, &controlapi.UsageRecord{ - // TODO: add worker info - ID: r.ID, - Mutable: r.Mutable, - InUse: r.InUse, - Size_: r.Size, - Parent: r.Parent, - UsageCount: int64(r.UsageCount), - Description: r.Description, - CreatedAt: r.CreatedAt, - LastUsedAt: r.LastUsedAt, - RecordType: string(r.RecordType), - Shared: r.Shared, - }) - } - } - return resp, nil -} - -func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Control_PruneServer) error { - 
ch := make(chan client.UsageInfo) - - eg, ctx := errgroup.WithContext(stream.Context()) - workers, err := c.opt.WorkerController.List() - if err != nil { - return errors.Wrap(err, "failed to list workers for prune") - } - - didPrune := false - defer func() { - if didPrune { - if c, ok := c.cache.(interface { - ReleaseUnreferenced() error - }); ok { - if err := c.ReleaseUnreferenced(); err != nil { - logrus.Errorf("failed to release cache metadata: %+v", err) - } - } - } - }() - - for _, w := range workers { - func(w worker.Worker) { - eg.Go(func() error { - return w.Prune(ctx, ch, client.PruneInfo{ - Filter: req.Filter, - All: req.All, - KeepDuration: time.Duration(req.KeepDuration), - KeepBytes: req.KeepBytes, - }) - }) - }(w) - } - - eg2, _ := errgroup.WithContext(stream.Context()) - - eg2.Go(func() error { - defer close(ch) - return eg.Wait() - }) - - eg2.Go(func() error { - for r := range ch { - didPrune = true - if err := stream.Send(&controlapi.UsageRecord{ - // TODO: add worker info - ID: r.ID, - Mutable: r.Mutable, - InUse: r.InUse, - Size_: r.Size, - Parent: r.Parent, - UsageCount: int64(r.UsageCount), - Description: r.Description, - CreatedAt: r.CreatedAt, - LastUsedAt: r.LastUsedAt, - RecordType: string(r.RecordType), - Shared: r.Shared, - }); err != nil { - return err - } - } - return nil - }) - - return eg2.Wait() -} - -func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*controlapi.SolveResponse, error) { - ctx = session.NewContext(ctx, req.Session) - - defer func() { - time.AfterFunc(time.Second, c.throttledGC) - }() - - var expi exporter.ExporterInstance - // TODO: multiworker - // This is actually tricky, as the exporter should come from the worker that has the returned reference. We may need to delay this so that the solver loads this. 
- w, err := c.opt.WorkerController.GetDefault() - if err != nil { - return nil, err - } - if req.Exporter != "" { - exp, err := w.Exporter(req.Exporter) - if err != nil { - return nil, err - } - expi, err = exp.Resolve(ctx, req.ExporterAttrs) - if err != nil { - return nil, err - } - } - - var cacheExporter remotecache.Exporter - if ref := req.Cache.ExportRef; ref != "" && c.opt.ResolveCacheExporterFunc != nil { - parsed, err := reference.ParseNormalizedNamed(ref) - if err != nil { - return nil, err - } - exportCacheRef := reference.TagNameOnly(parsed).String() - typ := "" // unimplemented yet (typically registry) - cacheExporter, err = c.opt.ResolveCacheExporterFunc(ctx, typ, exportCacheRef) - if err != nil { - return nil, err - } - } - - var importCacheRefs []string - for _, ref := range req.Cache.ImportRefs { - parsed, err := reference.ParseNormalizedNamed(ref) - if err != nil { - return nil, err - } - importCacheRefs = append(importCacheRefs, reference.TagNameOnly(parsed).String()) - } - - resp, err := c.solver.Solve(ctx, req.Ref, frontend.SolveRequest{ - Frontend: req.Frontend, - Definition: req.Definition, - FrontendOpt: req.FrontendAttrs, - ImportCacheRefs: importCacheRefs, - }, llbsolver.ExporterRequest{ - Exporter: expi, - CacheExporter: cacheExporter, - CacheExportMode: parseCacheExporterOpt(req.Cache.ExportAttrs), - }, req.Entitlements) - if err != nil { - return nil, err - } - return &controlapi.SolveResponse{ - ExporterResponse: resp.ExporterResponse, - }, nil -} - -func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Control_StatusServer) error { - ch := make(chan *client.SolveStatus, 8) - - eg, ctx := errgroup.WithContext(stream.Context()) - eg.Go(func() error { - return c.solver.Status(ctx, req.Ref, ch) - }) - - eg.Go(func() error { - for { - ss, ok := <-ch - if !ok { - return nil - } - sr := controlapi.StatusResponse{} - for _, v := range ss.Vertexes { - sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{ - Digest: v.Digest, - Inputs: v.Inputs, - Name: v.Name, - Started: v.Started, - Completed: v.Completed, - Error: v.Error, - Cached: v.Cached, - }) - } - for _, v := range ss.Statuses { - sr.Statuses = append(sr.Statuses, &controlapi.VertexStatus{ - ID: v.ID, - Vertex: v.Vertex, - Name: v.Name, - Current: v.Current, - Total: v.Total, - Timestamp: v.Timestamp, - Started: v.Started, - Completed: v.Completed, - }) - } - for _, v := range ss.Logs { - sr.Logs = append(sr.Logs, &controlapi.VertexLog{ - Vertex: v.Vertex, - Stream: int64(v.Stream), - Msg: v.Data, - Timestamp: v.Timestamp, - }) - } - if err := stream.SendMsg(&sr); err != nil { - return err - } - } - }) - - return eg.Wait() -} - -func (c *Controller) Session(stream controlapi.Control_SessionServer) error { - logrus.Debugf("session started") - conn, closeCh, opts := grpchijack.Hijack(stream) - defer conn.Close() - - ctx, cancel := context.WithCancel(stream.Context()) - go func() { - <-closeCh - cancel() - }() - - err := c.opt.SessionManager.HandleConn(ctx, conn, opts) - logrus.Debugf("session finished: %v", err) - return err -} - -func (c *Controller) ListWorkers(ctx context.Context, r *controlapi.ListWorkersRequest) (*controlapi.ListWorkersResponse, error) { - resp := &controlapi.ListWorkersResponse{} - workers, err := c.opt.WorkerController.List(r.Filter...) 
- if err != nil { - return nil, err - } - for _, w := range workers { - resp.Record = append(resp.Record, &apitypes.WorkerRecord{ - ID: w.ID(), - Labels: w.Labels(), - Platforms: pb.PlatformsFromSpec(w.Platforms()), - GCPolicy: toPBGCPolicy(w.GCPolicy()), - }) - } - return resp, nil -} - -func (c *Controller) gc() { - c.gcmu.Lock() - defer c.gcmu.Unlock() - - workers, err := c.opt.WorkerController.List() - if err != nil { - return - } - - eg, ctx := errgroup.WithContext(context.TODO()) - - var size int64 - ch := make(chan client.UsageInfo) - done := make(chan struct{}) - go func() { - for ui := range ch { - size += ui.Size - } - close(done) - }() - - for _, w := range workers { - func(w worker.Worker) { - eg.Go(func() error { - if policy := w.GCPolicy(); len(policy) > 0 { - return w.Prune(ctx, ch, policy...) - } - return nil - }) - }(w) - } - - err = eg.Wait() - close(ch) - if err != nil { - logrus.Errorf("gc error: %+v", err) - } - <-done - if size > 0 { - logrus.Debugf("gc cleaned up %d bytes", size) - } -} - -func parseCacheExporterOpt(opt map[string]string) solver.CacheExportMode { - for k, v := range opt { - switch k { - case "mode": - switch v { - case "min": - return solver.CacheExportModeMin - case "max": - return solver.CacheExportModeMax - default: - logrus.Debugf("skipping invalid cache export mode: %s", v) - } - default: - logrus.Warnf("skipping invalid cache export opt: %s", v) - } - } - return solver.CacheExportModeMin -} - -func toPBGCPolicy(in []client.PruneInfo) []*apitypes.GCPolicy { - policy := make([]*apitypes.GCPolicy, 0, len(in)) - for _, p := range in { - policy = append(policy, &apitypes.GCPolicy{ - All: p.All, - KeepBytes: p.KeepBytes, - KeepDuration: int64(p.KeepDuration), - Filters: p.Filter, - }) - } - return policy -} diff --git a/vendor/github.com/moby/buildkit/control/gateway/gateway.go b/vendor/github.com/moby/buildkit/control/gateway/gateway.go deleted file mode 100644 index 074e739445ad..000000000000 --- a/vendor/github.com/moby/buildkit/control/gateway/gateway.go +++ /dev/null @@ -1,143 +0,0 @@ -package gateway - -import ( - "context" - "sync" - "time" - - "github.com/moby/buildkit/client/buildid" - "github.com/moby/buildkit/frontend/gateway" - gwapi "github.com/moby/buildkit/frontend/gateway/pb" - "github.com/pkg/errors" - "google.golang.org/grpc" -) - -type GatewayForwarder struct { - mu sync.RWMutex - updateCond *sync.Cond - builds map[string]gateway.LLBBridgeForwarder -} - -func NewGatewayForwarder() *GatewayForwarder { - gwf := &GatewayForwarder{ - builds: map[string]gateway.LLBBridgeForwarder{}, - } - gwf.updateCond = sync.NewCond(gwf.mu.RLocker()) - return gwf -} - -func (gwf *GatewayForwarder) Register(server *grpc.Server) { - gwapi.RegisterLLBBridgeServer(server, gwf) -} - -func (gwf *GatewayForwarder) RegisterBuild(ctx context.Context, id string, bridge gateway.LLBBridgeForwarder) error { - gwf.mu.Lock() - defer gwf.mu.Unlock() - - if _, ok := gwf.builds[id]; ok { - return errors.Errorf("build ID %s exists", id) - } - - gwf.builds[id] = bridge - gwf.updateCond.Broadcast() - - return nil -} - -func (gwf *GatewayForwarder) UnregisterBuild(ctx context.Context, id string) { - gwf.mu.Lock() - defer gwf.mu.Unlock() - - delete(gwf.builds, id) - gwf.updateCond.Broadcast() -} - -func (gwf *GatewayForwarder) lookupForwarder(ctx context.Context) (gateway.LLBBridgeForwarder, error) { - bid := buildid.FromIncomingContext(ctx) - if bid == "" { - return nil, errors.New("no buildid found in context") - } - - ctx, cancel := context.WithTimeout(ctx,
3*time.Second) - defer cancel() - - go func() { - <-ctx.Done() - gwf.updateCond.Broadcast() - }() - - gwf.mu.RLock() - defer gwf.mu.RUnlock() - for { - select { - case <-ctx.Done(): - return nil, errors.Errorf("no such job %s", bid) - default: - } - fwd, ok := gwf.builds[bid] - if !ok { - gwf.updateCond.Wait() - continue - } - return fwd, nil - } -} - -func (gwf *GatewayForwarder) ResolveImageConfig(ctx context.Context, req *gwapi.ResolveImageConfigRequest) (*gwapi.ResolveImageConfigResponse, error) { - fwd, err := gwf.lookupForwarder(ctx) - if err != nil { - return nil, errors.Wrap(err, "forwarding ResolveImageConfig") - } - - return fwd.ResolveImageConfig(ctx, req) -} - -func (gwf *GatewayForwarder) Solve(ctx context.Context, req *gwapi.SolveRequest) (*gwapi.SolveResponse, error) { - fwd, err := gwf.lookupForwarder(ctx) - if err != nil { - return nil, errors.Wrap(err, "forwarding Solve") - } - - return fwd.Solve(ctx, req) -} - -func (gwf *GatewayForwarder) ReadFile(ctx context.Context, req *gwapi.ReadFileRequest) (*gwapi.ReadFileResponse, error) { - fwd, err := gwf.lookupForwarder(ctx) - if err != nil { - return nil, errors.Wrap(err, "forwarding ReadFile") - } - return fwd.ReadFile(ctx, req) -} - -func (gwf *GatewayForwarder) Ping(ctx context.Context, req *gwapi.PingRequest) (*gwapi.PongResponse, error) { - fwd, err := gwf.lookupForwarder(ctx) - if err != nil { - return nil, errors.Wrap(err, "forwarding Ping") - } - return fwd.Ping(ctx, req) -} - -func (gwf *GatewayForwarder) Return(ctx context.Context, req *gwapi.ReturnRequest) (*gwapi.ReturnResponse, error) { - fwd, err := gwf.lookupForwarder(ctx) - if err != nil { - return nil, errors.Wrap(err, "forwarding Return") - } - res, err := fwd.Return(ctx, req) - return res, err -} - -func (gwf *GatewayForwarder) ReadDir(ctx context.Context, req *gwapi.ReadDirRequest) (*gwapi.ReadDirResponse, error) { - fwd, err := gwf.lookupForwarder(ctx) - if err != nil { - return nil, errors.Wrap(err, "forwarding ReadDir") - } - return fwd.ReadDir(ctx, req) -} - -func (gwf *GatewayForwarder) StatFile(ctx context.Context, req *gwapi.StatFileRequest) (*gwapi.StatFileResponse, error) { - fwd, err := gwf.lookupForwarder(ctx) - if err != nil { - return nil, errors.Wrap(err, "forwarding StatFile") - } - return fwd.StatFile(ctx, req) -} diff --git a/vendor/github.com/moby/buildkit/doc.go b/vendor/github.com/moby/buildkit/doc.go deleted file mode 100644 index e81189147365..000000000000 --- a/vendor/github.com/moby/buildkit/doc.go +++ /dev/null @@ -1 +0,0 @@ -package buildkit diff --git a/vendor/github.com/moby/buildkit/docs/rootless.md b/vendor/github.com/moby/buildkit/docs/rootless.md deleted file mode 100644 index ef42886a5c21..000000000000 --- a/vendor/github.com/moby/buildkit/docs/rootless.md +++ /dev/null @@ -1,83 +0,0 @@ -# Rootless mode (Experimental) - -Requirements: -- runc `a00bf0190895aa465a5fbed0268888e2c8ddfe85` (Oct 15, 2018) or later -- Some distros such as Debian (excluding Ubuntu) and Arch Linux require `sudo sh -c "echo 1 > /proc/sys/kernel/unprivileged_userns_clone"`. -- RHEL/CentOS 7 requires `sudo sh -c "echo 28633 > /proc/sys/user/max_user_namespaces"`. You may also need `sudo grubby --args="namespace.unpriv_enable=1 user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"`. -- `newuidmap` and `newgidmap` need to be installed on the host. These commands are provided by the `uidmap` package. For RHEL/CentOS 7, RPM is not officially provided but available at https://copr.fedorainfracloud.org/coprs/vbatts/shadow-utils-newxidmap/ . 
-- `/etc/subuid` and `/etc/subgid` should contain >= 65536 sub-IDs. e.g. `penguin:231072:65536`. -- To run in a Docker container with non-root `USER`, `docker run --privileged` is still required. See also Jessie's blog: https://blog.jessfraz.com/post/building-container-images-securely-on-kubernetes/ - - -## Set up - -Setting up rootless mode requires several manual steps, as follows; you can also use [`rootlesskit`](https://github.com/rootless-containers/rootlesskit) to automate them. - -### Terminal 1: - -``` -$ unshare -U -m -unshared$ echo $$ > /tmp/pid -``` - -Unsharing mountns (and userns) is required for mounting filesystems without real root privileges. - -### Terminal 2: - -``` -$ id -u -1001 -$ grep $(whoami) /etc/subuid -penguin:231072:65536 -$ grep $(whoami) /etc/subgid -penguin:231072:65536 -$ newuidmap $(cat /tmp/pid) 0 1001 1 1 231072 65536 -$ newgidmap $(cat /tmp/pid) 0 1001 1 1 231072 65536 -``` - -### Terminal 1: - -``` -unshared# buildkitd -``` - -* The data dir will be set to `/home/penguin/.local/share/buildkit` -* The address will be set to `unix:///run/user/1001/buildkit/buildkitd.sock` -* The `overlayfs` snapshotter is not supported except on Ubuntu-flavored kernels: http://kernel.ubuntu.com/git/ubuntu/ubuntu-artful.git/commit/fs/overlayfs?h=Ubuntu-4.13.0-25.29&id=0a414bdc3d01f3b61ed86cfe3ce8b63a9240eba7 -* The containerd worker is not supported (pending PR: https://github.com/containerd/containerd/pull/2006) -* The network namespace is not used at the moment. -* Cgroups are disabled. - -### Terminal 2: - -``` -$ go get ./examples/build-using-dockerfile -$ build-using-dockerfile --buildkit-addr unix:///run/user/1001/buildkit/buildkitd.sock -t foo /path/to/somewhere -``` - -## Set up (using a container) - -A Docker image is available as [`moby/buildkit:rootless`](https://hub.docker.com/r/moby/buildkit/tags/). - -``` -$ docker run --name buildkitd -d --privileged -p 1234:1234 moby/buildkit:rootless --addr tcp://0.0.0.0:1234 -``` - -`docker run` requires `--privileged`, but the BuildKit daemon is executed as a normal user. -See [`docker/cli#1347`](https://github.com/docker/cli/pull/1347) for the ongoing work to remove this requirement. - -``` -$ docker exec buildkitd id -uid=1000(user) gid=1000(user) -$ docker exec buildkitd ps aux -PID USER TIME COMMAND - 1 user 0:00 rootlesskit buildkitd --addr tcp://0.0.0.0:1234 - 13 user 0:00 /proc/self/exe buildkitd --addr tcp://0.0.0.0:1234 - 21 user 0:00 buildkitd --addr tcp://0.0.0.0:1234 - 29 user 0:00 ps aux -``` - -``` -$ go get ./examples/build-using-dockerfile -$ build-using-dockerfile --buildkit-addr tcp://127.0.0.1:1234 -t foo /path/to/somewhere -```
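[Editor's note: the effect of the `newuidmap`/`newgidmap` calls in the deleted doc above can be checked from inside the namespace by reading `/proc/self/uid_map` and `/proc/self/gid_map`. A minimal Go sketch of such a check follows; it is an editorial illustration, not part of the removed files.]

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"strings"
)

// printIDMap prints the uid or gid mappings of the current process.
// Each line of /proc/self/uid_map reads "<inside-id> <outside-id> <count>";
// after the newuidmap call shown above, uid 0 inside maps to uid 1001
// outside, and uids 1..65536 map into the 231072 range.
func printIDMap(path string) error {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	for _, line := range strings.Split(strings.TrimSpace(string(data)), "\n") {
		f := strings.Fields(line)
		if len(f) == 3 {
			fmt.Printf("%s: inside=%s outside=%s count=%s\n", path, f[0], f[1], f[2])
		}
	}
	return nil
}

func main() {
	for _, p := range []string{"/proc/self/uid_map", "/proc/self/gid_map"} {
		if err := printIDMap(p); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}
}
```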
diff --git a/vendor/github.com/moby/buildkit/docs/solver.md b/vendor/github.com/moby/buildkit/docs/solver.md deleted file mode 100644 index 45b81c5cb078..000000000000 --- a/vendor/github.com/moby/buildkit/docs/solver.md +++ /dev/null @@ -1,161 +0,0 @@ -## Buildkit solver design - -The solver is a component in BuildKit responsible for parsing the build definition and scheduling the operations to the workers for execution. - -The solver package is heavily optimized for deduplication of work, concurrent requests, remote and local caching, and different per-vertex caching modes. It also allows operations and frontends to call back into the solver with new definitions that they have generated. - -The implementation of the solver is quite complicated, mostly because it is supposed to be performant with a snapshot-based storage layer and a distribution model using layer tarballs. It is expected that calculating the content-based checksum of snapshots between every operation or after every command execution is too slow for common use cases and needs to be postponed to when it is likely to have a meaningful impact. Ideally, the user shouldn't realize that these optimizations are taking place and should just get intuitive caching. It is also hoped that if some implementations can provide better cache capabilities, the solver can take advantage of that without significant modification. - -In addition to avoiding content checksum scanning, the implementation is also designed to make decisions with the minimum available data. For example, for remote cache sources to be effective, the solver does not require the cache to be loaded or to exist for all the vertexes in the graph, but only loads it for the final node that is determined to match the cache. As another example, if one of the inputs (for example, an image) can produce a definition-based cache match for a vertex, and another (for example, local source files) can only produce a content-based (slower) cache match, the solver is designed to detect this and skip the content-based check for the first input (which would cause a pull to happen). - -### Build definition - -The solver takes in a build definition in the form of a content-addressable operation definition that forms a graph. - -A vertex in this graph is defined by these properties: - -```go -type Vertex interface { - Digest() digest.Digest - Options() VertexOptions - Sys() interface{} - Inputs() []Edge - Name() string -} - -type Edge struct { - Index Index - Vertex Vertex -} - -type Index int -``` - -Every vertex has a content-addressable digest that represents a checksum of the definition graph up to that vertex, including all of its inputs. If two vertexes have the same checksum, they are considered identical while executing concurrently. That means that if two other vertexes request a vertex with the same digest as an input, they will wait for the same operation to finish. - -The vertex digest can only be used for comparison while the solver is running, not between different invocations. For example, if parallel builds require using the `docker.io/library/alpine:latest` image as one of the operations, it is pulled only once. But if a build using `docker.io/library/alpine:latest` was built earlier, the checksum based on that name can't be used to determine whether the vertex was already built, because the image might have changed in the registry and the "latest" tag might be pointing to another image. - -The `Sys()` method returns an object that is used to resolve the executor for the operation. This is how a definition can pass logic to the worker that will execute the task associated with the vertex, without the solver needing to know anything about the implementation. When the solver needs to execute a vertex, it will send this object to a worker, so the worker needs to be configured to understand the object returned by `Sys()`. The solver itself doesn't care how the operations are implemented and therefore doesn't define a type for this value. In the LLB solver this value has type `llb.Op`. - -`Inputs()` returns an array of other vertexes the current vertex depends on. A vertex may have zero inputs. After an operation has executed, it returns an array of return references. If another operation wants to depend on any of these references, it defines an input with that vertex and the index of the reference in the return array (starting from zero). Inputs need to be contained in the `Digest()` of the vertex - two vertexes with different inputs should never have the same digest.
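[Editor's note: to make the digest/input relationship concrete, here is a minimal editorial sketch of how a vertex digest could fold in its inputs. The helper and its encoding are hypothetical, not BuildKit's actual marshaling; only the `go-digest` API is real.]

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// input pairs a dependency's digest with the index of the return
// reference being consumed, mirroring the Edge{Vertex, Index} shape.
type input struct {
	dgst  digest.Digest
	index int
}

// vertexDigest folds the operation's own payload and every input's
// (digest, index) pair into one checksum, so a vertex with different
// inputs can never share a digest with another vertex.
func vertexDigest(opPayload []byte, inputs []input) digest.Digest {
	h := digest.SHA256.Digester()
	h.Hash().Write(opPayload)
	for _, in := range inputs {
		fmt.Fprintf(h.Hash(), "|%s:%d", in.dgst, in.index)
	}
	return h.Digest()
}

func main() {
	op := []byte(`{"exec":{"args":["make"]}}`)
	a := vertexDigest(op, nil)
	b := vertexDigest(op, []input{{dgst: digest.FromString("base image"), index: 0}})
	fmt.Println(a == b) // false: changing an input changes the vertex digest
}
```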
- -Options contain extra information that can be associated with the vertex but that doesn't change its definition (or equality check). Normally this is a hint to the solver, for example, to ignore the cache when executing; it can also be used for associating messages with the vertex that can be helpful for tracing purposes. - - -### Operation interface - -The operation interface is how the solver can evaluate the properties of the actual vertex operation. These methods run on the worker, and their implementation is determined by the value of `vertex.Sys()`. The solver is configured with a "resolve" function that can convert a `vertex.Sys()` into an `Op`. - -```go -// Op is an implementation for running a vertex -type Op interface { - // CacheMap returns structure describing how the operation is cached. - // Currently only roots are allowed to return multiple cache maps per op. - CacheMap(context.Context, int) (*CacheMap, bool, error) - // Exec runs an operation given results from previous operations. - // Note that this is not the process execution but can have any definition. - Exec(ctx context.Context, inputs []Result) (outputs []Result, err error) -} - -type CacheMap struct { - // Digest is a base digest for operation that needs to be combined with - // inputs cache or selectors for dependencies. - Digest digest.Digest - Deps []struct { - // Optional digest that is merged with the cache key of the input - Selector digest.Digest - // Optional function that returns a digest for the input based on its - // return value - ComputeDigestFunc ResultBasedCacheFunc - } -} - -type ResultBasedCacheFunc func(context.Context, Result) (digest.Digest, error) - - -// Result is an abstract return value for a solve -type Result interface { - ID() string - Release(context.Context) error - Sys() interface{} -} -``` - -There are two functions that every operation defines: one describes how to calculate a cache key for a vertex, and the other how to execute it. - -`CacheMap` is a description for calculating the cache key. It contains a digest that is combined with the cache keys of the inputs to determine the stable checksum that can be used to cache the operation result. For the vertexes that don't have inputs (roots), it is important that this digest is a stable, secure checksum. For example, in LLB this digest is a manifest digest for container images or a commit SHA for git sources. - -`CacheMap` may also define optional selectors or content-based cache functions for its inputs. A selector is combined with the input cache key and is useful for describing when different parts of an input are being used and the input's cache key needs to be customized. A content-based cache function allows computing a new cache key for an input after it has completed. In LLB this is used for calculating a cache key based on the checksum of the file contents of the input snapshots. - -`Exec` executes the operation defined by a vertex by passing in the results of the inputs. - - -### Shared graph - -After a new build request is sent to the solver, it first loads all the vertexes into the shared graph structure. For status tracking, a job instance needs to be created, and vertexes are loaded through jobs. A job ID is assigned to every vertex. If a vertex with the same digest has already been loaded to the shared graph, a new job ID is appended to the existing record. When the job finishes, it removes all of its references from the loaded vertex. The resources are released if no more references remain.
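The "wait for the same operation" behavior can be pictured with a small standalone sketch; the `graph` and `sharedOp` types below are invented for illustration and are not the solver's internals:

```go
package main

import (
	"fmt"
	"sync"
)

// sharedOp holds the single execution shared by all requesters of one
// vertex digest.
type sharedOp struct {
	once   sync.Once
	result string
	err    error
}

type graph struct {
	mu  sync.Mutex
	ops map[string]*sharedOp // keyed by vertex digest
}

// exec deduplicates execution: two edges asking for the same digest
// share one run of the operation and get the same result back.
func (g *graph) exec(dgst string, run func() (string, error)) (string, error) {
	g.mu.Lock()
	op, ok := g.ops[dgst]
	if !ok {
		op = &sharedOp{}
		g.ops[dgst] = op
	}
	g.mu.Unlock()
	op.once.Do(func() { op.result, op.err = run() })
	return op.result, op.err
}

func main() {
	g := &graph{ops: map[string]*sharedOp{}}
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			res, _ := g.exec("sha256:abc", func() (string, error) {
				fmt.Println("executing once") // printed a single time
				return "ref-1", nil
			})
			fmt.Println(res)
		}()
	}
	wg.Wait()
}
```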
- -Loading a vertex also creates a progress writer associated with it and sets up the cache sources associated with the specific vertex. - -After vertexes have been loaded to the job, it is safe to request a result from an edge pointing to a previously loaded vertex. To do this, the `build(ctx, Edge) (CachedResult, error)` method is called on the static scheduler instance associated with the solver. - -### Scheduler - -The scheduler is the component responsible for invoking the individual operations needed to find the result for the graph. While the build definition is defined with vertexes, the scheduler solves edges. In the case of the LLB solver, the result of a solved edge is associated with a snapshot. Usually, to solve an edge, the input edges need to be solved first, and this can be done concurrently, but there are many exceptions: an edge may be cached while its input is not, or solving one input might cause a cache hit that makes solving the others wasteful. The scheduler tries to handle all these cases. - -The scheduler is implemented as a single-threaded, non-blocking event loop. The single-threaded constraint is for simplicity and might be removed in the future - currently, it is not known if this would have any performance impact. All the events in the scheduler have one fixed sender and receiver. The interface for interacting with the scheduler is to create a "pipe" between a sender and a receiver. One or both sides of the pipe may be an edge instance of the graph. If a pipe is added to the scheduler and an edge receives an event from the pipe, the scheduler will "unpark" that edge so it can process all the events it has received. - -The unpark handler for an edge needs to be non-blocking and execute quickly. The edge will process the data from the incoming events and update its internal state. When calling unpark, the scheduler has already separated out the sender and receiver sides of the pipes, which in the code are referred to as incoming and outgoing requests. The incoming requests are usually requests to retrieve a result or a cache key from an edge. If it appears that an edge doesn't have enough internal state to satisfy the requests, it can make new pipes and register them with the scheduler. These new pipes are generally of two types: ones asking for some async function to be completed and others that request an input edge to reach a specific state first. - -To avoid bugs and deadlocks in this logic, the unpark method needs to follow these rules: if unpark has finished without completing all incoming requests, it needs to create outgoing requests; similarly, if an incoming request remains pending, at least one outgoing request needs to exist as well. Failing to comply with these rules will cause the scheduler to panic as a precaution to avoid leaks and hiding errors. - -### Edge state - -During unpark, the edge state is incremented until it can fulfill the incoming requests. - -An edge can be in the following states: initial, cache-fast, cache-slow, completed. A completed edge contains a reference to the final result; an in-progress edge may have zero or more cache keys. - -The initial state is the starting state for any edge. If an edge has reached the cache-fast state, it means that all the definition-based cache key lookups have been performed. Cache-slow means that the content-based cache lookup has been performed as well. If possible, the scheduler will avoid looking up the slow keys of inputs if they are unnecessary for solving the current edge.
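A rough sketch of this monotonic progression, with invented names (`edgeState`, `raiseState`) rather than the scheduler's actual representation:

```go
package main

import "fmt"

// edgeState mirrors the states described above; hypothetical only.
type edgeState int

const (
	stateInitial   edgeState = iota
	stateCacheFast           // definition-based cache keys looked up
	stateCacheSlow           // content-based cache keys looked up too
	stateComplete            // holds a reference to the final result
)

// raiseState advances an edge one step toward the desired state and
// reports whether further unparks are still needed. State only ever
// moves forward, matching the "incremented until it can fulfill the
// incoming requests" behavior.
func raiseState(cur, desired edgeState) (edgeState, bool) {
	if cur >= desired || cur == stateComplete {
		return cur, false
	}
	return cur + 1, cur+1 < desired
}

func main() {
	st := stateInitial
	for pending := true; pending; {
		st, pending = raiseState(st, stateComplete)
		fmt.Println("edge state is now", st)
	}
}
```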
- -The unpark method is split into four phases. The first phase processes all incoming events (responses from outgoing requests or new incoming requests) that caused the unpark to be called. These contain responses from async functions, like calls to get the cache map, the execution result, or the content-based checksum for an input, or responses from input edges when their state or number of cache keys has changed. All the results are stored in the edge's internal state. For the new cache keys, a query is performed to determine if any of them can create potential matches for the current edge. - -After that, if any of the updates caused changes to the edge's properties, a new state is calculated for the current vertex. In this step, all potential cache keys from inputs can cause new cache keys for the edge to be created, and the status of the edge might be updated. - -Third, the edge will go over all of its incoming requests to determine if the current internal state is sufficient for satisfying them all. There are a couple of possibilities for how this check may end. If all requests can be completed and there are no outgoing requests, the requests finish and the unpark method returns. If there are outgoing requests but the edge has reached the completed state or all incoming requests have been canceled, the outgoing requests are canceled. This is an async operation as well and will cause unpark to be called again after completion. If these conditions don't apply but the requests can be completed and there are outgoing requests, then the incoming request is answered but not completed; the receiver can then decide to cancel this request if needed. If no new data has appeared to answer the incoming requests, the desired state for the edge is determined from the incoming requests, and we continue to the next step. - -The fourth step sets up outgoing requests based on the desired state determined in the third step. If the current state requires calling any async functions to move forward, then that is done here. We also loop through all the inputs to determine if it is important to raise their desired state. Depending on which inputs can produce content-based cache keys and which inputs have already returned possible cache matches, the desired state for inputs may be raised at different times. - -When an edge needs to resolve an operation to call the async `CacheMap` and `Exec` methods, it does so by calling back to the shared graph. This makes sure that two different edges pointing to the same vertex do not execute it twice. The result values for the operation shared by the edges are also cached until the vertex is cleaned up. Progress reporting is also handled and forwarded to the job through this shared vertex instance. - -Edge state is cleaned up when the final job that loaded the vertexes it is connected to is discarded. - - -### Cache providers - -Cache providers determine if there is a result that matches the cache keys generated during the build that could be reused instead of fully reevaluating the vertex and its inputs. There can be multiple cache providers, and specific providers can be defined per vertex using the vertex options. - -There are multiple backend implementations for cache providers: an in-memory one used in unit tests, the default local one backed by bbolt, and one based on cache manifests in a remote registry.
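To show how such a provider might be driven, here is a hedged sketch of a lookup that falls back to execution; all types are simplified, hypothetical stand-ins for the method set listed just below:

```go
package example

import "context"

// Hypothetical, simplified types standing in for the provider API
// described below; this sketch only shows the lookup-then-fallback
// flow, not the real solver structures.
type (
	CacheKey    struct{ ID string }
	CacheRecord struct{ Key *CacheKey }
	Result      interface{ ID() string }
)

type CacheProvider interface {
	Query(inputKeys []*CacheKey) ([]*CacheKey, error)
	Records(ck *CacheKey) ([]*CacheRecord, error)
	Load(ctx context.Context, rec *CacheRecord) (Result, error)
	Save(key *CacheKey, res Result) error
}

// resolve tries the cache first and falls back to executing the
// operation, saving the new result for future builds.
func resolve(ctx context.Context, p CacheProvider, inputKeys []*CacheKey,
	exec func(context.Context) (Result, error)) (Result, error) {
	keys, err := p.Query(inputKeys)
	if err != nil {
		return nil, err
	}
	for _, k := range keys {
		recs, err := p.Records(k)
		if err != nil {
			return nil, err
		}
		if len(recs) > 0 {
			// A record means a cached result can be loaded.
			return p.Load(ctx, recs[0])
		}
	}
	res, err := exec(ctx)
	if err != nil {
		return nil, err
	}
	if len(keys) > 0 {
		if err := p.Save(keys[0], res); err != nil {
			return nil, err
		}
	}
	return res, nil
}
```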
- -A simplified cache provider has the following methods: - -```go -Query(...) ([]*CacheKey, error) -Records(ck *CacheKey) ([]*CacheRecord, error) -Load(ctx context.Context, rec *CacheRecord) (Result, error) -Save(key *CacheKey, s Result) (*ExportableCacheKey, error) -``` - -The Query method is used to determine if there exists a possible cache link between an input and a vertex. It takes the parameters provided by `op.CacheMap` and the cache keys returned by calling the same method on its inputs. - -If a cache key has been found, its matching records can be requested. A cache key can have zero or more records. Having a record means that a cached result can be loaded for a specific vertex. The solver supports partial cache chains, meaning that not all inputs need to have a cache record for a vertex to match the cache. - -The Load method is used to load a specific record into a result reference. This value is the same type as the one returned by the `op.Exec` method. - -Save allows adding more records to the cache. - -### Merging edges - -One final piece of solver logic allows merging two edges into one when they have both returned the same cache key. In practice, this appears, for example, when a build uses the image references `alpine:latest` and `alpine@sha256:abcabc` in its definition and they actually point to the same image. Another case where this appears is when the same source files from different sources are being used as part of the build. - -After the scheduler has called `unpark()` on an edge, it checks whether the method added any new cache keys to its state. If it did, it checks its internal index for another active edge with the same cache key. If one exists, it performs some basic validation, for example checking that the new edge has not explicitly asked for the cache to be ignored, and if that passes, merges the states of the two edges. - -As a result of the merge, the edge that was checked is deleted, its ongoing requests are canceled, and the incoming ones are added to the original edge. \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/examples/build-using-dockerfile/main.go b/vendor/github.com/moby/buildkit/examples/build-using-dockerfile/main.go deleted file mode 100644 index 12fa84398fb4..000000000000 --- a/vendor/github.com/moby/buildkit/examples/build-using-dockerfile/main.go +++ /dev/null @@ -1,181 +0,0 @@ -package main - -import ( - "context" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/containerd/console" - "github.com/moby/buildkit/client" - dockerfile "github.com/moby/buildkit/frontend/dockerfile/builder" - "github.com/moby/buildkit/util/appcontext" - "github.com/moby/buildkit/util/appdefaults" - "github.com/moby/buildkit/util/progress/progressui" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" - "golang.org/x/sync/errgroup" -) - -func main() { - app := cli.NewApp() - app.Name = "build-using-dockerfile" - app.UsageText = `build-using-dockerfile [OPTIONS] PATH | URL | -` - app.Description = ` -build using Dockerfile. - -This command mimics the behavior of the "docker build" command so that people can easily get started with BuildKit. -This command is NOT a replacement for "docker build", and should NOT be used for building production images. - -By default, the built image is loaded to Docker.
-` - dockerIncompatibleFlags := []cli.Flag{ - cli.StringFlag{ - Name: "buildkit-addr", - Usage: "buildkit daemon address", - EnvVar: "BUILDKIT_HOST", - Value: appdefaults.Address, - }, - cli.BoolFlag{ - Name: "clientside-frontend", - Usage: "run dockerfile frontend client side, rather than builtin to buildkitd", - EnvVar: "BUILDKIT_CLIENTSIDE_FRONTEND", - }, - } - app.Flags = append([]cli.Flag{ - cli.StringSliceFlag{ - Name: "build-arg", - Usage: "Set build-time variables", - }, - cli.StringFlag{ - Name: "file, f", - Usage: "Name of the Dockerfile (Default is 'PATH/Dockerfile')", - }, - cli.StringFlag{ - Name: "tag, t", - Usage: "Name and optionally a tag in the 'name:tag' format", - }, - cli.StringFlag{ - Name: "target", - Usage: "Set the target build stage to build.", - }, - cli.BoolFlag{ - Name: "no-cache", - Usage: "Do not use cache when building the image", - }, - }, dockerIncompatibleFlags...) - app.Action = action - if err := app.Run(os.Args); err != nil { - fmt.Fprintf(os.Stderr, "error: %v\n", err) - os.Exit(1) - } -} - -func action(clicontext *cli.Context) error { - ctx := appcontext.Context() - - if tag := clicontext.String("tag"); tag == "" { - return errors.New("tag is not specified") - } - c, err := client.New(ctx, clicontext.String("buildkit-addr"), client.WithFailFast()) - if err != nil { - return err - } - pipeR, pipeW := io.Pipe() - solveOpt, err := newSolveOpt(clicontext, pipeW) - if err != nil { - return err - } - ch := make(chan *client.SolveStatus) - eg, ctx := errgroup.WithContext(ctx) - eg.Go(func() error { - var err error - if clicontext.Bool("clientside-frontend") { - _, err = c.Build(ctx, *solveOpt, "", dockerfile.Build, ch) - } else { - _, err = c.Solve(ctx, nil, *solveOpt, ch) - } - return err - }) - eg.Go(func() error { - var c console.Console - if cn, err := console.ConsoleFromFile(os.Stderr); err == nil { - c = cn - } - // not using shared context to not disrupt display but let it finish reporting errors - return progressui.DisplaySolveStatus(context.TODO(), "", c, os.Stdout, ch) - }) - eg.Go(func() error { - if err := loadDockerTar(pipeR); err != nil { - return err - } - return pipeR.Close() - }) - if err := eg.Wait(); err != nil { - return err - } - logrus.Infof("Loaded the image %q to Docker.", clicontext.String("tag")) - return nil -} - -func newSolveOpt(clicontext *cli.Context, w io.WriteCloser) (*client.SolveOpt, error) { - buildCtx := clicontext.Args().First() - if buildCtx == "" { - return nil, errors.New("please specify build context (e.g.
\".\" for the current directory)") - } else if buildCtx == "-" { - return nil, errors.New("stdin not supported yet") - } - - file := clicontext.String("file") - if file == "" { - file = filepath.Join(buildCtx, "Dockerfile") - } - localDirs := map[string]string{ - "context": buildCtx, - "dockerfile": filepath.Dir(file), - } - - frontend := "dockerfile.v0" // TODO: use gateway - if clicontext.Bool("clientside-frontend") { - frontend = "" - } - frontendAttrs := map[string]string{ - "filename": filepath.Base(file), - } - if target := clicontext.String("target"); target != "" { - frontendAttrs["target"] = target - } - if clicontext.Bool("no-cache") { - frontendAttrs["no-cache"] = "" - } - for _, buildArg := range clicontext.StringSlice("build-arg") { - kv := strings.SplitN(buildArg, "=", 2) - if len(kv) != 2 { - return nil, errors.Errorf("invalid build-arg value %s", buildArg) - } - frontendAttrs["build-arg:"+kv[0]] = kv[1] - } - return &client.SolveOpt{ - Exporter: "docker", // TODO: use containerd image store when it is integrated to Docker - ExporterAttrs: map[string]string{ - "name": clicontext.String("tag"), - }, - ExporterOutput: w, - LocalDirs: localDirs, - Frontend: frontend, - FrontendAttrs: frontendAttrs, - }, nil -} - -func loadDockerTar(r io.Reader) error { - // no need to use moby/moby/client here - cmd := exec.Command("docker", "load") - cmd.Stdin = r - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - return cmd.Run() -} diff --git a/vendor/github.com/moby/buildkit/examples/buildkit0/buildkit.go b/vendor/github.com/moby/buildkit/examples/buildkit0/buildkit.go deleted file mode 100644 index b7cfbba2fa8d..000000000000 --- a/vendor/github.com/moby/buildkit/examples/buildkit0/buildkit.go +++ /dev/null @@ -1,91 +0,0 @@ -package main - -import ( - "flag" - "os" - - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/util/system" -) - -type buildOpt struct { - withContainerd bool - containerd string - runc string -} - -func main() { - var opt buildOpt - flag.BoolVar(&opt.withContainerd, "with-containerd", true, "enable containerd worker") - flag.StringVar(&opt.containerd, "containerd", "v1.2.0-rc.1", "containerd version") - flag.StringVar(&opt.runc, "runc", "a00bf0190895aa465a5fbed0268888e2c8ddfe85", "runc version") - flag.Parse() - - bk := buildkit(opt) - out := bk.Run(llb.Shlex("ls -l /bin")) // debug output - - dt, err := out.Marshal(llb.LinuxAmd64) - if err != nil { - panic(err) - } - llb.WriteTo(dt, os.Stdout) -} - -func goBuildBase() llb.State { - goAlpine := llb.Image("docker.io/library/golang:1.11-alpine") - return goAlpine. - AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnv). - AddEnv("GOPATH", "/go"). - Run(llb.Shlex("apk add --no-cache g++ linux-headers")). - Run(llb.Shlex("apk add --no-cache git libseccomp-dev make")).Root() -} - -func runc(version string) llb.State { - return goBuildBase(). - Run(llb.Shlex("git clone https://github.com/opencontainers/runc.git /go/src/github.com/opencontainers/runc")). - Dir("/go/src/github.com/opencontainers/runc"). - Run(llb.Shlexf("git checkout -q %s", version)). - Run(llb.Shlex("go build -o /usr/bin/runc ./")).Root() -} - -func containerd(version string) llb.State { - return goBuildBase(). - Run(llb.Shlex("apk add --no-cache btrfs-progs-dev")). - Run(llb.Shlex("git clone https://github.com/containerd/containerd.git /go/src/github.com/containerd/containerd")). - Dir("/go/src/github.com/containerd/containerd"). - Run(llb.Shlexf("git checkout -q %s", version)). 
- Run(llb.Shlex("make bin/containerd")).Root() -} - -func buildkit(opt buildOpt) llb.State { - src := goBuildBase(). - Run(llb.Shlex("git clone https://github.com/moby/buildkit.git /go/src/github.com/moby/buildkit")). - Dir("/go/src/github.com/moby/buildkit") - - buildkitdOCIWorkerOnly := src. - Run(llb.Shlex("go build -o /bin/buildkitd.oci_only -tags no_containerd_worker ./cmd/buildkitd")) - - buildkitd := src. - Run(llb.Shlex("go build -o /bin/buildkitd ./cmd/buildkitd")) - - buildctl := src. - Run(llb.Shlex("go build -o /bin/buildctl ./cmd/buildctl")) - - r := llb.Image("docker.io/library/alpine:latest") - r = copy(buildctl.Root(), "/bin/buildctl", r, "/bin/") - r = copy(runc(opt.runc), "/usr/bin/runc", r, "/bin/") - if opt.withContainerd { - r = copy(containerd(opt.containerd), "/go/src/github.com/containerd/containerd/bin/containerd", r, "/bin/") - r = copy(buildkitd.Root(), "/bin/buildkitd", r, "/bin/") - } else { - r = copy(buildkitdOCIWorkerOnly.Root(), "/bin/buildkitd.oci_only", r, "/bin/") - } - return r -} - -func copy(src llb.State, srcPath string, dest llb.State, destPath string) llb.State { - cpImage := llb.Image("docker.io/library/alpine:latest") - cp := cpImage.Run(llb.Shlexf("cp -a /src%s /dest%s", srcPath, destPath)) - cp.AddMount("/src", src) - return cp.AddMount("/dest", dest) -} diff --git a/vendor/github.com/moby/buildkit/examples/buildkit1/buildkit.go b/vendor/github.com/moby/buildkit/examples/buildkit1/buildkit.go deleted file mode 100644 index 6abbce1a3060..000000000000 --- a/vendor/github.com/moby/buildkit/examples/buildkit1/buildkit.go +++ /dev/null @@ -1,108 +0,0 @@ -package main - -import ( - "flag" - "os" - - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/util/system" -) - -type buildOpt struct { - withContainerd bool - containerd string - runc string -} - -func main() { - var opt buildOpt - flag.BoolVar(&opt.withContainerd, "with-containerd", true, "enable containerd worker") - flag.StringVar(&opt.containerd, "containerd", "v1.2.0-rc.1", "containerd version") - flag.StringVar(&opt.runc, "runc", "a00bf0190895aa465a5fbed0268888e2c8ddfe85", "runc version") - flag.Parse() - - bk := buildkit(opt) - out := bk.Run(llb.Shlex("ls -l /bin")) // debug output - - dt, err := out.Marshal(llb.LinuxAmd64) - if err != nil { - panic(err) - } - llb.WriteTo(dt, os.Stdout) -} - -func goBuildBase() llb.State { - goAlpine := llb.Image("docker.io/library/golang:1.11-alpine") - return goAlpine. - AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnv). - AddEnv("GOPATH", "/go"). - Run(llb.Shlex("apk add --no-cache g++ linux-headers")). - Run(llb.Shlex("apk add --no-cache git libseccomp-dev make")).Root() -} - -func runc(version string) llb.State { - return goBuildBase(). - With(goFromGit("github.com/opencontainers/runc", version)). - Run(llb.Shlex("go build -o /usr/bin/runc ./")). - Root() -} - -func containerd(version string) llb.State { - return goBuildBase(). - Run(llb.Shlex("apk add --no-cache btrfs-progs-dev")). - With(goFromGit("github.com/containerd/containerd", version)). - Run(llb.Shlex("make bin/containerd")).Root() -} - -func buildkit(opt buildOpt) llb.State { - src := goBuildBase().With(goFromGit("github.com/moby/buildkit", "master")) - - buildkitdOCIWorkerOnly := src. - Run(llb.Shlex("go build -o /bin/buildkitd.oci_only -tags no_containerd_worker ./cmd/buildkitd")).Root() - - buildkitd := src. - Run(llb.Shlex("go build -o /bin/buildkitd ./cmd/buildkitd")).Root() - - buildctl := src. 
- Run(llb.Shlex("go build -o /bin/buildctl ./cmd/buildctl")).Root() - - r := llb.Image("docker.io/library/alpine:latest").With( - copyFrom(buildctl, "/bin/buildctl", "/bin/"), - copyFrom(runc(opt.runc), "/usr/bin/runc", "/bin/"), - ) - - if opt.withContainerd { - return r.With( - copyFrom(containerd(opt.containerd), "/go/src/github.com/containerd/containerd/bin/containerd", "/bin/"), - copyFrom(buildkitd, "/bin/buildkitd", "/bin/")) - } - return r.With(copyFrom(buildkitdOCIWorkerOnly, "/bin/buildkitd.oci_only", "/bin/")) -} - -// goFromGit is a helper for cloning a git repo, checking out a tag and copying -// source directory into -func goFromGit(repo, tag string) llb.StateOption { - src := llb.Image("docker.io/library/alpine:latest"). - Run(llb.Shlex("apk add --no-cache git")). - Run(llb.Shlexf("git clone https://%[1]s.git /go/src/%[1]s", repo)). - Dirf("/go/src/%s", repo). - Run(llb.Shlexf("git checkout -q %s", tag)).Root() - return func(s llb.State) llb.State { - return s.With(copyFrom(src, "/go", "/")).Reset(s).Dir(src.GetDir()) - } -} - -// copyFrom has similar semantics as `COPY --from` -func copyFrom(src llb.State, srcPath, destPath string) llb.StateOption { - return func(s llb.State) llb.State { - return copy(src, srcPath, s, destPath) - } -} - -// copy copies files between 2 states using cp until there is no copyOp -func copy(src llb.State, srcPath string, dest llb.State, destPath string) llb.State { - cpImage := llb.Image("docker.io/library/alpine:latest") - cp := cpImage.Run(llb.Shlexf("cp -a /src%s /dest%s", srcPath, destPath)) - cp.AddMount("/src", src) - return cp.AddMount("/dest", dest) -} diff --git a/vendor/github.com/moby/buildkit/examples/buildkit2/buildkit.go b/vendor/github.com/moby/buildkit/examples/buildkit2/buildkit.go deleted file mode 100644 index 662d1209349b..000000000000 --- a/vendor/github.com/moby/buildkit/examples/buildkit2/buildkit.go +++ /dev/null @@ -1,105 +0,0 @@ -package main - -import ( - "flag" - "os" - - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/util/system" -) - -type buildOpt struct { - withContainerd bool - containerd string - runc string -} - -func main() { - var opt buildOpt - flag.BoolVar(&opt.withContainerd, "with-containerd", true, "enable containerd worker") - flag.StringVar(&opt.containerd, "containerd", "v1.2.0-rc.1", "containerd version") - flag.StringVar(&opt.runc, "runc", "a00bf0190895aa465a5fbed0268888e2c8ddfe85", "runc version") - flag.Parse() - - bk := buildkit(opt) - out := bk.Run(llb.Shlex("ls -l /bin")) // debug output - - dt, err := out.Marshal(llb.LinuxAmd64) - if err != nil { - panic(err) - } - llb.WriteTo(dt, os.Stdout) -} - -func goBuildBase() llb.State { - goAlpine := llb.Image("docker.io/library/golang:1.11-alpine") - return goAlpine. - AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnv). - AddEnv("GOPATH", "/go"). - Run(llb.Shlex("apk add --no-cache g++ linux-headers libseccomp-dev make")).Root() -} - -func goRepo(s llb.State, repo, ref string, g ...llb.GitOption) func(ro ...llb.RunOption) llb.State { - dir := "/go/src/" + repo - return func(ro ...llb.RunOption) llb.State { - es := s.Dir(dir).Run(ro...) - es.AddMount(dir, llb.Git(repo, ref, g...)) - return es.AddMount(dir+"/bin", llb.Scratch()) - } -} - -func runc(version string) llb.State { - return goRepo(goBuildBase(), "github.com/opencontainers/runc", version)( - llb.Shlex("go build -o ./bin/runc ./"), - ) -} - -func containerd(version string) llb.State { - return goRepo( - goBuildBase(). 
- Run(llb.Shlex("apk add --no-cache btrfs-progs-dev")).Root(), - "github.com/containerd/containerd", version, llb.KeepGitDir())( - llb.Shlex("make bin/containerd"), - ) -} - -func buildkit(opt buildOpt) llb.State { - run := goRepo(goBuildBase(), "github.com/moby/buildkit", "master") - - buildkitdOCIWorkerOnly := run(llb.Shlex("go build -o ./bin/buildkitd.oci_only -tags no_containerd_worker ./cmd/buildkitd")) - - buildkitd := run(llb.Shlex("go build -o ./bin/buildkitd ./cmd/buildkitd")) - - buildctl := run(llb.Shlex("go build -o ./bin/buildctl ./cmd/buildctl")) - - r := llb.Image("docker.io/library/alpine:latest").With( - copyAll(buildctl, "/bin"), - copyAll(runc(opt.runc), "/bin"), - ) - - if opt.withContainerd { - return r.With( - copyAll(containerd(opt.containerd), "/bin"), - copyAll(buildkitd, "/bin")) - } - return r.With(copyAll(buildkitdOCIWorkerOnly, "/bin")) -} - -func copyAll(src llb.State, destPath string) llb.StateOption { - return copyFrom(src, "/.", destPath) -} - -// copyFrom has similar semantics as `COPY --from` -func copyFrom(src llb.State, srcPath, destPath string) llb.StateOption { - return func(s llb.State) llb.State { - return copy(src, srcPath, s, destPath) - } -} - -// copy copies files between 2 states using cp until there is no copyOp -func copy(src llb.State, srcPath string, dest llb.State, destPath string) llb.State { - cpImage := llb.Image("docker.io/library/alpine:latest") - cp := cpImage.Run(llb.Shlexf("cp -a /src%s /dest%s", srcPath, destPath)) - cp.AddMount("/src", src) - return cp.AddMount("/dest", dest) -} diff --git a/vendor/github.com/moby/buildkit/examples/buildkit3/buildkit.go b/vendor/github.com/moby/buildkit/examples/buildkit3/buildkit.go deleted file mode 100644 index f255a4b4392d..000000000000 --- a/vendor/github.com/moby/buildkit/examples/buildkit3/buildkit.go +++ /dev/null @@ -1,121 +0,0 @@ -package main - -import ( - "flag" - "os" - - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/util/system" -) - -type buildOpt struct { - withContainerd bool - containerd string - runc string - buildkit string -} - -func main() { - var opt buildOpt - flag.BoolVar(&opt.withContainerd, "with-containerd", true, "enable containerd worker") - flag.StringVar(&opt.containerd, "containerd", "v1.2.0-rc.1", "containerd version") - flag.StringVar(&opt.runc, "runc", "a00bf0190895aa465a5fbed0268888e2c8ddfe85", "runc version") - flag.StringVar(&opt.buildkit, "buildkit", "master", "buildkit version") - flag.Parse() - - bk := buildkit(opt) - out := bk - dt, err := out.Marshal(llb.LinuxAmd64) - if err != nil { - panic(err) - } - llb.WriteTo(dt, os.Stdout) -} - -func goBuildBase() llb.State { - goAlpine := llb.Image("docker.io/library/golang:1.11-alpine") - return goAlpine. - AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnv). - AddEnv("GOPATH", "/go"). - Run(llb.Shlex("apk add --no-cache g++ linux-headers libseccomp-dev make")).Root() -} - -func goRepo(s llb.State, repo string, src llb.State) func(ro ...llb.RunOption) llb.State { - dir := "/go/src/" + repo - return func(ro ...llb.RunOption) llb.State { - es := s.Dir(dir).Run(ro...) 
- es.AddMount(dir, src, llb.Readonly) - return es.AddMount("/out", llb.Scratch()) - } -} - -func runc(version string) llb.State { - repo := "github.com/opencontainers/runc" - src := llb.Git(repo, version) - if version == "local" { - src = llb.Local("runc-src") - } - return goRepo(goBuildBase(), repo, src)( - llb.Shlex("go build -o /out/runc ./"), - ) -} - -func containerd(version string) llb.State { - repo := "github.com/containerd/containerd" - src := llb.Git(repo, version, llb.KeepGitDir()) - if version == "local" { - src = llb.Local("containerd-src") - } - return goRepo( - goBuildBase(). - Run(llb.Shlex("apk add --no-cache btrfs-progs-dev")).Root(), - repo, src)( - llb.Shlex("go build -o /out/containerd ./cmd/containerd"), - ) -} - -func buildkit(opt buildOpt) llb.State { - repo := "github.com/moby/buildkit" - src := llb.Git(repo, opt.buildkit) - if opt.buildkit == "local" { - src = llb.Local("buildkit-src") - } - run := goRepo(goBuildBase(), repo, src) - - buildkitdOCIWorkerOnly := run(llb.Shlex("go build -o /out/buildkitd.oci_only -tags no_containerd_worker ./cmd/buildkitd")) - - buildkitd := run(llb.Shlex("go build -o /out/buildkitd ./cmd/buildkitd")) - - buildctl := run(llb.Shlex("go build -o /out/buildctl ./cmd/buildctl")) - - r := llb.Scratch().With( - copyAll(buildctl, "/"), - copyAll(runc(opt.runc), "/"), - ) - - if opt.withContainerd { - return r.With( - copyAll(containerd(opt.containerd), "/"), - copyAll(buildkitd, "/")) - } - return r.With(copyAll(buildkitdOCIWorkerOnly, "/")) -} - -func copyAll(src llb.State, destPath string) llb.StateOption { - return copyFrom(src, "/.", destPath) -} - -// copyFrom has similar semantics as `COPY --from` -func copyFrom(src llb.State, srcPath, destPath string) llb.StateOption { - return func(s llb.State) llb.State { - return copy(src, srcPath, s, destPath) - } -} - -// copy copies files between 2 states using cp until there is no copyOp -func copy(src llb.State, srcPath string, dest llb.State, destPath string) llb.State { - cpImage := llb.Image("docker.io/library/alpine:latest@sha256:1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe") - cp := cpImage.Run(llb.Shlexf("cp -a /src%s /dest%s", srcPath, destPath)) - cp.AddMount("/src", src, llb.Readonly) - return cp.AddMount("/dest", dest) -} diff --git a/vendor/github.com/moby/buildkit/examples/dockerfile2llb/main.go b/vendor/github.com/moby/buildkit/examples/dockerfile2llb/main.go deleted file mode 100644 index 9b31ea5d3656..000000000000 --- a/vendor/github.com/moby/buildkit/examples/dockerfile2llb/main.go +++ /dev/null @@ -1,45 +0,0 @@ -package main - -import ( - "flag" - "io/ioutil" - "log" - "os" - - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/client/llb/imagemetaresolver" - "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb" - "github.com/moby/buildkit/util/appcontext" -) - -type buildOpt struct { - target string -} - -func main() { - var opt buildOpt - flag.StringVar(&opt.target, "target", "", "target stage") - flag.Parse() - - df, err := ioutil.ReadAll(os.Stdin) - if err != nil { - panic(err) - } - - state, img, err := dockerfile2llb.Dockerfile2LLB(appcontext.Context(), df, dockerfile2llb.ConvertOpt{ - MetaResolver: imagemetaresolver.Default(), - Target: opt.target, - }) - if err != nil { - log.Printf("err: %+v", err) - panic(err) - } - - _ = img - - dt, err := state.Marshal() - if err != nil { - panic(err) - } - llb.WriteTo(dt, os.Stdout) -} diff --git a/vendor/github.com/moby/buildkit/examples/gobuild/main.go 
b/vendor/github.com/moby/buildkit/examples/gobuild/main.go deleted file mode 100644 index fdd52e2687d1..000000000000 --- a/vendor/github.com/moby/buildkit/examples/gobuild/main.go +++ /dev/null @@ -1,96 +0,0 @@ -// +build ignore - -package main - -import ( - "os" - - "github.com/moby/buildkit/client/llb" - gobuild "github.com/tonistiigi/llb-gobuild" -) - -func main() { - if err := run(); err != nil { - panic(err) - } -} - -func run() error { - src := llb.Local("src") - - gb := gobuild.New(nil) - // gb := gobuild.New(&gobuild.Opt{DevMode: true}) - - buildctl, err := gb.BuildExe(gobuild.BuildOpt{ - Source: src, - MountPath: "/go/src/github.com/moby/buildkit", - Pkg: "github.com/moby/buildkit/cmd/buildctl", - BuildTags: []string{}, - }) - if err != nil { - return err - } - - buildkitd, err := gb.BuildExe(gobuild.BuildOpt{ - Source: src, - MountPath: "/go/src/github.com/moby/buildkit", - Pkg: "github.com/moby/buildkit/cmd/buildkitd", - BuildTags: []string{"no_containerd_worker"}, - }) - if err != nil { - return err - } - _ = buildkitd - - containerd, err := gb.BuildExe(gobuild.BuildOpt{ - Source: llb.Git("github.com/containerd/containerd", "v1.2.0-rc.1"), - MountPath: "/go/src/github.com/containerd/containerd", - Pkg: "github.com/containerd/containerd/cmd/containerd", - BuildTags: []string{"no_btrfs"}, - }) - if err != nil { - return err - } - runc, err := gb.BuildExe(gobuild.BuildOpt{ - CgoEnabled: true, - Source: llb.Git("github.com/opencontainers/runc", "master"), - MountPath: "/go/src/github.com/opencontainers/runc", - Pkg: "github.com/opencontainers/runc", - BuildTags: []string{}, - }) - if err != nil { - return err - } - - sc := llb.Scratch(). - With(copyAll(*buildctl, "/")). - With(copyAll(*containerd, "/")). - // With(copyAll(*buildkitd, "/")). - With(copyAll(*runc, "/")) - - dt, err := sc.Marshal(llb.LinuxAmd64) - if err != nil { - panic(err) - } - llb.WriteTo(dt, os.Stdout) - return nil -} - -func copyAll(src llb.State, destPath string) llb.StateOption { - return copyFrom(src, "/.", destPath) -} - -// copyFrom has similar semantics as `COPY --from` -func copyFrom(src llb.State, srcPath, destPath string) llb.StateOption { - return func(s llb.State) llb.State { - return copy(src, srcPath, s, destPath) - } -} - -// copy copies files between 2 states using cp until there is no copyOp -func copy(src llb.State, srcPath string, dest llb.State, destPath string) llb.State { - cpImage := llb.Image("docker.io/library/alpine@sha256:1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe") - cp := cpImage.Run(llb.Shlexf("cp -a /src%s /dest%s", srcPath, destPath)) - cp.AddMount("/src", src, llb.Readonly) - return cp.AddMount("/dest", dest) -} diff --git a/vendor/github.com/moby/buildkit/examples/nested-llb/main.go b/vendor/github.com/moby/buildkit/examples/nested-llb/main.go deleted file mode 100644 index f5379ecd88ae..000000000000 --- a/vendor/github.com/moby/buildkit/examples/nested-llb/main.go +++ /dev/null @@ -1,39 +0,0 @@ -package main - -import ( - "os" - - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/client/llb/llbbuild" - "github.com/moby/buildkit/util/system" -) - -const url = "https://gist.githubusercontent.com/tonistiigi/03b4049f8cc3de059bd2a1a1d8643714/raw/b5960995d570d8c6d94db527e805edc6d5854268/buildprs.go" - -func main() { - build := goBuildBase(). - Run(llb.Shlex("apk add --no-cache curl")). 
- Run(llb.Shlexf("curl -o /buildprs.go \"%s\"", url)) - - buildkitRepo := "github.com/moby/buildkit" - - build = build.Run(llb.Shlex("sh -c \"go run /buildprs.go > /out/buildkit.llb.definition\"")) - build.AddMount("/go/src/"+buildkitRepo, llb.Git(buildkitRepo, "master")) - pb := build.AddMount("/out", llb.Scratch()) - - built := pb.With(llbbuild.Build()) - - dt, err := llb.Image("docker.io/library/alpine:latest").Run(llb.Shlex("ls -l /out"), llb.AddMount("/out", built, llb.Readonly)).Marshal(llb.LinuxAmd64) - if err != nil { - panic(err) - } - llb.WriteTo(dt, os.Stdout) -} - -func goBuildBase() llb.State { - goAlpine := llb.Image("docker.io/library/golang:1.11-alpine") - return goAlpine. - AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnv). - AddEnv("GOPATH", "/go"). - Run(llb.Shlex("apk add --no-cache g++ linux-headers make")).Root() -} diff --git a/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go deleted file mode 100644 index ed6553aa604f..000000000000 --- a/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go +++ /dev/null @@ -1,187 +0,0 @@ -package containerdexecutor - -import ( - "context" - "io" - "os" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/containerd/containerd" - "github.com/containerd/containerd/cio" - "github.com/containerd/containerd/contrib/seccomp" - containerdoci "github.com/containerd/containerd/oci" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/executor" - "github.com/moby/buildkit/executor/oci" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/network" - "github.com/moby/buildkit/util/system" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type containerdExecutor struct { - client *containerd.Client - root string - networkProviders map[pb.NetMode]network.Provider - cgroupParent string -} - -// New creates a new executor backed by connection to containerd API -func New(client *containerd.Client, root, cgroup string, networkProviders map[pb.NetMode]network.Provider) executor.Executor { - // clean up old hosts/resolv.conf file. 
ignore errors - os.RemoveAll(filepath.Join(root, "hosts")) - os.RemoveAll(filepath.Join(root, "resolv.conf")) - - return containerdExecutor{ - client: client, - root: root, - networkProviders: networkProviders, - cgroupParent: cgroup, - } -} - -func (w containerdExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.Mountable, mounts []executor.Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) (err error) { - id := identity.NewID() - - resolvConf, err := oci.GetResolvConf(ctx, w.root) - if err != nil { - return err - } - - hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts) - if err != nil { - return err - } - if clean != nil { - defer clean() - } - - mountable, err := root.Mount(ctx, false) - if err != nil { - return err - } - - rootMounts, err := mountable.Mount() - if err != nil { - return err - } - defer mountable.Release() - - var sgids []uint32 - uid, gid, err := oci.ParseUIDGID(meta.User) - if err != nil { - lm := snapshot.LocalMounterWithMounts(rootMounts) - rootfsPath, err := lm.Mount() - if err != nil { - return err - } - uid, gid, sgids, err = oci.GetUser(ctx, rootfsPath, meta.User) - if err != nil { - lm.Unmount() - return err - } - lm.Unmount() - } - - provider, ok := w.networkProviders[meta.NetMode] - if !ok { - return errors.Errorf("unknown network mode %s", meta.NetMode) - } - namespace, err := provider.New() - if err != nil { - return err - } - defer namespace.Close() - - if meta.NetMode == pb.NetMode_HOST { - logrus.Info("enabling HostNetworking") - } - - opts := []containerdoci.SpecOpts{oci.WithUIDGID(uid, gid, sgids)} - if meta.ReadonlyRootFS { - opts = append(opts, containerdoci.WithRootFSReadonly()) - } - if system.SeccompSupported() { - opts = append(opts, seccomp.WithDefaultProfile()) - } - if w.cgroupParent != "" { - var cgroupsPath string - lastSeparator := w.cgroupParent[len(w.cgroupParent)-1:] - if strings.Contains(w.cgroupParent, ".slice") && lastSeparator == ":" { - cgroupsPath = w.cgroupParent + id - } else { - cgroupsPath = filepath.Join("/", w.cgroupParent, "buildkit", id) - } - opts = append(opts, containerdoci.WithCgroup(cgroupsPath)) - } - spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, opts...) 
- if err != nil { - return err - } - defer cleanup() - - container, err := w.client.NewContainer(ctx, id, - containerd.WithSpec(spec), - ) - if err != nil { - return err - } - - defer func() { - if err1 := container.Delete(context.TODO()); err == nil && err1 != nil { - err = errors.Wrapf(err1, "failed to delete container %s", id) - } - }() - - task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStreams(stdin, stdout, stderr)), containerd.WithRootFS(rootMounts)) - if err != nil { - return err - } - defer func() { - if _, err1 := task.Delete(context.TODO()); err == nil && err1 != nil { - err = errors.Wrapf(err1, "failed to delete task %s", id) - } - }() - - if err := task.Start(ctx); err != nil { - return err - } - - statusCh, err := task.Wait(context.Background()) - if err != nil { - return err - } - - var cancel func() - ctxDone := ctx.Done() - for { - select { - case <-ctxDone: - ctxDone = nil - var killCtx context.Context - killCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second) - task.Kill(killCtx, syscall.SIGKILL) - case status := <-statusCh: - if cancel != nil { - cancel() - } - if status.ExitCode() != 0 { - err := errors.Errorf("process returned non-zero exit code: %d", status.ExitCode()) - select { - case <-ctx.Done(): - err = errors.Wrap(ctx.Err(), err.Error()) - default: - } - return err - } - return nil - } - } - -} diff --git a/vendor/github.com/moby/buildkit/executor/executor.go b/vendor/github.com/moby/buildkit/executor/executor.go deleted file mode 100644 index 91ae976faa9a..000000000000 --- a/vendor/github.com/moby/buildkit/executor/executor.go +++ /dev/null @@ -1,38 +0,0 @@ -package executor - -import ( - "context" - "io" - "net" - - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/solver/pb" -) - -type Meta struct { - Args []string - Env []string - User string - Cwd string - Tty bool - ReadonlyRootFS bool - ExtraHosts []HostIP - NetMode pb.NetMode -} - -type Mount struct { - Src cache.Mountable - Selector string - Dest string - Readonly bool -} - -type Executor interface { - // TODO: add stdout/err - Exec(ctx context.Context, meta Meta, rootfs cache.Mountable, mounts []Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error -} - -type HostIP struct { - Host string - IP net.IP -} diff --git a/vendor/github.com/moby/buildkit/executor/oci/hosts.go b/vendor/github.com/moby/buildkit/executor/oci/hosts.go deleted file mode 100644 index c350a6de27c2..000000000000 --- a/vendor/github.com/moby/buildkit/executor/oci/hosts.go +++ /dev/null @@ -1,69 +0,0 @@ -package oci - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/moby/buildkit/executor" - "github.com/moby/buildkit/identity" -) - -const hostsContent = ` -127.0.0.1 localhost buildkitsandbox -::1 localhost ip6-localhost ip6-loopback -` - -func GetHostsFile(ctx context.Context, stateDir string, extraHosts []executor.HostIP) (string, func(), error) { - if len(extraHosts) == 0 { - _, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) { - _, _, err := makeHostsFile(stateDir, nil) - return nil, err - }) - if err != nil { - return "", nil, err - } - return filepath.Join(stateDir, "hosts"), func() {}, nil - } - return makeHostsFile(stateDir, extraHosts) -} - -func makeHostsFile(stateDir string, extraHosts []executor.HostIP) (string, func(), error) { - p := filepath.Join(stateDir, "hosts") - if len(extraHosts) != 0 { - p += "." 
+ identity.NewID() - } - _, err := os.Stat(p) - if err == nil { - return "", func() {}, nil - } - if !os.IsNotExist(err) { - return "", nil, err - } - - b := &bytes.Buffer{} - - if _, err := b.Write([]byte(hostsContent)); err != nil { - return "", nil, err - } - - for _, h := range extraHosts { - if _, err := b.Write([]byte(fmt.Sprintf("%s\t%s\n", h.IP.String(), h.Host))); err != nil { - return "", nil, err - } - } - - if err := ioutil.WriteFile(p+".tmp", b.Bytes(), 0644); err != nil { - return "", nil, err - } - - if err := os.Rename(p+".tmp", p); err != nil { - return "", nil, err - } - return p, func() { - os.RemoveAll(p) - }, nil -} diff --git a/vendor/github.com/moby/buildkit/executor/oci/mounts.go b/vendor/github.com/moby/buildkit/executor/oci/mounts.go deleted file mode 100644 index a0fe8a9f925e..000000000000 --- a/vendor/github.com/moby/buildkit/executor/oci/mounts.go +++ /dev/null @@ -1,68 +0,0 @@ -package oci - -import ( - "context" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -// MountOpts sets oci spec specific info for mount points -type MountOpts func([]specs.Mount) []specs.Mount - -//GetMounts returns default required for buildkit -// https://github.com/moby/buildkit/issues/429 -func GetMounts(ctx context.Context, mountOpts ...MountOpts) []specs.Mount { - mounts := []specs.Mount{ - { - Destination: "/proc", - Type: "proc", - Source: "proc", - }, - { - Destination: "/dev", - Type: "tmpfs", - Source: "tmpfs", - Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, - }, - { - Destination: "/dev/pts", - Type: "devpts", - Source: "devpts", - Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, - }, - { - Destination: "/dev/shm", - Type: "tmpfs", - Source: "shm", - Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"}, - }, - { - Destination: "/dev/mqueue", - Type: "mqueue", - Source: "mqueue", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - { - Destination: "/sys", - Type: "sysfs", - Source: "sysfs", - Options: []string{"nosuid", "noexec", "nodev", "ro"}, - }, - } - for _, o := range mountOpts { - mounts = o(mounts) - } - return mounts -} - -func withROBind(src, dest string) func(m []specs.Mount) []specs.Mount { - return func(m []specs.Mount) []specs.Mount { - m = append(m, specs.Mount{ - Destination: dest, - Type: "bind", - Source: src, - Options: []string{"rbind", "ro"}, - }) - return m - } -} diff --git a/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go b/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go deleted file mode 100644 index f22eceed2230..000000000000 --- a/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go +++ /dev/null @@ -1,81 +0,0 @@ -package oci - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/libnetwork/resolvconf" - "github.com/moby/buildkit/util/flightcontrol" -) - -var g flightcontrol.Group -var notFirstRun bool -var lastNotEmpty bool - -func GetResolvConf(ctx context.Context, stateDir string) (string, error) { - p := filepath.Join(stateDir, "resolv.conf") - _, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) { - generate := !notFirstRun - notFirstRun = true - - if !generate { - fi, err := os.Stat(p) - if err != nil { - if !os.IsNotExist(err) { - return "", err - } - generate = true - } - if !generate { - fiMain, err := os.Stat("/etc/resolv.conf") - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - if lastNotEmpty { - generate = true - lastNotEmpty 
= false - } - } else { - if fi.ModTime().Before(fiMain.ModTime()) { - generate = true - } - } - } - } - - if !generate { - return "", nil - } - - var dt []byte - f, err := resolvconf.Get() - if err != nil { - if !os.IsNotExist(err) { - return "", err - } - } else { - dt = f.Content - } - - f, err = resolvconf.FilterResolvDNS(dt, true) - if err != nil { - return "", err - } - - if err := ioutil.WriteFile(p+".tmp", f.Content, 0644); err != nil { - return "", err - } - - if err := os.Rename(p+".tmp", p); err != nil { - return "", err - } - return "", nil - }) - if err != nil { - return "", err - } - return p, nil -} diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go b/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go deleted file mode 100644 index 3e1976a690ad..000000000000 --- a/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go +++ /dev/null @@ -1,186 +0,0 @@ -// +build !windows - -package oci - -import ( - "context" - "path" - "sync" - - "github.com/containerd/containerd/containers" - "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/oci" - "github.com/containerd/continuity/fs" - "github.com/mitchellh/hashstructure" - "github.com/moby/buildkit/executor" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/util/network" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -// Ideally we don't have to import whole containerd just for the default spec - -// GenerateSpec generates spec using containerd functionality. -func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, opts ...oci.SpecOpts) (*specs.Spec, func(), error) { - c := &containers.Container{ - ID: id, - } - _, ok := namespaces.Namespace(ctx) - if !ok { - ctx = namespaces.WithNamespace(ctx, "buildkit") - } - - // Note that containerd.GenerateSpec is namespaced so as to make - // specs.Linux.CgroupsPath namespaced - s, err := oci.GenerateSpec(ctx, nil, c, opts...) 
- if err != nil { - return nil, nil, err - } - // set the networking information on the spec - namespace.Set(s) - - s.Process.Args = meta.Args - s.Process.Env = meta.Env - s.Process.Cwd = meta.Cwd - s.Process.Rlimits = nil // reset open files limit - s.Hostname = "buildkitsandbox" - - s.Mounts = GetMounts(ctx, - withROBind(resolvConf, "/etc/resolv.conf"), - withROBind(hostsFile, "/etc/hosts"), - ) - - s.Mounts = append(s.Mounts, specs.Mount{ - Destination: "/sys/fs/cgroup", - Type: "cgroup", - Source: "cgroup", - Options: []string{"ro", "nosuid", "noexec", "nodev"}, - }) - - // TODO: User - - sm := &submounts{} - - var releasers []func() error - releaseAll := func() { - sm.cleanup() - for _, f := range releasers { - f() - } - } - - for _, m := range mounts { - if m.Src == nil { - return nil, nil, errors.Errorf("mount %s has no source", m.Dest) - } - mountable, err := m.Src.Mount(ctx, m.Readonly) - if err != nil { - releaseAll() - return nil, nil, errors.Wrapf(err, "failed to mount %s", m.Dest) - } - mounts, err := mountable.Mount() - if err != nil { - releaseAll() - return nil, nil, errors.WithStack(err) - } - releasers = append(releasers, mountable.Release) - for _, mount := range mounts { - mount, err = sm.subMount(mount, m.Selector) - if err != nil { - releaseAll() - return nil, nil, err - } - s.Mounts = append(s.Mounts, specs.Mount{ - Destination: m.Dest, - Type: mount.Type, - Source: mount.Source, - Options: mount.Options, - }) - } - } - - return s, releaseAll, nil -} - -type mountRef struct { - mount mount.Mount - unmount func() error -} - -type submounts struct { - m map[uint64]mountRef -} - -func (s *submounts) subMount(m mount.Mount, subPath string) (mount.Mount, error) { - if path.Join("/", subPath) == "/" { - return m, nil - } - if s.m == nil { - s.m = map[uint64]mountRef{} - } - h, err := hashstructure.Hash(m, nil) - if err != nil { - return mount.Mount{}, nil - } - if mr, ok := s.m[h]; ok { - sm, err := sub(mr.mount, subPath) - if err != nil { - return mount.Mount{}, nil - } - return sm, nil - } - - lm := snapshot.LocalMounterWithMounts([]mount.Mount{m}) - - mp, err := lm.Mount() - if err != nil { - return mount.Mount{}, err - } - - opts := []string{"rbind"} - for _, opt := range m.Options { - if opt == "ro" { - opts = append(opts, opt) - } - } - - s.m[h] = mountRef{ - mount: mount.Mount{ - Source: mp, - Type: "bind", - Options: opts, - }, - unmount: lm.Unmount, - } - - sm, err := sub(s.m[h].mount, subPath) - if err != nil { - return mount.Mount{}, err - } - return sm, nil -} - -func (s *submounts) cleanup() { - var wg sync.WaitGroup - wg.Add(len(s.m)) - for _, m := range s.m { - func(m mountRef) { - go func() { - m.unmount() - wg.Done() - }() - }(m) - } - wg.Wait() -} - -func sub(m mount.Mount, subPath string) (mount.Mount, error) { - src, err := fs.RootPath(m.Source, subPath) - if err != nil { - return mount.Mount{}, err - } - m.Source = src - return m, nil -} diff --git a/vendor/github.com/moby/buildkit/executor/oci/user.go b/vendor/github.com/moby/buildkit/executor/oci/user.go deleted file mode 100644 index ac5dbebdf294..000000000000 --- a/vendor/github.com/moby/buildkit/executor/oci/user.go +++ /dev/null @@ -1,107 +0,0 @@ -package oci - -import ( - "context" - "errors" - "os" - "strconv" - "strings" - - "github.com/containerd/containerd/containers" - containerdoci "github.com/containerd/containerd/oci" - "github.com/containerd/continuity/fs" - "github.com/opencontainers/runc/libcontainer/user" - "github.com/opencontainers/runtime-spec/specs-go" -) - -func GetUser(ctx 
context.Context, root, username string) (uint32, uint32, []uint32, error) { - // fast path from uid/gid - if uid, gid, err := ParseUIDGID(username); err == nil { - return uid, gid, nil, nil - } - - passwdPath, err := user.GetPasswdPath() - if err != nil { - return 0, 0, nil, err - } - groupPath, err := user.GetGroupPath() - if err != nil { - return 0, 0, nil, err - } - passwdFile, err := openUserFile(root, passwdPath) - if err == nil { - defer passwdFile.Close() - } - groupFile, err := openUserFile(root, groupPath) - if err == nil { - defer groupFile.Close() - } - - execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile) - if err != nil { - return 0, 0, nil, err - } - var sgids []uint32 - for _, g := range execUser.Sgids { - sgids = append(sgids, uint32(g)) - } - return uint32(execUser.Uid), uint32(execUser.Gid), sgids, nil -} - -// ParseUIDGID takes the fast path to parse UID and GID if and only if they are both provided -func ParseUIDGID(str string) (uid uint32, gid uint32, err error) { - if str == "" { - return 0, 0, nil - } - parts := strings.SplitN(str, ":", 2) - if len(parts) == 1 { - return 0, 0, errors.New("groups ID is not provided") - } - if uid, err = parseUID(parts[0]); err != nil { - return 0, 0, err - } - if gid, err = parseUID(parts[1]); err != nil { - return 0, 0, err - } - return -} - -func openUserFile(root, p string) (*os.File, error) { - p, err := fs.RootPath(root, p) - if err != nil { - return nil, err - } - return os.Open(p) -} - -func parseUID(str string) (uint32, error) { - if str == "root" { - return 0, nil - } - uid, err := strconv.ParseUint(str, 10, 32) - if err != nil { - return 0, err - } - return uint32(uid), nil -} - -// WithUIDGID allows the UID and GID for the Process to be set -// FIXME: This is a temporary fix for the missing supplementary GIDs from containerd. -// Once the PR in containerd is merged we should remove this function. -func WithUIDGID(uid, gid uint32, sgids []uint32) containerdoci.SpecOpts { - return func(_ context.Context, _ containerdoci.Client, _ *containers.Container, s *containerdoci.Spec) error { - setProcess(s) - s.Process.User.UID = uid - s.Process.User.GID = gid - s.Process.User.AdditionalGids = sgids - return nil - } -} - -// setProcess sets Process to empty if unset -// FIXME: Same on this one.
Need to be removed after containerd fix merged -func setProcess(s *containerdoci.Spec) { - if s.Process == nil { - s.Process = &specs.Process{} - } -} diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go deleted file mode 100644 index 8080f5714cdc..000000000000 --- a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go +++ /dev/null @@ -1,323 +0,0 @@ -package runcexecutor - -import ( - "context" - "encoding/json" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" - - "github.com/containerd/containerd/contrib/seccomp" - "github.com/containerd/containerd/mount" - containerdoci "github.com/containerd/containerd/oci" - "github.com/containerd/continuity/fs" - runc "github.com/containerd/go-runc" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/executor" - "github.com/moby/buildkit/executor/oci" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/network" - rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv" - "github.com/moby/buildkit/util/system" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type Opt struct { - // root directory - Root string - CommandCandidates []string - // without root privileges (has nothing to do with Opt.Root directory) - Rootless bool - // DefaultCgroupParent is the cgroup-parent name for executor - DefaultCgroupParent string -} - -var defaultCommandCandidates = []string{"buildkit-runc", "runc"} - -type runcExecutor struct { - runc *runc.Runc - root string - cmd string - cgroupParent string - rootless bool - networkProviders map[pb.NetMode]network.Provider -} - -func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Executor, error) { - cmds := opt.CommandCandidates - if cmds == nil { - cmds = defaultCommandCandidates - } - - var cmd string - var found bool - for _, cmd = range cmds { - if _, err := exec.LookPath(cmd); err == nil { - found = true - break - } - } - if !found { - return nil, errors.Errorf("failed to find %s binary", cmd) - } - - root := opt.Root - - if err := os.MkdirAll(root, 0700); err != nil { - return nil, errors.Wrapf(err, "failed to create %s", root) - } - - root, err := filepath.Abs(root) - if err != nil { - return nil, err - } - root, err = filepath.EvalSymlinks(root) - if err != nil { - return nil, err - } - - // clean up old hosts/resolv.conf file. 
ignore errors - os.RemoveAll(filepath.Join(root, "hosts")) - os.RemoveAll(filepath.Join(root, "resolv.conf")) - - runtime := &runc.Runc{ - Command: cmd, - Log: filepath.Join(root, "runc-log.json"), - LogFormat: runc.JSON, - PdeathSignal: syscall.SIGKILL, // this can still leak the process - Setpgid: true, - // we don't execute runc with --rootless=(true|false) explicitly, - // so as to support non-runc runtimes - } - - w := &runcExecutor{ - runc: runtime, - root: root, - cgroupParent: opt.DefaultCgroupParent, - rootless: opt.Rootless, - networkProviders: networkProviders, - } - return w, nil -} - -func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.Mountable, mounts []executor.Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error { - provider, ok := w.networkProviders[meta.NetMode] - if !ok { - return errors.Errorf("unknown network mode %s", meta.NetMode) - } - namespace, err := provider.New() - if err != nil { - return err - } - defer namespace.Close() - - if meta.NetMode == pb.NetMode_HOST { - logrus.Info("enabling HostNetworking") - } - - resolvConf, err := oci.GetResolvConf(ctx, w.root) - if err != nil { - return err - } - - hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts) - if err != nil { - return err - } - if clean != nil { - defer clean() - } - - mountable, err := root.Mount(ctx, false) - if err != nil { - return err - } - - rootMount, err := mountable.Mount() - if err != nil { - return err - } - defer mountable.Release() - - id := identity.NewID() - bundle := filepath.Join(w.root, id) - - if err := os.Mkdir(bundle, 0700); err != nil { - return err - } - defer os.RemoveAll(bundle) - rootFSPath := filepath.Join(bundle, "rootfs") - if err := os.Mkdir(rootFSPath, 0700); err != nil { - return err - } - if err := mount.All(rootMount, rootFSPath); err != nil { - return err - } - defer mount.Unmount(rootFSPath, 0) - - uid, gid, sgids, err := oci.GetUser(ctx, rootFSPath, meta.User) - if err != nil { - return err - } - - f, err := os.Create(filepath.Join(bundle, "config.json")) - if err != nil { - return err - } - defer f.Close() - - opts := []containerdoci.SpecOpts{oci.WithUIDGID(uid, gid, sgids)} - if system.SeccompSupported() { - opts = append(opts, seccomp.WithDefaultProfile()) - } - if meta.ReadonlyRootFS { - opts = append(opts, containerdoci.WithRootFSReadonly()) - } - - if w.cgroupParent != "" { - var cgroupsPath string - lastSeparator := w.cgroupParent[len(w.cgroupParent)-1:] - if strings.Contains(w.cgroupParent, ".slice") && lastSeparator == ":" { - cgroupsPath = w.cgroupParent + id - } else { - cgroupsPath = filepath.Join("/", w.cgroupParent, "buildkit", id) - } - opts = append(opts, containerdoci.WithCgroup(cgroupsPath)) - } - spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, opts...) 
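
An aside on the cgroup handling in the deleted Exec here, before its error handling continues below: a DefaultCgroupParent that contains ".slice" and ends with ":" is treated as systemd slice syntax and concatenated directly with the container ID, while anything else is treated as a plain cgroupfs path under a "buildkit" subtree. A minimal standalone sketch of that rule; the helper name cgroupsPath is ours, not part of the deleted code:

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    // cgroupsPath mirrors the branch in the deleted Exec: systemd slice
    // syntax (e.g. "system.slice:buildkit:") is concatenated directly with
    // the container ID; anything else becomes /<parent>/buildkit/<id>.
    func cgroupsPath(parent, id string) string {
        if strings.Contains(parent, ".slice") && strings.HasSuffix(parent, ":") {
            return parent + id
        }
        return filepath.Join("/", parent, "buildkit", id)
    }

    func main() {
        fmt.Println(cgroupsPath("system.slice:buildkit:", "abc")) // system.slice:buildkit:abc
        fmt.Println(cgroupsPath("parent", "abc"))                 // /parent/buildkit/abc
    }
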
- if err != nil { - return err - } - defer cleanup() - - spec.Root.Path = rootFSPath - if _, ok := root.(cache.ImmutableRef); ok { // TODO: pass in with mount, not ref type - spec.Root.Readonly = true - } - - newp, err := fs.RootPath(rootFSPath, meta.Cwd) - if err != nil { - return errors.Wrapf(err, "working dir %s points to invalid target", newp) - } - if err := os.MkdirAll(newp, 0755); err != nil { - return errors.Wrapf(err, "failed to create working directory %s", newp) - } - - if err := setOOMScoreAdj(spec); err != nil { - return err - } - if w.rootless { - if err := rootlessspecconv.ToRootless(spec); err != nil { - return err - } - } - - if err := json.NewEncoder(f).Encode(spec); err != nil { - return err - } - - // runCtx/killCtx is used for extra check in case the kill command blocks - runCtx, cancelRun := context.WithCancel(context.Background()) - defer cancelRun() - - done := make(chan struct{}) - go func() { - for { - select { - case <-ctx.Done(): - killCtx, timeout := context.WithTimeout(context.Background(), 7*time.Second) - if err := w.runc.Kill(killCtx, id, int(syscall.SIGKILL), nil); err != nil { - logrus.Errorf("failed to kill runc %s: %+v", id, err) - select { - case <-killCtx.Done(): - timeout() - cancelRun() - return - default: - } - } - timeout() - select { - case <-time.After(50 * time.Millisecond): - case <-done: - return - } - case <-done: - return - } - } - }() - - logrus.Debugf("> creating %s %v", id, meta.Args) - status, err := w.runc.Run(runCtx, id, bundle, &runc.CreateOpts{ - IO: &forwardIO{stdin: stdin, stdout: stdout, stderr: stderr}, - }) - close(done) - if err != nil { - return err - } - - if status != 0 { - err := errors.Errorf("exit code: %d", status) - select { - case <-ctx.Done(): - return errors.Wrapf(ctx.Err(), err.Error()) - default: - return err - } - } - - return nil -} - -type forwardIO struct { - stdin io.ReadCloser - stdout, stderr io.WriteCloser -} - -func (s *forwardIO) Close() error { - return nil -} - -func (s *forwardIO) Set(cmd *exec.Cmd) { - cmd.Stdin = s.stdin - cmd.Stdout = s.stdout - cmd.Stderr = s.stderr -} - -func (s *forwardIO) Stdin() io.WriteCloser { - return nil -} - -func (s *forwardIO) Stdout() io.ReadCloser { - return nil -} - -func (s *forwardIO) Stderr() io.ReadCloser { - return nil -} - -// setOOMScoreAdj comes from https://github.com/genuinetools/img/blob/2fabe60b7dc4623aa392b515e013bbc69ad510ab/executor/runc/executor.go#L182-L192 -func setOOMScoreAdj(spec *specs.Spec) error { - // Set the oom_score_adj of our children containers to that of the current process. 
- b, err := ioutil.ReadFile("/proc/self/oom_score_adj") - if err != nil { - return errors.Wrap(err, "failed to read /proc/self/oom_score_adj") - } - s := strings.TrimSpace(string(b)) - oom, err := strconv.Atoi(s) - if err != nil { - return errors.Wrapf(err, "failed to parse %s as int", s) - } - spec.Process.OOMScoreAdj = &oom - return nil -} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/export.go b/vendor/github.com/moby/buildkit/exporter/containerimage/export.go deleted file mode 100644 index a440f2d04910..000000000000 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/export.go +++ /dev/null @@ -1,159 +0,0 @@ -package containerimage - -import ( - "context" - "strconv" - "strings" - "time" - - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" - "github.com/moby/buildkit/exporter" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/util/push" - "github.com/moby/buildkit/util/resolver" - "github.com/pkg/errors" -) - -const ( - keyImageName = "name" - keyPush = "push" - keyInsecure = "registry.insecure" - ociTypes = "oci-mediatypes" -) - -type Opt struct { - SessionManager *session.Manager - ImageWriter *ImageWriter - Images images.Store - ResolverOpt resolver.ResolveOptionsFunc -} - -type imageExporter struct { - opt Opt -} - -// New returns a new containerimage exporter instance that supports exporting -// to an image store and pushing the image to registry. -// This exporter supports following values in returned kv map: -// - containerimage.digest - The digest of the root manifest for the image. -func New(opt Opt) (exporter.Exporter, error) { - im := &imageExporter{opt: opt} - return im, nil -} - -func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { - i := &imageExporterInstance{imageExporter: e} - for k, v := range opt { - switch k { - case keyImageName: - i.targetName = v - case keyPush: - if v == "" { - i.push = true - continue - } - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value specified for %s", k) - } - i.push = b - case keyInsecure: - if v == "" { - i.insecure = true - continue - } - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value specified for %s", k) - } - i.insecure = b - case ociTypes: - if v == "" { - i.ociTypes = true - continue - } - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value specified for %s", k) - } - i.ociTypes = b - default: - if i.meta == nil { - i.meta = make(map[string][]byte) - } - i.meta[k] = []byte(v) - } - } - return i, nil -} - -type imageExporterInstance struct { - *imageExporter - targetName string - push bool - insecure bool - ociTypes bool - meta map[string][]byte -} - -func (e *imageExporterInstance) Name() string { - return "exporting to image" -} - -func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source) (map[string]string, error) { - if src.Metadata == nil { - src.Metadata = make(map[string][]byte) - } - for k, v := range e.meta { - src.Metadata[k] = v - } - desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes) - if err != nil { - return nil, err - } - - defer func() { - e.opt.ImageWriter.ContentStore().Delete(context.TODO(), desc.Digest) - }() - - resp := make(map[string]string) - - if n, ok := src.Metadata["image.name"]; e.targetName == "*" && ok { - e.targetName = string(n) - } - - if e.targetName != "" { - targetNames := 
strings.Split(e.targetName, ",") - for _, targetName := range targetNames { - if e.opt.Images != nil { - tagDone := oneOffProgress(ctx, "naming to "+targetName) - img := images.Image{ - Name: targetName, - Target: *desc, - CreatedAt: time.Now(), - } - - if _, err := e.opt.Images.Update(ctx, img); err != nil { - if !errdefs.IsNotFound(err) { - return nil, tagDone(err) - } - - if _, err := e.opt.Images.Create(ctx, img); err != nil { - return nil, tagDone(err) - } - } - tagDone(nil) - } - if e.push { - if err := push.Push(ctx, e.opt.SessionManager, e.opt.ImageWriter.ContentStore(), desc.Digest, targetName, e.insecure, e.opt.ResolverOpt); err != nil { - return nil, err - } - } - } - resp["image.name"] = e.targetName - } - - resp["containerimage.digest"] = desc.Digest.String() - return resp, nil -} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go deleted file mode 100644 index 9821f377d944..000000000000 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go +++ /dev/null @@ -1,15 +0,0 @@ -package exptypes - -import specs "github.com/opencontainers/image-spec/specs-go/v1" - -const ExporterImageConfigKey = "containerimage.config" -const ExporterPlatformsKey = "refs.platforms" - -type Platforms struct { - Platforms []Platform -} - -type Platform struct { - ID string - Platform specs.Platform -} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go b/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go deleted file mode 100644 index edc12b44203e..000000000000 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go +++ /dev/null @@ -1,457 +0,0 @@ -package containerimage - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "runtime" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/diff" - "github.com/containerd/containerd/images" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/blobs" - "github.com/moby/buildkit/exporter" - "github.com/moby/buildkit/exporter/containerimage/exptypes" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/util/progress" - "github.com/moby/buildkit/util/system" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sync/errgroup" -) - -const ( - emptyGZLayer = digest.Digest("sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1") -) - -type WriterOpt struct { - Snapshotter snapshot.Snapshotter - ContentStore content.Store - Differ diff.Comparer -} - -func NewImageWriter(opt WriterOpt) (*ImageWriter, error) { - return &ImageWriter{opt: opt}, nil -} - -type ImageWriter struct { - opt WriterOpt -} - -func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool) (*ocispec.Descriptor, error) { - platformsBytes, ok := inp.Metadata[exptypes.ExporterPlatformsKey] - - if len(inp.Refs) > 0 && !ok { - return nil, errors.Errorf("unable to export multiple refs, missing platforms mapping") - } - - if len(inp.Refs) == 0 { - layers, err := ic.exportLayers(ctx, inp.Ref) - if err != nil { - return nil, err - } - return ic.commitDistributionManifest(ctx, inp.Ref, inp.Metadata[exptypes.ExporterImageConfigKey], layers[0], oci) - } - - var p exptypes.Platforms - if err := json.Unmarshal(platformsBytes, 
&p); err != nil { - return nil, errors.Wrapf(err, "failed to parse platforms passed to exporter") - } - - if len(p.Platforms) != len(inp.Refs) { - return nil, errors.Errorf("number of platforms does not match references %d %d", len(p.Platforms), len(inp.Refs)) - } - - refs := make([]cache.ImmutableRef, 0, len(inp.Refs)) - layersMap := make(map[string]int, len(inp.Refs)) - for id, r := range inp.Refs { - layersMap[id] = len(refs) - refs = append(refs, r) - } - - layers, err := ic.exportLayers(ctx, refs...) - if err != nil { - return nil, err - } - - idx := struct { - // MediaType is reserved in the OCI spec but - // excluded from go types. - MediaType string `json:"mediaType,omitempty"` - - ocispec.Index - }{ - MediaType: ocispec.MediaTypeImageIndex, - Index: ocispec.Index{ - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - }, - } - - if !oci { - idx.MediaType = images.MediaTypeDockerSchema2ManifestList - } - - labels := map[string]string{} - - for i, p := range p.Platforms { - r, ok := inp.Refs[p.ID] - if !ok { - return nil, errors.Errorf("failed to find ref for ID %s", p.ID) - } - config := inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, p.ID)] - - desc, err := ic.commitDistributionManifest(ctx, r, config, layers[layersMap[p.ID]], oci) - if err != nil { - return nil, err - } - dp := p.Platform - desc.Platform = &dp - idx.Manifests = append(idx.Manifests, *desc) - - labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = desc.Digest.String() - } - - idxBytes, err := json.MarshalIndent(idx, "", " ") - if err != nil { - return nil, errors.Wrap(err, "failed to marshal index") - } - - idxDigest := digest.FromBytes(idxBytes) - idxDesc := ocispec.Descriptor{ - Digest: idxDigest, - Size: int64(len(idxBytes)), - MediaType: idx.MediaType, - } - idxDone := oneOffProgress(ctx, "exporting manifest list "+idxDigest.String()) - - if err := content.WriteBlob(ctx, ic.opt.ContentStore, idxDigest.String(), bytes.NewReader(idxBytes), idxDesc, content.WithLabels(labels)); err != nil { - return nil, idxDone(errors.Wrapf(err, "error writing manifest list blob %s", idxDigest)) - } - idxDone(nil) - - for _, desc := range idx.Manifests { - // delete manifest root. 
manifest will remain linked to the index - if err := ic.opt.ContentStore.Delete(context.TODO(), desc.Digest); err != nil { - return nil, errors.Wrap(err, "error removing manifest root") - } - } - - return &idxDesc, nil -} - -func (ic *ImageWriter) exportLayers(ctx context.Context, refs ...cache.ImmutableRef) ([][]blobs.DiffPair, error) { - eg, ctx := errgroup.WithContext(ctx) - layersDone := oneOffProgress(ctx, "exporting layers") - - out := make([][]blobs.DiffPair, len(refs)) - - for i, ref := range refs { - func(i int, ref cache.ImmutableRef) { - eg.Go(func() error { - diffPairs, err := blobs.GetDiffPairs(ctx, ic.opt.ContentStore, ic.opt.Snapshotter, ic.opt.Differ, ref, true) - if err != nil { - return errors.Wrap(err, "failed calculating diff pairs for exported snapshot") - } - out[i] = diffPairs - return nil - }) - }(i, ref) - } - - if err := layersDone(eg.Wait()); err != nil { - return nil, err - } - - return out, nil -} - -func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache.ImmutableRef, config []byte, layers []blobs.DiffPair, oci bool) (*ocispec.Descriptor, error) { - if len(config) == 0 { - var err error - config, err = emptyImageConfig() - if err != nil { - return nil, err - } - } - - history, err := parseHistoryFromConfig(config) - if err != nil { - return nil, err - } - - diffPairs, history := normalizeLayersAndHistory(layers, history, ref) - - config, err = patchImageConfig(config, diffPairs, history) - if err != nil { - return nil, err - } - - var ( - configDigest = digest.FromBytes(config) - manifestType = ocispec.MediaTypeImageManifest - configType = ocispec.MediaTypeImageConfig - layerType = ocispec.MediaTypeImageLayerGzip - ) - - // Use docker media types for older Docker versions and registries - if !oci { - manifestType = images.MediaTypeDockerSchema2Manifest - configType = images.MediaTypeDockerSchema2Config - layerType = images.MediaTypeDockerSchema2LayerGzip - } - - mfst := struct { - // MediaType is reserved in the OCI spec but - // excluded from go types. 
- MediaType string `json:"mediaType,omitempty"` - - ocispec.Manifest - }{ - MediaType: manifestType, - Manifest: ocispec.Manifest{ - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - Config: ocispec.Descriptor{ - Digest: configDigest, - Size: int64(len(config)), - MediaType: configType, - }, - }, - } - - labels := map[string]string{ - "containerd.io/gc.ref.content.0": configDigest.String(), - } - - for i, dp := range diffPairs { - info, err := ic.opt.ContentStore.Info(ctx, dp.Blobsum) - if err != nil { - return nil, errors.Wrapf(err, "could not find blob %s from contentstore", dp.Blobsum) - } - mfst.Layers = append(mfst.Layers, ocispec.Descriptor{ - Digest: dp.Blobsum, - Size: info.Size, - MediaType: layerType, - }) - labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = dp.Blobsum.String() - } - - mfstJSON, err := json.MarshalIndent(mfst, "", " ") - if err != nil { - return nil, errors.Wrap(err, "failed to marshal manifest") - } - - mfstDigest := digest.FromBytes(mfstJSON) - mfstDesc := ocispec.Descriptor{ - Digest: mfstDigest, - Size: int64(len(mfstJSON)), - } - mfstDone := oneOffProgress(ctx, "exporting manifest "+mfstDigest.String()) - - if err := content.WriteBlob(ctx, ic.opt.ContentStore, mfstDigest.String(), bytes.NewReader(mfstJSON), mfstDesc, content.WithLabels(labels)); err != nil { - return nil, mfstDone(errors.Wrapf(err, "error writing manifest blob %s", mfstDigest)) - } - mfstDone(nil) - - configDesc := ocispec.Descriptor{ - Digest: configDigest, - Size: int64(len(config)), - MediaType: configType, - } - configDone := oneOffProgress(ctx, "exporting config "+configDigest.String()) - - if err := content.WriteBlob(ctx, ic.opt.ContentStore, configDigest.String(), bytes.NewReader(config), configDesc); err != nil { - return nil, configDone(errors.Wrap(err, "error writing config blob")) - } - configDone(nil) - - // delete config root. 
config will remain linked to the manifest - if err := ic.opt.ContentStore.Delete(context.TODO(), configDigest); err != nil { - return nil, errors.Wrap(err, "error removing config root") - } - - return &ocispec.Descriptor{ - Digest: mfstDigest, - Size: int64(len(mfstJSON)), - MediaType: manifestType, - }, nil -} - -func (ic *ImageWriter) ContentStore() content.Store { - return ic.opt.ContentStore -} - -func emptyImageConfig() ([]byte, error) { - img := ocispec.Image{ - Architecture: runtime.GOARCH, - OS: runtime.GOOS, - } - img.RootFS.Type = "layers" - img.Config.WorkingDir = "/" - img.Config.Env = []string{"PATH=" + system.DefaultPathEnv} - dt, err := json.Marshal(img) - return dt, errors.Wrap(err, "failed to create empty image config") -} - -func parseHistoryFromConfig(dt []byte) ([]ocispec.History, error) { - var config struct { - History []ocispec.History - } - if err := json.Unmarshal(dt, &config); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal history from config") - } - return config.History, nil -} - -func patchImageConfig(dt []byte, dps []blobs.DiffPair, history []ocispec.History) ([]byte, error) { - m := map[string]json.RawMessage{} - if err := json.Unmarshal(dt, &m); err != nil { - return nil, errors.Wrap(err, "failed to parse image config for patch") - } - - var rootFS ocispec.RootFS - rootFS.Type = "layers" - for _, dp := range dps { - rootFS.DiffIDs = append(rootFS.DiffIDs, dp.DiffID) - } - dt, err := json.Marshal(rootFS) - if err != nil { - return nil, errors.Wrap(err, "failed to marshal rootfs") - } - m["rootfs"] = dt - - dt, err = json.Marshal(history) - if err != nil { - return nil, errors.Wrap(err, "failed to marshal history") - } - m["history"] = dt - - if _, ok := m["created"]; !ok { - var tm *time.Time - for _, h := range history { - if h.Created != nil { - tm = h.Created - } - } - dt, err = json.Marshal(&tm) - if err != nil { - return nil, errors.Wrap(err, "failed to marshal creation time") - } - m["created"] = dt - } - - dt, err = json.Marshal(m) - return dt, errors.Wrap(err, "failed to marshal config after patch") -} - -func normalizeLayersAndHistory(diffs []blobs.DiffPair, history []ocispec.History, ref cache.ImmutableRef) ([]blobs.DiffPair, []ocispec.History) { - - refMeta := getRefMetadata(ref, len(diffs)) - - var historyLayers int - for _, h := range history { - if !h.EmptyLayer { - historyLayers += 1 - } - } - - if historyLayers > len(diffs) { - // this case shouldn't happen but if it does force set history layers empty - // from the bottom - logrus.Warn("invalid image config with unaccounted layers") - historyCopy := make([]ocispec.History, 0, len(history)) - var l int - for _, h := range history { - if l >= len(diffs) { - h.EmptyLayer = true - } - if !h.EmptyLayer { - l++ - } - historyCopy = append(historyCopy, h) - } - history = historyCopy - } - - if len(diffs) > historyLayers { - // some history items are missing. add them based on the ref metadata - for _, md := range refMeta[historyLayers:] { - history = append(history, ocispec.History{ - Created: &md.createdAt, - CreatedBy: md.description, - Comment: "buildkit.exporter.image.v0", - }) - } - } - - var layerIndex int - for i, h := range history { - if !h.EmptyLayer { - if h.Created == nil { - h.Created = &refMeta[layerIndex].createdAt - } - if diffs[layerIndex].Blobsum == emptyGZLayer { - h.EmptyLayer = true - diffs = append(diffs[:layerIndex], diffs[layerIndex+1:]...) 
- } else { - layerIndex++ - } - } - history[i] = h - } - - return diffs, history -} - -type refMetadata struct { - description string - createdAt time.Time -} - -func getRefMetadata(ref cache.ImmutableRef, limit int) []refMetadata { - if limit <= 0 { - return nil - } - meta := refMetadata{ - description: "created by buildkit", // shouldn't be shown but don't fail build - createdAt: time.Now(), - } - if ref == nil { - return append(getRefMetadata(nil, limit-1), meta) - } - if descr := cache.GetDescription(ref.Metadata()); descr != "" { - meta.description = descr - } - meta.createdAt = cache.GetCreatedAt(ref.Metadata()) - p := ref.Parent() - if p != nil { - defer p.Release(context.TODO()) - } - return append(getRefMetadata(p, limit-1), meta) -} - -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.FromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - // TODO: set error on status - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err - } -} diff --git a/vendor/github.com/moby/buildkit/exporter/exporter.go b/vendor/github.com/moby/buildkit/exporter/exporter.go deleted file mode 100644 index cd6d1930a5c5..000000000000 --- a/vendor/github.com/moby/buildkit/exporter/exporter.go +++ /dev/null @@ -1,22 +0,0 @@ -package exporter - -import ( - "context" - - "github.com/moby/buildkit/cache" -) - -type Exporter interface { - Resolve(context.Context, map[string]string) (ExporterInstance, error) -} - -type ExporterInstance interface { - Name() string - Export(context.Context, Source) (map[string]string, error) -} - -type Source struct { - Ref cache.ImmutableRef - Refs map[string]cache.ImmutableRef - Metadata map[string][]byte -} diff --git a/vendor/github.com/moby/buildkit/exporter/local/export.go b/vendor/github.com/moby/buildkit/exporter/local/export.go deleted file mode 100644 index 8140af644430..000000000000 --- a/vendor/github.com/moby/buildkit/exporter/local/export.go +++ /dev/null @@ -1,148 +0,0 @@ -package local - -import ( - "context" - "io/ioutil" - "os" - "strings" - "time" - - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/exporter" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/filesync" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/util/progress" - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil" - fstypes "github.com/tonistiigi/fsutil/types" - "golang.org/x/sync/errgroup" - "golang.org/x/time/rate" -) - -type Opt struct { - SessionManager *session.Manager -} - -type localExporter struct { - opt Opt - // session manager -} - -func New(opt Opt) (exporter.Exporter, error) { - le := &localExporter{opt: opt} - return le, nil -} - -func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { - id := session.FromContext(ctx) - if id == "" { - return nil, errors.New("could not access local files without session") - } - - timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - caller, err := e.opt.SessionManager.Get(timeoutCtx, id) - if err != nil { - return nil, err - } - - li := &localExporterInstance{localExporter: e, caller: caller} - return li, nil -} - -type localExporterInstance struct { - *localExporter - caller session.Caller -} - -func (e *localExporterInstance) Name() string { - return "exporting to client" -} - -func (e *localExporterInstance) Export(ctx context.Context, inp 
exporter.Source) (map[string]string, error) { - isMap := len(inp.Refs) > 0 - - export := func(ctx context.Context, k string, ref cache.ImmutableRef) func() error { - return func() error { - var src string - var err error - if ref == nil { - src, err = ioutil.TempDir("", "buildkit") - if err != nil { - return err - } - defer os.RemoveAll(src) - } else { - mount, err := ref.Mount(ctx, true) - if err != nil { - return err - } - - lm := snapshot.LocalMounter(mount) - - src, err = lm.Mount() - if err != nil { - return err - } - defer lm.Unmount() - } - - fs := fsutil.NewFS(src, nil) - lbl := "copying files" - if isMap { - lbl += " " + k - fs = fsutil.SubDirFS(fs, fstypes.Stat{ - Mode: uint32(os.ModeDir | 0755), - Path: strings.Replace(k, "/", "_", -1), - }) - } - - progress := newProgressHandler(ctx, lbl) - if err := filesync.CopyToCaller(ctx, fs, e.caller, progress); err != nil { - return err - } - return nil - } - } - - eg, ctx := errgroup.WithContext(ctx) - - if isMap { - for k, ref := range inp.Refs { - eg.Go(export(ctx, k, ref)) - } - } else { - eg.Go(export(ctx, "", inp.Ref)) - } - - if err := eg.Wait(); err != nil { - return nil, err - } - return nil, nil -} - -func newProgressHandler(ctx context.Context, id string) func(int, bool) { - limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 1) - pw, _, _ := progress.FromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - Action: "transferring", - } - pw.Write(id, st) - return func(s int, last bool) { - if last || limiter.Allow() { - st.Current = s - if last { - now := time.Now() - st.Completed = &now - } - pw.Write(id, st) - if last { - pw.Close() - } - } - } -} diff --git a/vendor/github.com/moby/buildkit/exporter/oci/export.go b/vendor/github.com/moby/buildkit/exporter/oci/export.go deleted file mode 100644 index 86d36b5a4d5b..000000000000 --- a/vendor/github.com/moby/buildkit/exporter/oci/export.go +++ /dev/null @@ -1,202 +0,0 @@ -package oci - -import ( - "context" - "strconv" - "time" - - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/images/oci" - "github.com/docker/distribution/reference" - "github.com/moby/buildkit/exporter" - "github.com/moby/buildkit/exporter/containerimage" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/filesync" - "github.com/moby/buildkit/util/dockerexporter" - "github.com/moby/buildkit/util/progress" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type ExporterVariant string - -const ( - keyImageName = "name" - VariantOCI = "oci" - VariantDocker = "docker" - ociTypes = "oci-mediatypes" -) - -type Opt struct { - SessionManager *session.Manager - ImageWriter *containerimage.ImageWriter - Variant ExporterVariant -} - -type imageExporter struct { - opt Opt -} - -func New(opt Opt) (exporter.Exporter, error) { - im := &imageExporter{opt: opt} - return im, nil -} - -func normalize(name string) (string, error) { - if name == "" { - return "", nil - } - parsed, err := reference.ParseNormalizedNamed(name) - if err != nil { - return "", errors.Wrapf(err, "failed to parse %s", name) - } - return reference.TagNameOnly(parsed).String(), nil -} - -func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { - id := session.FromContext(ctx) - if id == "" { - return nil, errors.New("could not access local files without session") - } - - timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - caller, err := 
e.opt.SessionManager.Get(timeoutCtx, id) - if err != nil { - return nil, err - } - - var ot *bool - i := &imageExporterInstance{imageExporter: e, caller: caller} - for k, v := range opt { - switch k { - case keyImageName: - i.name = v - if i.name != "*" { - i.name, err = normalize(i.name) - if err != nil { - return nil, err - } - } - case ociTypes: - ot = new(bool) - if v == "" { - *ot = true - continue - } - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value specified for %s", k) - } - *ot = b - default: - if i.meta == nil { - i.meta = make(map[string][]byte) - } - i.meta[k] = []byte(v) - } - } - if ot == nil { - i.ociTypes = e.opt.Variant == VariantOCI - } else { - i.ociTypes = *ot - } - return i, nil -} - -type imageExporterInstance struct { - *imageExporter - meta map[string][]byte - caller session.Caller - name string - ociTypes bool -} - -func (e *imageExporterInstance) Name() string { - return "exporting to oci image format" -} - -func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source) (map[string]string, error) { - if e.opt.Variant == VariantDocker && len(src.Refs) > 0 { - return nil, errors.Errorf("docker exporter does not currently support exporting manifest lists") - } - - if src.Metadata == nil { - src.Metadata = make(map[string][]byte) - } - for k, v := range e.meta { - src.Metadata[k] = v - } - - desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes) - if err != nil { - return nil, err - } - defer func() { - e.opt.ImageWriter.ContentStore().Delete(context.TODO(), desc.Digest) - }() - if desc.Annotations == nil { - desc.Annotations = map[string]string{} - } - desc.Annotations[ocispec.AnnotationCreated] = time.Now().UTC().Format(time.RFC3339) - - resp := make(map[string]string) - - if n, ok := src.Metadata["image.name"]; e.name == "*" && ok { - if e.name, err = normalize(string(n)); err != nil { - return nil, err - } - } - - if e.name != "" { - resp["image.name"] = e.name - } - - exp, err := getExporter(e.opt.Variant, e.name) - if err != nil { - return nil, err - } - - w, err := filesync.CopyFileWriter(ctx, e.caller) - if err != nil { - return nil, err - } - report := oneOffProgress(ctx, "sending tarball") - if err := exp.Export(ctx, e.opt.ImageWriter.ContentStore(), *desc, w); err != nil { - w.Close() - return nil, report(err) - } - return resp, report(w.Close()) -} - -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.FromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - // TODO: set error on status - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err - } -} - -func getExporter(variant ExporterVariant, name string) (images.Exporter, error) { - switch variant { - case VariantOCI: - if name != "" { - return nil, errors.New("oci exporter cannot export named image") - } - return &oci.V1Exporter{}, nil - case VariantDocker: - return &dockerexporter.DockerExporter{Name: name}, nil - default: - return nil, errors.Errorf("invalid variant %q", variant) - } -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go deleted file mode 100644 index 7e082df0824c..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go +++ /dev/null @@ -1,480 +0,0 @@ -package builder - -import ( - "archive/tar" - "bytes" - "context" - 
"encoding/csv" - "encoding/json" - "fmt" - "net" - "regexp" - "strconv" - "strings" - - "github.com/containerd/containerd/platforms" - "github.com/docker/docker/builder/dockerignore" - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/exporter/containerimage/exptypes" - "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb" - "github.com/moby/buildkit/frontend/gateway/client" - "github.com/moby/buildkit/solver/pb" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "golang.org/x/sync/errgroup" -) - -const ( - LocalNameContext = "context" - LocalNameDockerfile = "dockerfile" - keyTarget = "target" - keyFilename = "filename" - keyCacheFrom = "cache-from" - defaultDockerfileName = "Dockerfile" - dockerignoreFilename = ".dockerignore" - buildArgPrefix = "build-arg:" - labelPrefix = "label:" - keyNoCache = "no-cache" - keyTargetPlatform = "platform" - keyMultiPlatform = "multi-platform" - keyImageResolveMode = "image-resolve-mode" - keyGlobalAddHosts = "add-hosts" - keyForceNetwork = "force-network-mode" - keyOverrideCopyImage = "override-copy-image" // remove after CopyOp implemented -) - -var httpPrefix = regexp.MustCompile("^https?://") -var gitUrlPathWithFragmentSuffix = regexp.MustCompile("\\.git(?:#.+)?$") - -func Build(ctx context.Context, c client.Client) (*client.Result, error) { - opts := c.BuildOpts().Opts - caps := c.BuildOpts().LLBCaps - - marshalOpts := []llb.ConstraintsOpt{llb.WithCaps(caps)} - - defaultBuildPlatform := platforms.DefaultSpec() - if workers := c.BuildOpts().Workers; len(workers) > 0 && len(workers[0].Platforms) > 0 { - defaultBuildPlatform = workers[0].Platforms[0] - } - - buildPlatforms := []specs.Platform{defaultBuildPlatform} - targetPlatforms := []*specs.Platform{nil} - if v := opts[keyTargetPlatform]; v != "" { - var err error - targetPlatforms, err = parsePlatforms(v) - if err != nil { - return nil, err - } - } - - resolveMode, err := parseResolveMode(opts[keyImageResolveMode]) - if err != nil { - return nil, err - } - - extraHosts, err := parseExtraHosts(opts[keyGlobalAddHosts]) - if err != nil { - return nil, errors.Wrap(err, "failed to parse additional hosts") - } - - defaultNetMode, err := parseNetMode(opts[keyForceNetwork]) - if err != nil { - return nil, err - } - - filename := opts[keyFilename] - if filename == "" { - filename = defaultDockerfileName - } - - var ignoreCache []string - if v, ok := opts[keyNoCache]; ok { - if v == "" { - ignoreCache = []string{} // means all stages - } else { - ignoreCache = strings.Split(v, ",") - } - } - - name := "load build definition from " + filename - - src := llb.Local(LocalNameDockerfile, - llb.IncludePatterns([]string{filename}), - llb.SessionID(c.BuildOpts().SessionID), - llb.SharedKeyHint(defaultDockerfileName), - dockerfile2llb.WithInternalName(name), - ) - var buildContext *llb.State - isScratchContext := false - if st, ok := detectGitContext(opts[LocalNameContext]); ok { - src = *st - buildContext = &src - } else if httpPrefix.MatchString(opts[LocalNameContext]) { - httpContext := llb.HTTP(opts[LocalNameContext], llb.Filename("context"), dockerfile2llb.WithInternalName("load remote build context")) - def, err := httpContext.Marshal(marshalOpts...) 
- if err != nil { - return nil, errors.Wrapf(err, "failed to marshal httpcontext") - } - res, err := c.Solve(ctx, client.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return nil, errors.Wrapf(err, "failed to resolve httpcontext") - } - - ref, err := res.SingleRef() - if err != nil { - return nil, err - } - - dt, err := ref.ReadFile(ctx, client.ReadRequest{ - Filename: "context", - Range: &client.FileRange{ - Length: 1024, - }, - }) - if err != nil { - return nil, errors.Errorf("failed to read downloaded context") - } - if isArchive(dt) { - copyImage := opts[keyOverrideCopyImage] - if copyImage == "" { - copyImage = dockerfile2llb.DefaultCopyImage - } - unpack := llb.Image(copyImage, dockerfile2llb.WithInternalName("helper image for file operations")). - Run(llb.Shlex("copy --unpack /src/context /out/"), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("extracting build context")) - unpack.AddMount("/src", httpContext, llb.Readonly) - src = unpack.AddMount("/out", llb.Scratch()) - buildContext = &src - } else { - filename = "context" - src = httpContext - buildContext = &src - isScratchContext = true - } - } - - def, err := src.Marshal(marshalOpts...) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal local source") - } - - eg, ctx2 := errgroup.WithContext(ctx) - var dtDockerfile []byte - eg.Go(func() error { - res, err := c.Solve(ctx2, client.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return errors.Wrapf(err, "failed to resolve dockerfile") - } - - ref, err := res.SingleRef() - if err != nil { - return err - } - - dtDockerfile, err = ref.ReadFile(ctx2, client.ReadRequest{ - Filename: filename, - }) - if err != nil { - return errors.Wrapf(err, "failed to read dockerfile") - } - return nil - }) - var excludes []string - if !isScratchContext { - eg.Go(func() error { - dockerignoreState := buildContext - if dockerignoreState == nil { - st := llb.Local(LocalNameContext, - llb.SessionID(c.BuildOpts().SessionID), - llb.IncludePatterns([]string{dockerignoreFilename}), - llb.SharedKeyHint(dockerignoreFilename), - dockerfile2llb.WithInternalName("load "+dockerignoreFilename), - ) - dockerignoreState = &st - } - def, err := dockerignoreState.Marshal(marshalOpts...) 
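
For the remote-context branch above: Build reads at most the first 1024 bytes of the downloaded file and probes whether they look like an archive before deciding to unpack it with the helper copy image; otherwise the download is treated as a Dockerfile. The probe is the isArchive helper defined further down in this same deleted file, reproduced here as a self-contained, runnable sketch with a small usage check (the .dockerignore handling continues below):

    package main

    import (
        "archive/tar"
        "bytes"
        "fmt"
    )

    // isArchive checks well-known compression magic numbers first, then
    // falls back to attempting to read a tar header from the bytes.
    func isArchive(header []byte) bool {
        for _, m := range [][]byte{
            {0x42, 0x5A, 0x68},                   // bzip2
            {0x1F, 0x8B, 0x08},                   // gzip
            {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // xz
        } {
            if len(header) >= len(m) && bytes.Equal(m, header[:len(m)]) {
                return true
            }
        }
        r := tar.NewReader(bytes.NewBuffer(header))
        _, err := r.Next()
        return err == nil
    }

    func main() {
        fmt.Println(isArchive([]byte{0x1F, 0x8B, 0x08, 0x00})) // true: gzip magic
        fmt.Println(isArchive([]byte("FROM scratch\n")))       // false: treated as a Dockerfile
    }
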
- if err != nil { - return err - } - res, err := c.Solve(ctx2, client.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return err - } - ref, err := res.SingleRef() - if err != nil { - return err - } - dtDockerignore, err := ref.ReadFile(ctx2, client.ReadRequest{ - Filename: dockerignoreFilename, - }) - if err == nil { - excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dtDockerignore)) - if err != nil { - return errors.Wrap(err, "failed to parse dockerignore") - } - } - return nil - }) - } - - if err := eg.Wait(); err != nil { - return nil, err - } - - if _, ok := opts["cmdline"]; !ok { - ref, cmdline, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile)) - if ok { - return forwardGateway(ctx, c, ref, cmdline) - } - } - - exportMap := len(targetPlatforms) > 1 - - if v := opts[keyMultiPlatform]; v != "" { - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Errorf("invalid boolean value %s", v) - } - if !b && exportMap { - return nil, errors.Errorf("returning multiple target platforms is not allowed") - } - exportMap = b - } - - expPlatforms := &exptypes.Platforms{ - Platforms: make([]exptypes.Platform, len(targetPlatforms)), - } - res := client.NewResult() - - eg, ctx = errgroup.WithContext(ctx) - - for i, tp := range targetPlatforms { - func(i int, tp *specs.Platform) { - eg.Go(func() error { - st, img, err := dockerfile2llb.Dockerfile2LLB(ctx, dtDockerfile, dockerfile2llb.ConvertOpt{ - Target: opts[keyTarget], - MetaResolver: c, - BuildArgs: filter(opts, buildArgPrefix), - Labels: filter(opts, labelPrefix), - SessionID: c.BuildOpts().SessionID, - BuildContext: buildContext, - Excludes: excludes, - IgnoreCache: ignoreCache, - TargetPlatform: tp, - BuildPlatforms: buildPlatforms, - ImageResolveMode: resolveMode, - PrefixPlatform: exportMap, - ExtraHosts: extraHosts, - ForceNetMode: defaultNetMode, - OverrideCopyImage: opts[keyOverrideCopyImage], - LLBCaps: &caps, - }) - - if err != nil { - return errors.Wrapf(err, "failed to create LLB definition") - } - - def, err := st.Marshal() - if err != nil { - return errors.Wrapf(err, "failed to marshal LLB definition") - } - - config, err := json.Marshal(img) - if err != nil { - return errors.Wrapf(err, "failed to marshal image config") - } - - var cacheFrom []string - if cacheFromStr := opts[keyCacheFrom]; cacheFromStr != "" { - cacheFrom = strings.Split(cacheFromStr, ",") - } - - r, err := c.Solve(ctx, client.SolveRequest{ - Definition: def.ToPB(), - ImportCacheRefs: cacheFrom, - }) - if err != nil { - return err - } - - ref, err := r.SingleRef() - if err != nil { - return err - } - - if !exportMap { - res.AddMeta(exptypes.ExporterImageConfigKey, config) - res.SetRef(ref) - } else { - p := platforms.DefaultSpec() - if tp != nil { - p = *tp - } - - k := platforms.Format(p) - res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, k), config) - res.AddRef(k, ref) - expPlatforms.Platforms[i] = exptypes.Platform{ - ID: k, - Platform: p, - } - } - return nil - }) - }(i, tp) - } - - if err := eg.Wait(); err != nil { - return nil, err - } - - if exportMap { - dt, err := json.Marshal(expPlatforms) - if err != nil { - return nil, err - } - res.AddMeta(exptypes.ExporterPlatformsKey, dt) - } - - return res, nil -} - -func forwardGateway(ctx context.Context, c client.Client, ref string, cmdline string) (*client.Result, error) { - opts := c.BuildOpts().Opts - if opts == nil { - opts = map[string]string{} - } - opts["cmdline"] = cmdline - opts["source"] = ref - return c.Solve(ctx, client.SolveRequest{ -
Frontend: "gateway.v0", - FrontendOpt: opts, - }) -} - -func filter(opt map[string]string, key string) map[string]string { - m := map[string]string{} - for k, v := range opt { - if strings.HasPrefix(k, key) { - m[strings.TrimPrefix(k, key)] = v - } - } - return m -} - -func detectGitContext(ref string) (*llb.State, bool) { - found := false - if httpPrefix.MatchString(ref) && gitUrlPathWithFragmentSuffix.MatchString(ref) { - found = true - } - - for _, prefix := range []string{"git://", "github.com/", "git@"} { - if strings.HasPrefix(ref, prefix) { - found = true - break - } - } - if !found { - return nil, false - } - - parts := strings.SplitN(ref, "#", 2) - branch := "" - if len(parts) > 1 { - branch = parts[1] - } - st := llb.Git(parts[0], branch, dockerfile2llb.WithInternalName("load git source "+ref)) - return &st, true -} - -func isArchive(header []byte) bool { - for _, m := range [][]byte{ - {0x42, 0x5A, 0x68}, // bzip2 - {0x1F, 0x8B, 0x08}, // gzip - {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // xz - } { - if len(header) < len(m) { - continue - } - if bytes.Equal(m, header[:len(m)]) { - return true - } - } - - r := tar.NewReader(bytes.NewBuffer(header)) - _, err := r.Next() - return err == nil -} - -func parsePlatforms(v string) ([]*specs.Platform, error) { - var pp []*specs.Platform - for _, v := range strings.Split(v, ",") { - p, err := platforms.Parse(v) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse target platform %s", v) - } - p = platforms.Normalize(p) - pp = append(pp, &p) - } - return pp, nil -} - -func parseResolveMode(v string) (llb.ResolveMode, error) { - switch v { - case pb.AttrImageResolveModeDefault, "": - return llb.ResolveModeDefault, nil - case pb.AttrImageResolveModeForcePull: - return llb.ResolveModeForcePull, nil - case pb.AttrImageResolveModePreferLocal: - return llb.ResolveModePreferLocal, nil - default: - return 0, errors.Errorf("invalid image-resolve-mode: %s", v) - } -} - -func parseExtraHosts(v string) ([]llb.HostIP, error) { - if v == "" { - return nil, nil - } - out := make([]llb.HostIP, 0) - csvReader := csv.NewReader(strings.NewReader(v)) - fields, err := csvReader.Read() - if err != nil { - return nil, err - } - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - if len(parts) != 2 { - return nil, errors.Errorf("invalid key-value pair %s", field) - } - key := strings.ToLower(parts[0]) - val := strings.ToLower(parts[1]) - ip := net.ParseIP(val) - if ip == nil { - return nil, errors.Errorf("failed to parse IP %s", val) - } - out = append(out, llb.HostIP{Host: key, IP: ip}) - } - return out, nil -} - -func parseNetMode(v string) (pb.NetMode, error) { - if v == "" { - return llb.NetModeSandbox, nil - } - switch v { - case "none": - return llb.NetModeNone, nil - case "host": - return llb.NetModeHost, nil - case "sandbox": - return llb.NetModeSandbox, nil - default: - return 0, errors.Errorf("invalid netmode %s", v) - } -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile deleted file mode 100644 index 347023ca2e13..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile +++ /dev/null @@ -1,44 +0,0 @@ -# syntax = tonistiigi/dockerfile:runmount20181002 - -FROM --platform=$BUILDPLATFORM tonistiigi/xx:golang@sha256:6f7d999551dd471b58f70716754290495690efa8421e0a1fcf18eb11d0c0a537 AS xgo - -FROM --platform=$BUILDPLATFORM golang:1.11 AS base 
-COPY --from=xgo / / -WORKDIR /go/src/github.com/moby/buildkit - -FROM base AS version -ARG CHANNEL -RUN --mount=target=. \ - PKG=github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend VERSION=$(./frontend/dockerfile/cmd/dockerfile-frontend/hack/detect "$CHANNEL") REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \ - echo "-X main.Version=${VERSION} -X main.Revision=${REVISION} -X main.Package=${PKG}" | tee /tmp/.ldflags; \ - echo -n "${VERSION}" | tee /tmp/.version; - -FROM base AS build -RUN apt-get update && apt-get install -y --no-install-recommends file -ARG BUILDTAGS="" -ARG TARGETPLATFORM -ENV TARGETPLATFORM=$TARGETPLATFORM -RUN --mount=target=. --mount=type=cache,target=/root/.cache \ - --mount=source=/tmp/.ldflags,target=/tmp/.ldflags,from=version \ - CGO_ENABLED=0 go build -o /dockerfile-frontend -ldflags "-d $(cat /tmp/.ldflags)" -tags "$BUILDTAGS netgo static_build osusergo" ./frontend/dockerfile/cmd/dockerfile-frontend && \ - file /dockerfile-frontend | grep "statically linked" - -FROM scratch AS release -COPY --from=build /dockerfile-frontend /bin/dockerfile-frontend -ENTRYPOINT ["/bin/dockerfile-frontend"] - - -FROM base AS buildid-check -RUN apt-get update && apt-get install -y jq -COPY /frontend/dockerfile/cmd/dockerfile-frontend/hack/check-daily-outdated . -COPY --from=r.j3ss.co/reg /usr/bin/reg /bin -COPY --from=build /dockerfile-frontend . -ARG CHANNEL -ARG REPO -ARG DATE -RUN ./check-daily-outdated $CHANNEL $REPO $DATE /out - -FROM scratch AS buildid -COPY --from=buildid-check /out/ / - -FROM release \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/hack/check-daily-outdated b/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/hack/check-daily-outdated deleted file mode 100755 index ed7f3196d84d..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/hack/check-daily-outdated +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash - -usage() { - echo "./check-daily-outdated channel repo date outdir" - exit 1 -} - -if [ "$#" != 4 ]; then - usage -fi - -CHANNEL=$1 -REPO=$2 -DATE=$3 -OUTDIR=$4 - -mkdir -p $OUTDIR - -if [ ! -d "$OUTDIR" ]; then - echo "invalid output directory $OUTDIR" - exit 1 -fi - -set -x - -reg digest "$REPO:$DATE-$CHANNEL" -if [[ $? == 0 ]]; then - exit 0 -fi - -lastTag=$(reg tags $REPO | grep "\-$CHANNEL" | sort -r | head -n 1) - -oldBuildID="" - -if [ ! 
-z "$lastTag" ]; then - layer=$(reg manifest $REPO:$lastTag | jq -r ".layers[0].digest") - tmpdir=$(mktemp -d -t frontend.XXXXXXXXXX) - reg layer "$REPO@$layer" | tar xvz --strip-components=1 -C $tmpdir - oldBuildID=$(go tool buildid $tmpdir/dockerfile-frontend) - rm $tmpdir/dockerfile-frontend - rm -r $tmpdir -fi - -newBuildID=$(go tool buildid dockerfile-frontend) - -if [ "$oldBuildID" != "$newBuildID" ]; then - echo -n $newBuildID > $OUTDIR/buildid -fi diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/hack/detect b/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/hack/detect deleted file mode 100755 index ab6d22b38def..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/hack/detect +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -usage() { - echo "./detect channel" - exit 1 -} - -if [ "$#" == 0 ]; then - usage -fi - -channel=$1 -suffix="" - -if [ "$channel" == "mainline" ]; then - channel="" -fi - -if [ ! -z "$channel" ]; then - suffix="-$channel" -fi - -name=$(git describe --always --tags --match "dockerfile/[0-9]*$suffix") - -if [[ ! "$name" =~ "dockerfile" ]]; then - name=${name}$suffix -fi - -echo -n $name \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/hack/release b/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/hack/release deleted file mode 100755 index 908e77a78d80..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/hack/release +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/env bash - -: ${PLATFORMS=linux/amd64} -: ${CONTINUOUS_INTEGRATION=} -: ${DAILY_TARGETS=} - -progressFlag="" -if [ "$CONTINUOUS_INTEGRATION" == "true" ]; then progressFlag="--progress=plain"; fi - -usage() { - echo "$0 (master|tag|daily) (tag|channel) [push]" - exit 1 -} - -if [ $# != 4 ]; then - usage -fi - -parseTag() { - local prefix=$(echo $1 | cut -d/ -f 1) - if [[ "$prefix" != "dockerfile" ]]; then - echo "invalid tag $1" - exit 1 - fi - local suffix=$(echo $1 | awk -F- '{print $NF}') - local tagf=./frontend/dockerfile/release/$suffix/tags - if [ "$sufffix" == "$1" ] || [ ! -f $tagf ]; then - suffix="mainline" - fi - - local mainTag=$(echo $1 | cut -d/ -f 2) - - publishedNames=$REPO:$mainTag - - local versioned="" - # \d.\d.\d becomes latest - if [[ "$mainTag" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then - publishedNames=${publishedNames},$REPO:latest - versioned=1 - fi - - # \d.\d.\d-channel becomes - if [[ "$mainTag" =~ ^[0-9]+\.[0-9]+\.[0-9]+-$suffix$ ]] && [ -f $tagf ]; then - publishedNames=${publishedNames},$REPO:$suffix - versioned=1 - fi - - # \d.\d.\d* -> \d.\d* -> \d* (except "0") - if [ "$versioned" == "1" ]; then - publishedNames=${publishedNames},$REPO:$(echo $mainTag | sed -E 's#^([0-9]+\.[0-9]+)\.[0-9]+#\1#') - if [ "$(echo $mainTag | sed -E 's#^([0-9]+)\.[0-9]+\.[0-9]+.*$#\1#')" != "0" ]; then - publishedNames=${publishedNames},$REPO:$(echo $mainTag | sed -E 's#^([0-9]+)\.[0-9]+\.[0-9]+#\1#') - fi - fi - - TAG=$suffix -} - -TYP=$1 -TAG=$2 -REPO=$3 -PUSH=$4 - -pushFlag="" -if [ "$PUSH" = "push" ]; then - pushFlag="--exporter-opt push=true" -fi - -case $TYP in -"master") - tagf=./frontend/dockerfile/release/$TAG/tags - if [ ! 
-f $tagf ]; then - echo "invalid release $TAG" - exit 1 - fi - buildTags=$(cat $tagf) - pushTag="master" - if [ "$TAG" != "mainline" ]; then - pushTag=${pushTag}-$TAG - fi - set -x - buildctl build $progressFlag --frontend=dockerfile.v0 \ - --local context=. --local dockerfile=. \ - --frontend-opt filename=./frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile \ - --frontend-opt platform=$PLATFORMS \ - --frontend-opt "build-arg:CHANNEL=$TAG" \ - --frontend-opt "build-arg:BUILDTAGS=$buildTags" \ - --exporter image \ - --exporter-opt name=$REPO:$pushTag $pushFlag - ;; -"tag") - publishedNames="" - parseTag $TAG - tagf=./frontend/dockerfile/release/$TAG/tags - if [ ! -f $tagf ]; then - echo "no build tags found for $TAG" - exit 1 - fi - buildTags=$(cat $tagf) - - set -x - buildctl build $progressFlag --frontend=dockerfile.v0 \ - --local context=. --local dockerfile=. \ - --frontend-opt filename=./frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile \ - --frontend-opt platform=$PLATFORMS \ - --frontend-opt "build-arg:CHANNEL=$TAG" \ - --frontend-opt "build-arg:BUILDTAGS=$buildTags" \ - --exporter image \ - --exporter-opt name=$publishedNames $pushFlag - ;; -"daily") - if [ -z $DAILY_TARGETS ]; then - DAILY_TARGETS="mounts secrets ssh" - fi - - for TAG in $DAILY_TARGETS; do - - tagf=./frontend/dockerfile/release/$TAG/tags - if [ ! -f $tagf ]; then - echo "invalid release $TAG" - exit 1 - fi - buildTags=$(cat $tagf) - - # find the buildID of the last pushed image - # returns a BuildID if rebuild needed - - tmp=$(mktemp -d -t buildid.XXXXXXXXXX) - set -x - dt=$(date +%Y%m%d) - buildctl build $progressFlag --progress=plain --frontend=dockerfile.v0 \ - --local context=. --local dockerfile=. \ - --frontend-opt filename=./frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile \ - --frontend-opt "build-arg:CHANNEL=$TAG" \ - --frontend-opt "build-arg:REPO=$REPO" \ - --frontend-opt "build-arg:DATE=$dt" \ - --frontend-opt "build-arg:BUILDTAGS=$buildTags" \ - --frontend-opt target=buildid\ - --exporter=local --exporter-opt=output=$tmp - - if [ -f $tmp/buildid ]; then - buildid=$(cat $tmp/buildid) - echo "buildid: $buildid" - - buildctl build $progressFlag --frontend=dockerfile.v0 \ - --local context=. --local dockerfile=. 
\ - --frontend-opt filename=./frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile \ - --frontend-opt platform=$PLATFORMS \ - --frontend-opt "build-arg:CHANNEL=$TAG" \ - --frontend-opt "build-arg:BUILDTAGS=$buildTags" \ - --exporter image \ - --exporter-opt name=$REPO:$dt-$TAG $pushFlag - rm $tmp/buildid - fi - rm -r $tmp - - done - - ;; -esac diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/main.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/main.go deleted file mode 100644 index 9da10ee254e3..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/main.go +++ /dev/null @@ -1,28 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - - dockerfile "github.com/moby/buildkit/frontend/dockerfile/builder" - "github.com/moby/buildkit/frontend/gateway/grpcclient" - "github.com/moby/buildkit/util/appcontext" - "github.com/sirupsen/logrus" -) - -func main() { - var version bool - flag.BoolVar(&version, "version", false, "show version") - flag.Parse() - - if version { - fmt.Printf("%s %s %s %s\n", os.Args[0], Package, Version, Revision) - os.Exit(0) - } - - if err := grpcclient.RunFromEnvironment(appcontext.Context(), dockerfile.Build); err != nil { - logrus.Errorf("fatal error: %+v", err) - panic(err) - } -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/version.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/version.go deleted file mode 100644 index 1669cb5bdb05..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend/version.go +++ /dev/null @@ -1,13 +0,0 @@ -package main - -var ( - // Package is filled at linking time - Package = "github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend" - - // Version holds the complete version number. Filled in at linking time. - Version = "0.0.0+unknown" - - // Revision is filled with the VCS (e.g. git) revision being used to build - // the program at linking time. - Revision = "" -) diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go deleted file mode 100644 index f23c6874b55e..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go +++ /dev/null @@ -1,46 +0,0 @@ -// Package command contains the set of Dockerfile commands. 
-package command - -// Define constants for the command strings -const ( - Add = "add" - Arg = "arg" - Cmd = "cmd" - Copy = "copy" - Entrypoint = "entrypoint" - Env = "env" - Expose = "expose" - From = "from" - Healthcheck = "healthcheck" - Label = "label" - Maintainer = "maintainer" - Onbuild = "onbuild" - Run = "run" - Shell = "shell" - StopSignal = "stopsignal" - User = "user" - Volume = "volume" - Workdir = "workdir" -) - -// Commands is list of all Dockerfile commands -var Commands = map[string]struct{}{ - Add: {}, - Arg: {}, - Cmd: {}, - Copy: {}, - Entrypoint: {}, - Env: {}, - Expose: {}, - From: {}, - Healthcheck: {}, - Label: {}, - Maintainer: {}, - Onbuild: {}, - Run: {}, - Shell: {}, - StopSignal: {}, - User: {}, - Volume: {}, - Workdir: {}, -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile.go deleted file mode 100644 index a529f585fdff..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile.go +++ /dev/null @@ -1 +0,0 @@ -package dockerfile diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go deleted file mode 100644 index 4c3a52622bdc..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go +++ /dev/null @@ -1,1172 +0,0 @@ -package dockerfile2llb - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "net/url" - "path" - "path/filepath" - "sort" - "strconv" - "strings" - - "github.com/containerd/containerd/platforms" - "github.com/docker/distribution/reference" - "github.com/docker/docker/pkg/signal" - "github.com/docker/go-connections/nat" - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/client/llb/imagemetaresolver" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/moby/buildkit/frontend/dockerfile/parser" - "github.com/moby/buildkit/frontend/dockerfile/shell" - gw "github.com/moby/buildkit/frontend/gateway/client" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - "github.com/moby/buildkit/util/system" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "golang.org/x/sync/errgroup" -) - -const ( - emptyImageName = "scratch" - localNameContext = "context" - historyComment = "buildkit.dockerfile.v0" - - DefaultCopyImage = "docker/dockerfile-copy:v0.1.9@sha256:e8f159d3f00786604b93c675ee2783f8dc194bb565e61ca5788f6a6e9d304061" -) - -type ConvertOpt struct { - Target string - MetaResolver llb.ImageMetaResolver - BuildArgs map[string]string - Labels map[string]string - SessionID string - BuildContext *llb.State - Excludes []string - // IgnoreCache contains names of the stages that should not use build cache. - // Empty slice means ignore cache for all stages. Nil doesn't disable cache. 
- IgnoreCache []string - // CacheIDNamespace scopes the IDs for different cache mounts - CacheIDNamespace string - ImageResolveMode llb.ResolveMode - TargetPlatform *specs.Platform - BuildPlatforms []specs.Platform - PrefixPlatform bool - ExtraHosts []llb.HostIP - ForceNetMode pb.NetMode - OverrideCopyImage string - LLBCaps *apicaps.CapSet -} - -func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, error) { - if len(dt) == 0 { - return nil, nil, errors.Errorf("the Dockerfile cannot be empty") - } - - platformOpt := buildPlatformOpt(&opt) - - optMetaArgs := getPlatformArgs(platformOpt) - for i, arg := range optMetaArgs { - optMetaArgs[i] = setKVValue(arg, opt.BuildArgs) - } - - dockerfile, err := parser.Parse(bytes.NewReader(dt)) - if err != nil { - return nil, nil, err - } - - proxyEnv := proxyEnvFromBuildArgs(opt.BuildArgs) - - stages, metaArgs, err := instructions.Parse(dockerfile.AST) - if err != nil { - return nil, nil, err - } - - shlex := shell.NewLex(dockerfile.EscapeToken) - - for _, metaArg := range metaArgs { - if metaArg.Value != nil { - *metaArg.Value, _ = shlex.ProcessWordWithMap(*metaArg.Value, metaArgsToMap(optMetaArgs)) - } - optMetaArgs = append(optMetaArgs, setKVValue(metaArg.KeyValuePairOptional, opt.BuildArgs)) - } - - metaResolver := opt.MetaResolver - if metaResolver == nil { - metaResolver = imagemetaresolver.Default() - } - - allDispatchStates := newDispatchStates() - - // set base state for every image - for i, st := range stages { - name, err := shlex.ProcessWordWithMap(st.BaseName, metaArgsToMap(optMetaArgs)) - if err != nil { - return nil, nil, err - } - if name == "" { - return nil, nil, errors.Errorf("base name (%s) should not be blank", st.BaseName) - } - st.BaseName = name - - ds := &dispatchState{ - stage: st, - deps: make(map[*dispatchState]struct{}), - ctxPaths: make(map[string]struct{}), - stageName: st.Name, - prefixPlatform: opt.PrefixPlatform, - } - - if st.Name == "" { - ds.stageName = fmt.Sprintf("stage-%d", i) - } - - if v := st.Platform; v != "" { - v, err := shlex.ProcessWordWithMap(v, metaArgsToMap(optMetaArgs)) - if err != nil { - return nil, nil, errors.Wrapf(err, "failed to process arguments for platform %s", v) - } - - p, err := platforms.Parse(v) - if err != nil { - return nil, nil, errors.Wrapf(err, "failed to parse platform %s", v) - } - ds.platform = &p - } - allDispatchStates.addState(ds) - - total := 0 - if ds.stage.BaseName != emptyImageName && ds.base == nil { - total = 1 - } - for _, cmd := range ds.stage.Commands { - switch cmd.(type) { - case *instructions.AddCommand, *instructions.CopyCommand, *instructions.RunCommand: - total++ - } - } - ds.cmdTotal = total - - if opt.IgnoreCache != nil { - if len(opt.IgnoreCache) == 0 { - ds.ignoreCache = true - } else if st.Name != "" { - for _, n := range opt.IgnoreCache { - if strings.EqualFold(n, st.Name) { - ds.ignoreCache = true - } - } - } - } - } - - if len(allDispatchStates.states) == 1 { - allDispatchStates.states[0].stageName = "" - } - - var target *dispatchState - if opt.Target == "" { - target = allDispatchStates.lastTarget() - } else { - var ok bool - target, ok = allDispatchStates.findStateByName(opt.Target) - if !ok { - return nil, nil, errors.Errorf("target stage %s could not be found", opt.Target) - } - } - - // fill dependencies to stages so unreachable ones can avoid loading image configs - for _, d := range allDispatchStates.states { - d.commands = make([]command, len(d.stage.Commands)) - for i, cmd := range d.stage.Commands { - newCmd, 
err := toCommand(cmd, allDispatchStates) - if err != nil { - return nil, nil, err - } - d.commands[i] = newCmd - for _, src := range newCmd.sources { - if src != nil { - d.deps[src] = struct{}{} - if src.unregistered { - allDispatchStates.addState(src) - } - } - } - } - } - - eg, ctx := errgroup.WithContext(ctx) - for i, d := range allDispatchStates.states { - reachable := isReachable(target, d) - // resolve image config for every stage - if d.base == nil { - if d.stage.BaseName == emptyImageName { - d.state = llb.Scratch() - d.image = emptyImage(platformOpt.targetPlatform) - continue - } - func(i int, d *dispatchState) { - eg.Go(func() error { - ref, err := reference.ParseNormalizedNamed(d.stage.BaseName) - if err != nil { - return errors.Wrapf(err, "failed to parse stage name %q", d.stage.BaseName) - } - platform := d.platform - if platform == nil { - platform = &platformOpt.targetPlatform - } - d.stage.BaseName = reference.TagNameOnly(ref).String() - var isScratch bool - if metaResolver != nil && reachable && !d.unregistered { - prefix := "[" - if opt.PrefixPlatform && platform != nil { - prefix += platforms.Format(*platform) + " " - } - prefix += "internal]" - dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, gw.ResolveImageConfigOpt{ - Platform: platform, - ResolveMode: opt.ImageResolveMode.String(), - LogName: fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName), - }) - if err == nil { // handle the error while builder is actually running - var img Image - if err := json.Unmarshal(dt, &img); err != nil { - return err - } - img.Created = nil - // if there is no explicit target platform, try to match based on image config - if d.platform == nil && platformOpt.implicitTarget { - p := autoDetectPlatform(img, *platform, platformOpt.buildPlatforms) - platform = &p - } - d.image = img - if dgst != "" { - ref, err = reference.WithDigest(ref, dgst) - if err != nil { - return err - } - } - d.stage.BaseName = ref.String() - _ = ref - if len(img.RootFS.DiffIDs) == 0 { - isScratch = true - // schema1 images can't return diffIDs so double check :( - for _, h := range img.History { - if !h.EmptyLayer { - isScratch = false - break - } - } - } - } - } - if isScratch { - d.state = llb.Scratch() - } else { - d.state = llb.Image(d.stage.BaseName, dfCmd(d.stage.SourceCode), llb.Platform(*platform), opt.ImageResolveMode, llb.WithCustomName(prefixCommand(d, "FROM "+d.stage.BaseName, opt.PrefixPlatform, platform))) - } - d.platform = platform - return nil - }) - }(i, d) - } - } - - if err := eg.Wait(); err != nil { - return nil, nil, err - } - - buildContext := &mutableOutput{} - ctxPaths := map[string]struct{}{} - - for _, d := range allDispatchStates.states { - if !isReachable(target, d) { - continue - } - if d.base != nil { - d.state = d.base.state - d.platform = d.base.platform - d.image = clone(d.base.image) - } - - // make sure that PATH is always set - if _, ok := shell.BuildEnvs(d.image.Config.Env)["PATH"]; !ok { - d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv) - } - - // initialize base metadata from image conf - for _, env := range d.image.Config.Env { - k, v := parseKeyValue(env) - d.state = d.state.AddEnv(k, v) - } - if d.image.Config.WorkingDir != "" { - if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false); err != nil { - return nil, nil, err - } - } - if d.image.Config.User != "" { - if err = dispatchUser(d, &instructions.UserCommand{User: d.image.Config.User}, false); err != nil { - 
return nil, nil, err - } - } - d.state = d.state.Network(opt.ForceNetMode) - - opt := dispatchOpt{ - allDispatchStates: allDispatchStates, - metaArgs: optMetaArgs, - buildArgValues: opt.BuildArgs, - shlex: shlex, - sessionID: opt.SessionID, - buildContext: llb.NewState(buildContext), - proxyEnv: proxyEnv, - cacheIDNamespace: opt.CacheIDNamespace, - buildPlatforms: platformOpt.buildPlatforms, - targetPlatform: platformOpt.targetPlatform, - extraHosts: opt.ExtraHosts, - copyImage: opt.OverrideCopyImage, - llbCaps: opt.LLBCaps, - } - if opt.copyImage == "" { - opt.copyImage = DefaultCopyImage - } - - if err = dispatchOnBuild(d, d.image.Config.OnBuild, opt); err != nil { - return nil, nil, err - } - - for _, cmd := range d.commands { - if err := dispatch(d, cmd, opt); err != nil { - return nil, nil, err - } - } - - for p := range d.ctxPaths { - ctxPaths[p] = struct{}{} - } - } - - if len(opt.Labels) != 0 && target.image.Config.Labels == nil { - target.image.Config.Labels = make(map[string]string, len(opt.Labels)) - } - for k, v := range opt.Labels { - target.image.Config.Labels[k] = v - } - - opts := []llb.LocalOption{ - llb.SessionID(opt.SessionID), - llb.ExcludePatterns(opt.Excludes), - llb.SharedKeyHint(localNameContext), - WithInternalName("load build context"), - } - if includePatterns := normalizeContextPaths(ctxPaths); includePatterns != nil { - opts = append(opts, llb.FollowPaths(includePatterns)) - } - - bc := llb.Local(localNameContext, opts...) - if opt.BuildContext != nil { - bc = *opt.BuildContext - } - buildContext.Output = bc.Output() - - defaults := []llb.ConstraintsOpt{ - llb.Platform(platformOpt.targetPlatform), - } - if opt.LLBCaps != nil { - defaults = append(defaults, llb.WithCaps(*opt.LLBCaps)) - } - st := target.state.SetMarshalDefaults(defaults...) 
- - if !platformOpt.implicitTarget { - target.image.OS = platformOpt.targetPlatform.OS - target.image.Architecture = platformOpt.targetPlatform.Architecture - } - - return &st, &target.image, nil -} - -func metaArgsToMap(metaArgs []instructions.KeyValuePairOptional) map[string]string { - m := map[string]string{} - - for _, arg := range metaArgs { - m[arg.Key] = arg.ValueString() - } - - return m -} - -func toCommand(ic instructions.Command, allDispatchStates *dispatchStates) (command, error) { - cmd := command{Command: ic} - if c, ok := ic.(*instructions.CopyCommand); ok { - if c.From != "" { - var stn *dispatchState - index, err := strconv.Atoi(c.From) - if err != nil { - stn, ok = allDispatchStates.findStateByName(c.From) - if !ok { - stn = &dispatchState{ - stage: instructions.Stage{BaseName: c.From}, - deps: make(map[*dispatchState]struct{}), - unregistered: true, - } - } - } else { - stn, err = allDispatchStates.findStateByIndex(index) - if err != nil { - return command{}, err - } - } - cmd.sources = []*dispatchState{stn} - } - } - - if ok := detectRunMount(&cmd, allDispatchStates); ok { - return cmd, nil - } - - return cmd, nil -} - -type dispatchOpt struct { - allDispatchStates *dispatchStates - metaArgs []instructions.KeyValuePairOptional - buildArgValues map[string]string - shlex *shell.Lex - sessionID string - buildContext llb.State - proxyEnv *llb.ProxyEnv - cacheIDNamespace string - targetPlatform specs.Platform - buildPlatforms []specs.Platform - extraHosts []llb.HostIP - copyImage string - llbCaps *apicaps.CapSet -} - -func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { - if ex, ok := cmd.Command.(instructions.SupportsSingleWordExpansion); ok { - err := ex.Expand(func(word string) (string, error) { - return opt.shlex.ProcessWordWithMap(word, toEnvMap(d.buildArgs, d.image.Config.Env)) - }) - if err != nil { - return err - } - } - - var err error - switch c := cmd.Command.(type) { - case *instructions.MaintainerCommand: - err = dispatchMaintainer(d, c) - case *instructions.EnvCommand: - err = dispatchEnv(d, c) - case *instructions.RunCommand: - err = dispatchRun(d, c, opt.proxyEnv, cmd.sources, opt) - case *instructions.WorkdirCommand: - err = dispatchWorkdir(d, c, true) - case *instructions.AddCommand: - err = dispatchCopy(d, c.SourcesAndDest, opt.buildContext, true, c, "", opt) - if err == nil { - for _, src := range c.Sources() { - d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{} - } - } - case *instructions.LabelCommand: - err = dispatchLabel(d, c) - case *instructions.OnbuildCommand: - err = dispatchOnbuild(d, c) - case *instructions.CmdCommand: - err = dispatchCmd(d, c) - case *instructions.EntrypointCommand: - err = dispatchEntrypoint(d, c) - case *instructions.HealthCheckCommand: - err = dispatchHealthcheck(d, c) - case *instructions.ExposeCommand: - err = dispatchExpose(d, c, opt.shlex) - case *instructions.UserCommand: - err = dispatchUser(d, c, true) - case *instructions.VolumeCommand: - err = dispatchVolume(d, c) - case *instructions.StopSignalCommand: - err = dispatchStopSignal(d, c) - case *instructions.ShellCommand: - err = dispatchShell(d, c) - case *instructions.ArgCommand: - err = dispatchArg(d, c, opt.metaArgs, opt.buildArgValues) - case *instructions.CopyCommand: - l := opt.buildContext - if len(cmd.sources) != 0 { - l = cmd.sources[0].state - } - err = dispatchCopy(d, c.SourcesAndDest, l, false, c, c.Chown, opt) - if err == nil && len(cmd.sources) == 0 { - for _, src := range c.Sources() { - d.ctxPaths[path.Join("/", 
filepath.ToSlash(src))] = struct{}{} - } - } - default: - } - return err -} - -type dispatchState struct { - state llb.State - image Image - platform *specs.Platform - stage instructions.Stage - base *dispatchState - deps map[*dispatchState]struct{} - buildArgs []instructions.KeyValuePairOptional - commands []command - ctxPaths map[string]struct{} - ignoreCache bool - cmdSet bool - unregistered bool - stageName string - cmdIndex int - cmdTotal int - prefixPlatform bool -} - -type dispatchStates struct { - states []*dispatchState - statesByName map[string]*dispatchState -} - -func newDispatchStates() *dispatchStates { - return &dispatchStates{statesByName: map[string]*dispatchState{}} -} - -func (dss *dispatchStates) addState(ds *dispatchState) { - dss.states = append(dss.states, ds) - - if d, ok := dss.statesByName[ds.stage.BaseName]; ok { - ds.base = d - } - if ds.stage.Name != "" { - dss.statesByName[strings.ToLower(ds.stage.Name)] = ds - } -} - -func (dss *dispatchStates) findStateByName(name string) (*dispatchState, bool) { - ds, ok := dss.statesByName[strings.ToLower(name)] - return ds, ok -} - -func (dss *dispatchStates) findStateByIndex(index int) (*dispatchState, error) { - if index < 0 || index >= len(dss.states) { - return nil, errors.Errorf("invalid stage index %d", index) - } - - return dss.states[index], nil -} - -func (dss *dispatchStates) lastTarget() *dispatchState { - return dss.states[len(dss.states)-1] -} - -type command struct { - instructions.Command - sources []*dispatchState -} - -func dispatchOnBuild(d *dispatchState, triggers []string, opt dispatchOpt) error { - for _, trigger := range triggers { - ast, err := parser.Parse(strings.NewReader(trigger)) - if err != nil { - return err - } - if len(ast.AST.Children) != 1 { - return errors.New("onbuild trigger should be a single expression") - } - ic, err := instructions.ParseCommand(ast.AST.Children[0]) - if err != nil { - return err - } - cmd, err := toCommand(ic, opt.allDispatchStates) - if err != nil { - return err - } - if err := dispatch(d, cmd, opt); err != nil { - return err - } - } - return nil -} - -func dispatchEnv(d *dispatchState, c *instructions.EnvCommand) error { - commitMessage := bytes.NewBufferString("ENV") - for _, e := range c.Env { - commitMessage.WriteString(" " + e.String()) - d.state = d.state.AddEnv(e.Key, e.Value) - d.image.Config.Env = addEnv(d.image.Config.Env, e.Key, e.Value) - } - return commitToHistory(&d.image, commitMessage.String(), false, nil) -} - -func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyEnv, sources []*dispatchState, dopt dispatchOpt) error { - var args []string = c.CmdLine - if c.PrependShell { - args = withShell(d.image, args) - } - env := d.state.Env() - opt := []llb.RunOption{llb.Args(args)} - for _, arg := range d.buildArgs { - env = append(env, fmt.Sprintf("%s=%s", arg.Key, arg.ValueString())) - opt = append(opt, llb.AddEnv(arg.Key, arg.ValueString())) - } - opt = append(opt, dfCmd(c)) - if d.ignoreCache { - opt = append(opt, llb.IgnoreCache) - } - if proxy != nil { - opt = append(opt, llb.WithProxy(*proxy)) - } - - runMounts, err := dispatchRunMounts(d, c, sources, dopt) - if err != nil { - return err - } - opt = append(opt, runMounts...) 
- - shlex := *dopt.shlex - shlex.RawQuotes = true - shlex.SkipUnsetEnv = true - - opt = append(opt, llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(&shlex, c.String(), env)), d.prefixPlatform, d.state.GetPlatform()))) - for _, h := range dopt.extraHosts { - opt = append(opt, llb.AddExtraHost(h.Host, h.IP)) - } - d.state = d.state.Run(opt...).Root() - return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs), true, &d.state) -} - -func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool) error { - d.state = d.state.Dir(c.Path) - wd := c.Path - if !path.IsAbs(c.Path) { - wd = path.Join("/", d.image.Config.WorkingDir, wd) - } - d.image.Config.WorkingDir = wd - if commit { - return commitToHistory(&d.image, "WORKDIR "+wd, false, nil) - } - return nil -} - -func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, opt dispatchOpt) error { - // TODO: this should use CopyOp instead. Current implementation is inefficient - img := llb.Image(opt.copyImage, llb.MarkImageInternal, llb.Platform(opt.buildPlatforms[0]), WithInternalName("helper image for file operations")) - - dest := path.Join(".", pathRelativeToWorkingDir(d.state, c.Dest())) - if c.Dest() == "." || c.Dest() == "" || c.Dest()[len(c.Dest())-1] == filepath.Separator { - dest += string(filepath.Separator) - } - args := []string{"copy"} - unpack := isAddCommand - - mounts := make([]llb.RunOption, 0, len(c.Sources())) - if chown != "" { - args = append(args, fmt.Sprintf("--chown=%s", chown)) - _, _, err := parseUser(chown) - if err != nil { - mounts = append(mounts, llb.AddMount("/etc/passwd", d.state, llb.SourcePath("/etc/passwd"), llb.Readonly)) - mounts = append(mounts, llb.AddMount("/etc/group", d.state, llb.SourcePath("/etc/group"), llb.Readonly)) - } - } - - commitMessage := bytes.NewBufferString("") - if isAddCommand { - commitMessage.WriteString("ADD") - } else { - commitMessage.WriteString("COPY") - } - - for i, src := range c.Sources() { - commitMessage.WriteString(" " + src) - if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { - if !isAddCommand { - return errors.New("source can't be a URL for COPY") - } - - // Resources from remote URLs are not decompressed. - // https://docs.docker.com/engine/reference/builder/#add - // - // Note: mixing up remote archives and local archives in a single ADD instruction - // would result in undefined behavior: https://github.com/moby/buildkit/pull/387#discussion_r189494717 - unpack = false - u, err := url.Parse(src) - f := "__unnamed__" - if err == nil { - if base := path.Base(u.Path); base != "." && base != "/" { - f = base - } - } - target := path.Join(fmt.Sprintf("/src-%d", i), f) - args = append(args, target) - mounts = append(mounts, llb.AddMount(path.Dir(target), llb.HTTP(src, llb.Filename(f), dfCmd(c)), llb.Readonly)) - } else { - d, f := splitWildcards(src) - targetCmd := fmt.Sprintf("/src-%d", i) - targetMount := targetCmd - if f == "" { - f = path.Base(src) - targetMount = path.Join(targetMount, f) - } - targetCmd = path.Join(targetCmd, f) - args = append(args, targetCmd) - mounts = append(mounts, llb.AddMount(targetMount, sourceState, llb.SourcePath(d), llb.Readonly)) - } - } - - commitMessage.WriteString(" " + c.Dest()) - - args = append(args, dest) - if unpack { - args = append(args[:1], append([]string{"--unpack"}, args[1:]...)...) 
- } - - platform := opt.targetPlatform - if d.platform != nil { - platform = *d.platform - } - - runOpt := []llb.RunOption{llb.Args(args), llb.Dir("/dest"), llb.ReadonlyRootFS(), dfCmd(cmdToPrint), llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, cmdToPrint.String(), d.state.Env())), d.prefixPlatform, &platform))} - if d.ignoreCache { - runOpt = append(runOpt, llb.IgnoreCache) - } - - if opt.llbCaps != nil { - if err := opt.llbCaps.Supports(pb.CapExecMetaNetwork); err == nil { - runOpt = append(runOpt, llb.Network(llb.NetModeNone)) - } - } - - run := img.Run(append(runOpt, mounts...)...) - d.state = run.AddMount("/dest", d.state).Platform(platform) - - return commitToHistory(&d.image, commitMessage.String(), true, &d.state) -} - -func dispatchMaintainer(d *dispatchState, c *instructions.MaintainerCommand) error { - d.image.Author = c.Maintainer - return commitToHistory(&d.image, fmt.Sprintf("MAINTAINER %v", c.Maintainer), false, nil) -} - -func dispatchLabel(d *dispatchState, c *instructions.LabelCommand) error { - commitMessage := bytes.NewBufferString("LABEL") - if d.image.Config.Labels == nil { - d.image.Config.Labels = make(map[string]string, len(c.Labels)) - } - for _, v := range c.Labels { - d.image.Config.Labels[v.Key] = v.Value - commitMessage.WriteString(" " + v.String()) - } - return commitToHistory(&d.image, commitMessage.String(), false, nil) -} - -func dispatchOnbuild(d *dispatchState, c *instructions.OnbuildCommand) error { - d.image.Config.OnBuild = append(d.image.Config.OnBuild, c.Expression) - return nil -} - -func dispatchCmd(d *dispatchState, c *instructions.CmdCommand) error { - var args []string = c.CmdLine - if c.PrependShell { - args = withShell(d.image, args) - } - d.image.Config.Cmd = args - d.image.Config.ArgsEscaped = true - d.cmdSet = true - return commitToHistory(&d.image, fmt.Sprintf("CMD %q", args), false, nil) -} - -func dispatchEntrypoint(d *dispatchState, c *instructions.EntrypointCommand) error { - var args []string = c.CmdLine - if c.PrependShell { - args = withShell(d.image, args) - } - d.image.Config.Entrypoint = args - if !d.cmdSet { - d.image.Config.Cmd = nil - } - return commitToHistory(&d.image, fmt.Sprintf("ENTRYPOINT %q", args), false, nil) -} - -func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand) error { - d.image.Config.Healthcheck = &HealthConfig{ - Test: c.Health.Test, - Interval: c.Health.Interval, - Timeout: c.Health.Timeout, - StartPeriod: c.Health.StartPeriod, - Retries: c.Health.Retries, - } - return commitToHistory(&d.image, fmt.Sprintf("HEALTHCHECK %q", d.image.Config.Healthcheck), false, nil) -} - -func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, shlex *shell.Lex) error { - ports := []string{} - for _, p := range c.Ports { - ps, err := shlex.ProcessWordsWithMap(p, toEnvMap(d.buildArgs, d.image.Config.Env)) - if err != nil { - return err - } - ports = append(ports, ps...) 
- } - c.Ports = ports - - ps, _, err := nat.ParsePortSpecs(c.Ports) - if err != nil { - return err - } - - if d.image.Config.ExposedPorts == nil { - d.image.Config.ExposedPorts = make(map[string]struct{}) - } - for p := range ps { - d.image.Config.ExposedPorts[string(p)] = struct{}{} - } - - return commitToHistory(&d.image, fmt.Sprintf("EXPOSE %v", ps), false, nil) -} - -func dispatchUser(d *dispatchState, c *instructions.UserCommand, commit bool) error { - d.state = d.state.User(c.User) - d.image.Config.User = c.User - if commit { - return commitToHistory(&d.image, fmt.Sprintf("USER %v", c.User), false, nil) - } - return nil -} - -func dispatchVolume(d *dispatchState, c *instructions.VolumeCommand) error { - if d.image.Config.Volumes == nil { - d.image.Config.Volumes = map[string]struct{}{} - } - for _, v := range c.Volumes { - if v == "" { - return errors.New("VOLUME specified can not be an empty string") - } - d.image.Config.Volumes[v] = struct{}{} - } - return commitToHistory(&d.image, fmt.Sprintf("VOLUME %v", c.Volumes), false, nil) -} - -func dispatchStopSignal(d *dispatchState, c *instructions.StopSignalCommand) error { - if _, err := signal.ParseSignal(c.Signal); err != nil { - return err - } - d.image.Config.StopSignal = c.Signal - return commitToHistory(&d.image, fmt.Sprintf("STOPSIGNAL %v", c.Signal), false, nil) -} - -func dispatchShell(d *dispatchState, c *instructions.ShellCommand) error { - d.image.Config.Shell = c.Shell - return commitToHistory(&d.image, fmt.Sprintf("SHELL %v", c.Shell), false, nil) -} - -func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instructions.KeyValuePairOptional, buildArgValues map[string]string) error { - commitStr := "ARG " + c.Key - buildArg := setKVValue(c.KeyValuePairOptional, buildArgValues) - - if c.Value != nil { - commitStr += "=" + *c.Value - } - if buildArg.Value == nil { - for _, ma := range metaArgs { - if ma.Key == buildArg.Key { - buildArg.Value = ma.Value - } - } - } - - d.buildArgs = append(d.buildArgs, buildArg) - return commitToHistory(&d.image, commitStr, false, nil) -} - -func pathRelativeToWorkingDir(s llb.State, p string) string { - if path.IsAbs(p) { - return p - } - return path.Join(s.GetDir(), p) -} - -func splitWildcards(name string) (string, string) { - i := 0 - for ; i < len(name); i++ { - ch := name[i] - if ch == '\\' { - i++ - } else if ch == '*' || ch == '?' || ch == '[' { - break - } - } - if i == len(name) { - return name, "" - } - - base := path.Base(name[:i]) - if name[:i] == "" || strings.HasSuffix(name[:i], string(filepath.Separator)) { - base = "" - } - return path.Dir(name[:i]), base + name[i:] -} - -func addEnv(env []string, k, v string) []string { - gotOne := false - for i, envVar := range env { - key, _ := parseKeyValue(envVar) - if shell.EqualEnvKeys(key, k) { - env[i] = k + "=" + v - gotOne = true - break - } - } - if !gotOne { - env = append(env, k+"="+v) - } - return env -} - -func parseKeyValue(env string) (string, string) { - parts := strings.SplitN(env, "=", 2) - v := "" - if len(parts) > 1 { - v = parts[1] - } - - return parts[0], v -} - -func setKVValue(kvpo instructions.KeyValuePairOptional, values map[string]string) instructions.KeyValuePairOptional { - if v, ok := values[kvpo.Key]; ok { - kvpo.Value = &v - } - return kvpo -} - -func toEnvMap(args []instructions.KeyValuePairOptional, env []string) map[string]string { - m := shell.BuildEnvs(env) - - for _, arg := range args { - // If key already exists, keep previous value. 
- if _, ok := m[arg.Key]; ok { - continue - } - m[arg.Key] = arg.ValueString() - } - return m -} - -func dfCmd(cmd interface{}) llb.ConstraintsOpt { - // TODO: add fmt.Stringer to instructions.Command to remove interface{} - var cmdStr string - if cmd, ok := cmd.(fmt.Stringer); ok { - cmdStr = cmd.String() - } - if cmd, ok := cmd.(string); ok { - cmdStr = cmd - } - return llb.WithDescription(map[string]string{ - "com.docker.dockerfile.v1.command": cmdStr, - }) -} - -func runCommandString(args []string, buildArgs []instructions.KeyValuePairOptional) string { - var tmpBuildEnv []string - for _, arg := range buildArgs { - tmpBuildEnv = append(tmpBuildEnv, arg.Key+"="+arg.ValueString()) - } - if len(tmpBuildEnv) > 0 { - tmpBuildEnv = append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...) - } - - return strings.Join(append(tmpBuildEnv, args...), " ") -} - -func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State) error { - if st != nil { - msg += " # buildkit" - } - - img.History = append(img.History, specs.History{ - CreatedBy: msg, - Comment: historyComment, - EmptyLayer: !withLayer, - }) - return nil -} - -func isReachable(from, to *dispatchState) (ret bool) { - if from == nil { - return false - } - if from == to || isReachable(from.base, to) { - return true - } - for d := range from.deps { - if isReachable(d, to) { - return true - } - } - return false -} - -func parseUser(str string) (uid uint32, gid uint32, err error) { - if str == "" { - return 0, 0, nil - } - parts := strings.SplitN(str, ":", 2) - for i, v := range parts { - switch i { - case 0: - uid, err = parseUID(v) - if err != nil { - return 0, 0, err - } - if len(parts) == 1 { - gid = uid - } - case 1: - gid, err = parseUID(v) - if err != nil { - return 0, 0, err - } - } - } - return -} - -func parseUID(str string) (uint32, error) { - if str == "root" { - return 0, nil - } - uid, err := strconv.ParseUint(str, 10, 32) - if err != nil { - return 0, err - } - return uint32(uid), nil -} - -func normalizeContextPaths(paths map[string]struct{}) []string { - pathSlice := make([]string, 0, len(paths)) - for p := range paths { - if p == "/" { - return nil - } - pathSlice = append(pathSlice, p) - } - - toDelete := map[string]struct{}{} - for i := range pathSlice { - for j := range pathSlice { - if i == j { - continue - } - if strings.HasPrefix(pathSlice[j], pathSlice[i]+"/") { - delete(paths, pathSlice[j]) - } - } - } - - toSort := make([]string, 0, len(paths)) - for p := range paths { - if _, ok := toDelete[p]; !ok { - toSort = append(toSort, path.Join(".", p)) - } - } - sort.Slice(toSort, func(i, j int) bool { - return toSort[i] < toSort[j] - }) - return toSort -} - -func proxyEnvFromBuildArgs(args map[string]string) *llb.ProxyEnv { - pe := &llb.ProxyEnv{} - isNil := true - for k, v := range args { - if strings.EqualFold(k, "http_proxy") { - pe.HttpProxy = v - isNil = false - } - if strings.EqualFold(k, "https_proxy") { - pe.HttpsProxy = v - isNil = false - } - if strings.EqualFold(k, "ftp_proxy") { - pe.FtpProxy = v - isNil = false - } - if strings.EqualFold(k, "no_proxy") { - pe.NoProxy = v - isNil = false - } - } - if isNil { - return nil - } - return pe -} - -type mutableOutput struct { - llb.Output -} - -func withShell(img Image, args []string) []string { - var shell []string - if len(img.Config.Shell) > 0 { - shell = append([]string{}, img.Config.Shell...) 
- } else { - shell = defaultShell() - } - return append(shell, strings.Join(args, " ")) -} - -func autoDetectPlatform(img Image, target specs.Platform, supported []specs.Platform) specs.Platform { - os := img.OS - arch := img.Architecture - if target.OS == os && target.Architecture == arch { - return target - } - for _, p := range supported { - if p.OS == os && p.Architecture == arch { - return p - } - } - return target -} - -func WithInternalName(name string, a ...interface{}) llb.ConstraintsOpt { - return llb.WithCustomName("[internal] "+name, a...) -} - -func uppercaseCmd(str string) string { - p := strings.SplitN(str, " ", 2) - p[0] = strings.ToUpper(p[0]) - return strings.Join(p, " ") -} - -func processCmdEnv(shlex *shell.Lex, cmd string, env []string) string { - w, err := shlex.ProcessWord(cmd, env) - if err != nil { - return cmd - } - return w -} - -func prefixCommand(ds *dispatchState, str string, prefixPlatform bool, platform *specs.Platform) string { - if ds.cmdTotal == 0 { - return str - } - out := "[" - if prefixPlatform && platform != nil { - out += platforms.Format(*platform) + " " - } - if ds.stageName != "" { - out += ds.stageName + " " - } - ds.cmdIndex++ - out += fmt.Sprintf("%d/%d] ", ds.cmdIndex, ds.cmdTotal) - return out + str -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go deleted file mode 100644 index 5f0cd086023a..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !dfrunmount - -package dockerfile2llb - -import ( - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" -) - -func detectRunMount(cmd *command, allDispatchStates *dispatchStates) bool { - return false -} - -func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*dispatchState, opt dispatchOpt) ([]llb.RunOption, error) { - return nil, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nosecrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nosecrets.go deleted file mode 100644 index d75470215fd3..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nosecrets.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build dfrunmount,!dfsecrets - -package dockerfile2llb - -import ( - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/pkg/errors" -) - -func dispatchSecret(m *instructions.Mount) (llb.RunOption, error) { - return nil, errors.Errorf("secret mounts not allowed") -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nossh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nossh.go deleted file mode 100644 index 8b8afdc38c10..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nossh.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build dfrunmount,!dfssh - -package dockerfile2llb - -import ( - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/pkg/errors" -) - -func dispatchSSH(m *instructions.Mount) (llb.RunOption, error) { - return nil, errors.Errorf("ssh mounts not allowed") -} diff --git 
a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go deleted file mode 100644 index 8214e188e84f..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go +++ /dev/null @@ -1,104 +0,0 @@ -// +build dfrunmount - -package dockerfile2llb - -import ( - "path" - "path/filepath" - - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/pkg/errors" -) - -func detectRunMount(cmd *command, allDispatchStates *dispatchStates) bool { - if c, ok := cmd.Command.(*instructions.RunCommand); ok { - mounts := instructions.GetMounts(c) - sources := make([]*dispatchState, len(mounts)) - for i, mount := range mounts { - if mount.From == "" && mount.Type == instructions.MountTypeCache { - mount.From = emptyImageName - } - from := mount.From - if from == "" || mount.Type == instructions.MountTypeTmpfs { - continue - } - stn, ok := allDispatchStates.findStateByName(from) - if !ok { - stn = &dispatchState{ - stage: instructions.Stage{BaseName: from}, - deps: make(map[*dispatchState]struct{}), - unregistered: true, - } - } - sources[i] = stn - } - cmd.sources = sources - return true - } - - return false -} - -func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*dispatchState, opt dispatchOpt) ([]llb.RunOption, error) { - var out []llb.RunOption - mounts := instructions.GetMounts(c) - - for i, mount := range mounts { - if mount.From == "" && mount.Type == instructions.MountTypeCache { - mount.From = emptyImageName - } - st := opt.buildContext - if mount.From != "" { - st = sources[i].state - } - var mountOpts []llb.MountOption - if mount.Type == instructions.MountTypeTmpfs { - st = llb.Scratch() - mountOpts = append(mountOpts, llb.Tmpfs()) - } - if mount.Type == instructions.MountTypeSecret { - secret, err := dispatchSecret(mount) - if err != nil { - return nil, err - } - out = append(out, secret) - continue - } - if mount.Type == instructions.MountTypeSSH { - ssh, err := dispatchSSH(mount) - if err != nil { - return nil, err - } - out = append(out, ssh) - continue - } - if mount.ReadOnly { - mountOpts = append(mountOpts, llb.Readonly) - } - if mount.Type == instructions.MountTypeCache { - sharing := llb.CacheMountShared - if mount.CacheSharing == instructions.MountSharingPrivate { - sharing = llb.CacheMountPrivate - } - if mount.CacheSharing == instructions.MountSharingLocked { - sharing = llb.CacheMountLocked - } - mountOpts = append(mountOpts, llb.AsPersistentCacheDir(opt.cacheIDNamespace+"/"+mount.CacheID, sharing)) - } - target := mount.Target - if !filepath.IsAbs(filepath.Clean(mount.Target)) { - target = filepath.Join("/", d.state.GetDir(), mount.Target) - } - if target == "/" { - return nil, errors.Errorf("invalid mount target %q", target) - } - if src := path.Join("/", mount.Source); src != "/" { - mountOpts = append(mountOpts, llb.SourcePath(src)) - } - out = append(out, llb.AddMount(target, st, mountOpts...)) - - d.ctxPaths[path.Join("/", filepath.ToSlash(mount.Source))] = struct{}{} - } - return out, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go deleted file mode 100644 index 3e354d5b619a..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go +++ 
/dev/null @@ -1,38 +0,0 @@ -// +build dfsecrets - -package dockerfile2llb - -import ( - "path" - - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/pkg/errors" -) - -func dispatchSecret(m *instructions.Mount) (llb.RunOption, error) { - id := m.CacheID - if m.Source != "" { - id = m.Source - } - - if id == "" { - if m.Target == "" { - return nil, errors.Errorf("one of source, target required") - } - id = path.Base(m.Target) - } - - target := m.Target - if target == "" { - target = "/run/secrets/" + path.Base(id) - } - - opts := []llb.SecretOption{llb.SecretID(id)} - - if !m.Required { - opts = append(opts, llb.SecretOptional) - } - - return llb.AddSecret(target, opts...), nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go deleted file mode 100644 index e7606ff5a29f..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build dfssh - -package dockerfile2llb - -import ( - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/pkg/errors" -) - -func dispatchSSH(m *instructions.Mount) (llb.RunOption, error) { - if m.Source != "" { - return nil, errors.Errorf("ssh does not support source") - } - opts := []llb.SSHOption{llb.SSHID(m.CacheID)} - - if m.Target != "" { - // TODO(AkihiroSuda): support specifying permission bits - opts = append(opts, llb.SSHSocketTarget(m.Target)) - } - - if !m.Required { - opts = append(opts, llb.SSHOptional) - } - - return llb.AddSSHSocket(opts...), nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_test.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_test.go deleted file mode 100644 index 2f9ac7a34e9b..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_test.go +++ /dev/null @@ -1,154 +0,0 @@ -package dockerfile2llb - -import ( - "testing" - - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/moby/buildkit/util/appcontext" - "github.com/stretchr/testify/assert" -) - -func TestDockerfileParsing(t *testing.T) { - t.Parallel() - df := `FROM busybox -ENV FOO bar -COPY f1 f2 /sub/ -RUN ls -l -` - _, _, err := Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) - assert.NoError(t, err) - - df = `FROM busybox AS foo -ENV FOO bar -FROM foo -COPY --from=foo f1 / -COPY --from=0 f2 / - ` - _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) - assert.NoError(t, err) - - df = `FROM busybox AS foo -ENV FOO bar -FROM foo -COPY --from=foo f1 / -COPY --from=0 f2 / - ` - _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{ - Target: "Foo", - }) - assert.NoError(t, err) - - _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{ - Target: "nosuch", - }) - assert.Error(t, err) - - df = `FROM busybox - ADD http://github.com/moby/buildkit/blob/master/README.md / - ` - _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) - assert.NoError(t, err) - - df = `FROM busybox - COPY http://github.com/moby/buildkit/blob/master/README.md / - ` - _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) - assert.EqualError(t, err, "source can't be a URL for COPY") - - df = `FROM "" AS foo` - _, _, err = 
Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) - assert.Error(t, err) - - df = `FROM ${BLANK} AS foo` - _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) - assert.Error(t, err) -} - -func TestAddEnv(t *testing.T) { - // k exists in env as key - // override = true - env := []string{"key1=val1", "key2=val2"} - result := addEnv(env, "key1", "value1") - assert.Equal(t, []string{"key1=value1", "key2=val2"}, result) - - // k does not exist in env as key - // override = true - env = []string{"key1=val1", "key2=val2"} - result = addEnv(env, "key3", "val3") - assert.Equal(t, []string{"key1=val1", "key2=val2", "key3=val3"}, result) - - // env has same keys - // override = true - env = []string{"key1=val1", "key1=val2"} - result = addEnv(env, "key1", "value1") - assert.Equal(t, []string{"key1=value1", "key1=val2"}, result) - - // k matches with key only string in env - // override = true - env = []string{"key1=val1", "key2=val2", "key3"} - result = addEnv(env, "key3", "val3") - assert.Equal(t, []string{"key1=val1", "key2=val2", "key3=val3"}, result) -} - -func TestParseKeyValue(t *testing.T) { - k, v := parseKeyValue("key=val") - assert.Equal(t, "key", k) - assert.Equal(t, "val", v) - - k, v = parseKeyValue("key=") - assert.Equal(t, "key", k) - assert.Equal(t, "", v) - - k, v = parseKeyValue("key") - assert.Equal(t, "key", k) - assert.Equal(t, "", v) -} - -func TestToEnvList(t *testing.T) { - // args has no duplicated key with env - v := "val2" - args := []instructions.KeyValuePairOptional{{Key: "key2", Value: &v}} - env := []string{"key1=val1"} - result := toEnvMap(args, env) - assert.Equal(t, map[string]string{"key1": "val1", "key2": "val2"}, result) - - // value of args is nil - args = []instructions.KeyValuePairOptional{{Key: "key2", Value: nil}} - env = []string{"key1=val1"} - result = toEnvMap(args, env) - assert.Equal(t, map[string]string{"key1": "val1", "key2": ""}, result) - - // args has duplicated key with env - v = "val2" - args = []instructions.KeyValuePairOptional{{Key: "key1", Value: &v}} - env = []string{"key1=val1"} - result = toEnvMap(args, env) - assert.Equal(t, map[string]string{"key1": "val1"}, result) - - v = "val2" - args = []instructions.KeyValuePairOptional{{Key: "key1", Value: &v}} - env = []string{"key1="} - result = toEnvMap(args, env) - assert.Equal(t, map[string]string{"key1": ""}, result) - - v = "val2" - args = []instructions.KeyValuePairOptional{{Key: "key1", Value: &v}} - env = []string{"key1"} - result = toEnvMap(args, env) - assert.Equal(t, map[string]string{"key1": ""}, result) - - // env has duplicated keys - v = "val2" - args = []instructions.KeyValuePairOptional{{Key: "key2", Value: &v}} - env = []string{"key1=val1", "key1=val1_2"} - result = toEnvMap(args, env) - assert.Equal(t, map[string]string{"key1": "val1", "key2": "val2"}, result) - - // args has duplicated keys - v1 := "v1" - v2 := "v2" - args = []instructions.KeyValuePairOptional{{Key: "key2", Value: &v1}, {Key: "key2", Value: &v2}} - env = []string{"key1=val1"} - result = toEnvMap(args, env) - assert.Equal(t, map[string]string{"key1": "val1", "key2": "v1"}, result) -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go deleted file mode 100644 index b5d541d1f5db..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows -
-package dockerfile2llb - -func defaultShell() []string { - return []string{"/bin/sh", "-c"} -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go deleted file mode 100644 index 7693e050863a..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build windows - -package dockerfile2llb - -func defaultShell() []string { - return []string{"cmd", "/S", "/C"} -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go deleted file mode 100644 index cf06b5ad85bb..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go +++ /dev/null @@ -1,38 +0,0 @@ -package dockerfile2llb - -import ( - "bufio" - "io" - "regexp" - "strings" -) - -const keySyntax = "syntax" - -var reDirective = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`) - -func DetectSyntax(r io.Reader) (string, string, bool) { - directives := ParseDirectives(r) - if len(directives) == 0 { - return "", "", false - } - v, ok := directives[keySyntax] - if !ok { - return "", "", false - } - p := strings.SplitN(v, " ", 2) - return p[0], v, true -} - -func ParseDirectives(r io.Reader) map[string]string { - m := map[string]string{} - s := bufio.NewScanner(r) - for s.Scan() { - match := reDirective.FindStringSubmatch(s.Text()) - if len(match) == 0 { - return m - } - m[strings.ToLower(match[1])] = match[2] - } - return m -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives_test.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives_test.go deleted file mode 100644 index 3297fa9c9d76..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package dockerfile2llb - -import ( - "bytes" - "fmt" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestDirectives(t *testing.T) { - t.Parallel() - - dt := `#escape=\ -# key = FOO bar - -# smth -` - - d := ParseDirectives(bytes.NewBuffer([]byte(dt))) - require.Equal(t, len(d), 2, fmt.Sprintf("%+v", d)) - - v, ok := d["escape"] - require.True(t, ok) - require.Equal(t, v, "\\") - - v, ok = d["key"] - require.True(t, ok) - require.Equal(t, v, "FOO bar") - - // for some reason the Moby implementation is case insensitive for escape - dt = `# EScape=\ -# KEY = FOO bar - -# smth -` - - d = ParseDirectives(bytes.NewBuffer([]byte(dt))) - require.Equal(t, len(d), 2, fmt.Sprintf("%+v", d)) - - v, ok = d["escape"] - require.True(t, ok) - require.Equal(t, v, "\\") - - v, ok = d["key"] - require.True(t, ok) - require.Equal(t, v, "FOO bar") -} - -func TestSyntaxDirective(t *testing.T) { - t.Parallel() - - dt := `# syntax = dockerfile:experimental // opts -FROM busybox -` - - ref, cmdline, ok := DetectSyntax(bytes.NewBuffer([]byte(dt))) - require.True(t, ok) - require.Equal(t, ref, "dockerfile:experimental") - require.Equal(t, cmdline, "dockerfile:experimental // opts") - - dt = `FROM busybox -RUN ls -` - ref, cmdline, ok = DetectSyntax(bytes.NewBuffer([]byte(dt))) - require.False(t, ok) - require.Equal(t, ref, "") - require.Equal(t, cmdline, "") - -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go
b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go deleted file mode 100644 index e83e58b657a5..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go +++ /dev/null @@ -1,75 +0,0 @@ -package dockerfile2llb - -import ( - "time" - - "github.com/docker/docker/api/types/strslice" - "github.com/moby/buildkit/util/system" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// ImageConfig is a docker compatible config for an image -type ImageConfig struct { - specs.ImageConfig - - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - - // NetworkDisabled bool `json:",omitempty"` // Is network disabled - // MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} - -// Image is the JSON structure which describes some basic information about the image. -// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. -type Image struct { - specs.Image - - // Config defines the execution parameters which should be used as a base when running a container using the image. - Config ImageConfig `json:"config,omitempty"` -} - -func clone(src Image) Image { - img := src - img.Config = src.Config - img.Config.Env = append([]string{}, src.Config.Env...) - img.Config.Cmd = append([]string{}, src.Config.Cmd...) - img.Config.Entrypoint = append([]string{}, src.Config.Entrypoint...) 
- return img -} - -func emptyImage(platform specs.Platform) Image { - img := Image{ - Image: specs.Image{ - Architecture: platform.Architecture, - OS: platform.OS, - }, - } - img.RootFS.Type = "layers" - img.Config.WorkingDir = "/" - img.Config.Env = []string{"PATH=" + system.DefaultPathEnv} - return img -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform.go deleted file mode 100644 index e1ef78f8bd73..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform.go +++ /dev/null @@ -1,58 +0,0 @@ -package dockerfile2llb - -import ( - "github.com/containerd/containerd/platforms" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -type platformOpt struct { - targetPlatform specs.Platform - buildPlatforms []specs.Platform - implicitTarget bool -} - -func buildPlatformOpt(opt *ConvertOpt) *platformOpt { - buildPlatforms := opt.BuildPlatforms - targetPlatform := opt.TargetPlatform - implicitTargetPlatform := false - - if opt.TargetPlatform != nil && opt.BuildPlatforms == nil { - buildPlatforms = []specs.Platform{*opt.TargetPlatform} - } - if len(buildPlatforms) == 0 { - buildPlatforms = []specs.Platform{platforms.DefaultSpec()} - } - - if opt.TargetPlatform == nil { - implicitTargetPlatform = true - targetPlatform = &buildPlatforms[0] - } - - return &platformOpt{ - targetPlatform: *targetPlatform, - buildPlatforms: buildPlatforms, - implicitTarget: implicitTargetPlatform, - } -} - -func getPlatformArgs(po *platformOpt) []instructions.KeyValuePairOptional { - bp := po.buildPlatforms[0] - tp := po.targetPlatform - m := map[string]string{ - "BUILDPLATFORM": platforms.Format(bp), - "BUILDOS": bp.OS, - "BUILDARCH": bp.Architecture, - "BUILDVARIANT": bp.Variant, - "TARGETPLATFORM": platforms.Format(tp), - "TARGETOS": tp.OS, - "TARGETARCH": tp.Architecture, - "TARGETVARIANT": tp.Variant, - } - opts := make([]instructions.KeyValuePairOptional, 0, len(m)) - for k, v := range m { - s := v - opts = append(opts, instructions.KeyValuePairOptional{Key: k, Value: &s}) - } - return opts -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform_test.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform_test.go deleted file mode 100644 index 4b3e55c2a317..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package dockerfile2llb - -import ( - "testing" - - "github.com/containerd/containerd/platforms" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/stretchr/testify/assert" -) - -func TestResolveBuildPlatforms(t *testing.T) { - dummyPlatform1 := specs.Platform{Architecture: "DummyArchitecture1", OS: "DummyOS1"} - dummyPlatform2 := specs.Platform{Architecture: "DummyArchitecture2", OS: "DummyOS2"} - - // BuildPlatforms is set and TargetPlatform is set - opt := ConvertOpt{TargetPlatform: &dummyPlatform1, BuildPlatforms: []specs.Platform{dummyPlatform2}} - result := buildPlatformOpt(&opt).buildPlatforms - assert.Equal(t, []specs.Platform{dummyPlatform2}, result) - - // BuildPlatforms is not set and TargetPlatform is set - opt = ConvertOpt{TargetPlatform: &dummyPlatform1, BuildPlatforms: nil} - result = buildPlatformOpt(&opt).buildPlatforms - assert.Equal(t, []specs.Platform{dummyPlatform1}, result) - - // BuildPlatforms is set 
and TargetPlatform is not set - opt = ConvertOpt{TargetPlatform: nil, BuildPlatforms: []specs.Platform{dummyPlatform2}} - result = buildPlatformOpt(&opt).buildPlatforms - assert.Equal(t, []specs.Platform{dummyPlatform2}, result) - - // BuildPlatforms is not set and TargetPlatform is not set - opt = ConvertOpt{TargetPlatform: nil, BuildPlatforms: nil} - result = buildPlatformOpt(&opt).buildPlatforms - assert.Equal(t, []specs.Platform{platforms.DefaultSpec()}, result) -} - -func TestResolveTargetPlatform(t *testing.T) { - dummyPlatform := specs.Platform{Architecture: "DummyArchitecture", OS: "DummyOS"} - - // TargetPlatform is set - opt := ConvertOpt{TargetPlatform: &dummyPlatform} - result := buildPlatformOpt(&opt) - assert.Equal(t, dummyPlatform, result.targetPlatform) - - // TargetPlatform is not set - opt = ConvertOpt{TargetPlatform: nil} - result = buildPlatformOpt(&opt) - assert.Equal(t, result.buildPlatforms[0], result.targetPlatform) -} - -func TestImplicitTargetPlatform(t *testing.T) { - dummyPlatform := specs.Platform{Architecture: "DummyArchitecture", OS: "DummyOS"} - - // TargetPlatform is set - opt := ConvertOpt{TargetPlatform: &dummyPlatform} - result := buildPlatformOpt(&opt).implicitTarget - assert.Equal(t, false, result) - - // TargetPlatform is not set - opt = ConvertOpt{TargetPlatform: nil} - result = buildPlatformOpt(&opt).implicitTarget - assert.Equal(t, true, result) -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile_mount_test.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile_mount_test.go deleted file mode 100644 index cde23c609262..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile_mount_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build dfrunmount - -package dockerfile - -import ( - "context" - "os" - "testing" - - "github.com/containerd/continuity/fs/fstest" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/frontend/dockerfile/builder" - "github.com/moby/buildkit/util/testutil/integration" - "github.com/stretchr/testify/require" -) - -var mountTests = []integration.Test{ - testMountContext, -} - -func init() { - allTests = append(allTests, mountTests...) 
-} - -func testMountContext(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM busybox -RUN --mount=target=/context [ "$(cat /context/testfile)" == "contents0" ] -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("testfile", []byte("contents0"), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile_test.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile_test.go deleted file mode 100644 index f6adb8ad82aa..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile_test.go +++ /dev/null @@ -1,2947 +0,0 @@ -package dockerfile - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "os/exec" - "path/filepath" - "runtime" - "sort" - "strings" - "testing" - "time" - - "github.com/containerd/containerd" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/platforms" - "github.com/containerd/continuity/fs/fstest" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/builder" - "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/util/testutil" - "github.com/moby/buildkit/util/testutil/httpserver" - "github.com/moby/buildkit/util/testutil/integration" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -var allTests = []integration.Test{ - testNoSnapshotLeak, - testCmdShell, - testGlobalArg, - testDockerfileDirs, - testDockerfileInvalidCommand, - testDockerfileADDFromURL, - testDockerfileAddArchive, - testDockerfileScratchConfig, - testExportedHistory, - testExposeExpansion, - testUser, - testDockerignore, - testDockerignoreInvalid, - testDockerfileFromGit, - testCopyChown, - testCopyWildcards, - testCopyOverrideFiles, - testMultiStageImplicitFrom, - testCopyVarSubstitution, - testMultiStageCaseInsensitive, - testLabels, - testCacheImportExport, - testReproducibleIDs, - testImportExportReproducibleIDs, - testNoCache, - testDockerfileFromHTTP, - testBuiltinArgs, - testPullScratch, - testSymlinkDestination, - testHTTPDockerfile, - testNoSnapshotLeak, - testCopySymlinks, - testContextChangeDirToFile, - testPlatformArgsImplicit, - testPlatformArgsExplicit, - testExportMultiPlatform, - testQuotedMetaArgs, - testIgnoreEntrypoint, - testCopyThroughSymlinkContext, - testCopyThroughSymlinkMultiStage, - testCopyChownCreateDest, - testEmptyDestDir, -} - -var opts []integration.TestOpt - -type frontend interface { - Solve(context.Context, *client.Client, client.SolveOpt, chan *client.SolveStatus) (*client.SolveResponse, error) - DFCmdArgs(string, string) (string, string) - RequiresBuildctl(t *testing.T) -} - -func init() { - frontends := map[string]interface{}{} - - opts = []integration.TestOpt{ - integration.WithMirroredImages(integration.OfficialImages("busybox:latest")), - 
integration.WithMirroredImages(map[string]string{ - "docker/dockerfile-copy:v0.1.9": "docker.io/" + dockerfile2llb.DefaultCopyImage, - }), - integration.WithMatrix("frontend", frontends), - } - - if os.Getenv("FRONTEND_BUILTIN_ONLY") == "1" { - frontends["builtin"] = &builtinFrontend{} - } else if os.Getenv("FRONTEND_CLIENT_ONLY") == "1" { - frontends["client"] = &clientFrontend{} - } else if gw := os.Getenv("FRONTEND_GATEWAY_ONLY"); gw != "" { - name := "buildkit_test/" + identity.NewID() + ":latest" - opts = append(opts, integration.WithMirroredImages(map[string]string{ - name: gw, - })) - frontends["gateway"] = &gatewayFrontend{gw: name} - } else { - frontends["builtin"] = &builtinFrontend{} - frontends["client"] = &clientFrontend{} - } -} - -func TestIntegration(t *testing.T) { - integration.Run(t, allTests, opts...) -} - -func testEmptyDestDir(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM busybox -ENV empty="" -COPY testfile $empty -RUN [ "$(cat testfile)" == "contents0" ] -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("testfile", []byte("contents0"), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) -} - -func testCopyChownCreateDest(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM busybox -RUN adduser -D user -COPY --chown=user:user . /dest -RUN [ "$(stat -c "%U %G" /dest)" == "user user" ] -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) -} - -func testCopyThroughSymlinkContext(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM scratch -COPY link/foo . -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.Symlink("sub", "link"), - fstest.CreateDir("sub", 0700), - fstest.CreateFile("sub/foo", []byte(`contents`), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) - require.NoError(t, err) - require.Equal(t, "contents", string(dt)) -} - -func testCopyThroughSymlinkMultiStage(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM busybox AS build -RUN mkdir -p /out/sub && ln -s /out/sub /sub && ln -s out/sub /sub2 && echo -n "data" > /sub/foo -FROM scratch -COPY --from=build /sub/foo . 
-COPY --from=build /sub2/foo bar -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) - require.NoError(t, err) - require.Equal(t, "data", string(dt)) -} - -func testIgnoreEntrypoint(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM busybox -ENTRYPOINT ["/nosuchcmd"] -RUN ["ls"] -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) -} - -func testQuotedMetaArgs(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -ARG a1="box" -ARG a2="$a1-foo" -FROM busy$a1 AS build -ARG a2 -ARG a3="bar-$a2" -RUN echo -n $a3 > /out -FROM scratch -COPY --from=build /out . -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "out")) - require.NoError(t, err) - require.Equal(t, "bar-box-foo", string(dt)) -} - -func testExportMultiPlatform(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM scratch -ARG TARGETARCH -ARG TARGETPLATFORM -LABEL target=$TARGETPLATFORM -COPY arch-$TARGETARCH whoami -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("arch-arm", []byte(`i am arm`), 0600), - fstest.CreateFile("arch-amd64", []byte(`i am amd64`), 0600), - fstest.CreateFile("arch-s390x", []byte(`i am s390x`), 0600), - fstest.CreateFile("arch-ppc64le", []byte(`i am ppc64le`), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - FrontendAttrs: map[string]string{ - "platform": "windows/amd64,linux/arm,linux/s390x", - }, - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - }, nil) - 
require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "windows_amd64/whoami")) - require.NoError(t, err) - require.Equal(t, "i am amd64", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "linux_arm_v7/whoami")) - require.NoError(t, err) - require.Equal(t, "i am arm", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "linux_s390x/whoami")) - require.NoError(t, err) - require.Equal(t, "i am s390x", string(dt)) - - // repeat with oci exporter - - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - out := filepath.Join(destDir, "out.tar") - outW, err := os.Create(out) - require.NoError(t, err) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - FrontendAttrs: map[string]string{ - "platform": "windows/amd64,linux/arm/v6,linux/ppc64le", - }, - Exporter: client.ExporterOCI, - ExporterOutput: outW, - }, nil) - require.NoError(t, err) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "out.tar")) - require.NoError(t, err) - - m, err := testutil.ReadTarToMap(dt, false) - require.NoError(t, err) - - var idx ocispec.Index - err = json.Unmarshal(m["index.json"].Data, &idx) - require.NoError(t, err) - - mlistHex := idx.Manifests[0].Digest.Hex() - - idx = ocispec.Index{} - err = json.Unmarshal(m["blobs/sha256/"+mlistHex].Data, &idx) - require.NoError(t, err) - - require.Equal(t, 3, len(idx.Manifests)) - - for i, exp := range []struct { - p string - os string - arch string - dt string - }{ - {p: "windows/amd64", os: "windows", arch: "amd64", dt: "i am amd64"}, - {p: "linux/arm/v6", os: "linux", arch: "arm", dt: "i am arm"}, - {p: "linux/ppc64le", os: "linux", arch: "ppc64le", dt: "i am ppc64le"}, - } { - t.Run(exp.p, func(t *testing.T) { - require.Equal(t, exp.p, platforms.Format(*idx.Manifests[i].Platform)) - - var mfst ocispec.Manifest - err = json.Unmarshal(m["blobs/sha256/"+idx.Manifests[i].Digest.Hex()].Data, &mfst) - require.NoError(t, err) - - require.Equal(t, 1, len(mfst.Layers)) - - m2, err := testutil.ReadTarToMap(m["blobs/sha256/"+mfst.Layers[0].Digest.Hex()].Data, true) - require.NoError(t, err) - require.Equal(t, exp.dt, string(m2["whoami"].Data)) - - var img ocispec.Image - err = json.Unmarshal(m["blobs/sha256/"+mfst.Config.Digest.Hex()].Data, &img) - require.NoError(t, err) - - require.Equal(t, exp.os, img.OS) - require.Equal(t, exp.arch, img.Architecture) - v, ok := img.Config.Labels["target"] - require.True(t, ok) - require.Equal(t, exp.p, v) - }) - } -} - -// tonistiigi/fsutil#46 -func testContextChangeDirToFile(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM scratch -COPY foo / -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateDir("foo", 0700), - fstest.CreateFile("foo/bar", []byte(`contents`), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dir, err = tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("foo", []byte(`contents2`), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - destDir, err := ioutil.TempDir("", 
"buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) - require.NoError(t, err) - require.Equal(t, "contents2", string(dt)) -} - -func testNoSnapshotLeak(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM scratch -COPY foo / -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("foo", []byte(`contents`), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - du, err := c.DiskUsage(context.TODO()) - require.NoError(t, err) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - du2, err := c.DiskUsage(context.TODO()) - require.NoError(t, err) - - require.Equal(t, len(du), len(du2)) -} - -func testCopySymlinks(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM scratch -COPY foo / -COPY sub/l* alllinks/ -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("bar", []byte(`bar-contents`), 0600), - fstest.Symlink("bar", "foo"), - fstest.CreateDir("sub", 0700), - fstest.CreateFile("sub/lfile", []byte(`baz-contents`), 0600), - fstest.Symlink("subfile", "sub/l0"), - fstest.CreateFile("sub/subfile", []byte(`subfile-contents`), 0600), - fstest.Symlink("second", "sub/l1"), - fstest.Symlink("baz", "sub/second"), - fstest.CreateFile("sub/baz", []byte(`baz-contents`), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) -} - -func testHTTPDockerfile(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM busybox -RUN echo -n "foo-contents" > /foo -FROM scratch -COPY --from=0 /foo /foo -`) - - srcDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(srcDir) - - err = ioutil.WriteFile(filepath.Join(srcDir, "Dockerfile"), dockerfile, 0600) - require.NoError(t, err) - - resp := httpserver.Response{ - Etag: identity.NewID(), - Content: dockerfile, - } - - server := httpserver.NewTestServer(map[string]httpserver.Response{ - "/df": resp, - }) - defer server.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "context": server.URL + "/df", - "filename": "mydockerfile", // this is bogus, any name should work - }, - 
Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) - require.NoError(t, err) - require.Equal(t, "foo-contents", string(dt)) - -} - -func testCmdShell(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - var cdAddress string - if cd, ok := sb.(interface { - ContainerdAddress() string - }); !ok { - t.Skip("requires local image store") - } else { - cdAddress = cd.ContainerdAddress() - } - - dockerfile := []byte(` -FROM scratch -CMD ["test"] -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - target := "docker.io/moby/cmdoverridetest:latest" - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterImage, - ExporterAttrs: map[string]string{ - "name": target, - }, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dockerfile = []byte(` -FROM docker.io/moby/cmdoverridetest:latest -SHELL ["ls"] -ENTRYPOINT my entrypoint -`) - - dir, err = tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - target = "docker.io/moby/cmdoverridetest2:latest" - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterImage, - ExporterAttrs: map[string]string{ - "name": target, - }, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - ctr, err := newContainerd(cdAddress) - require.NoError(t, err) - defer ctr.Close() - - ctx := namespaces.WithNamespace(context.Background(), "buildkit") - - img, err := ctr.ImageService().Get(ctx, target) - require.NoError(t, err) - - desc, err := img.Config(ctx, ctr.ContentStore(), platforms.Default()) - require.NoError(t, err) - - dt, err := content.ReadBlob(ctx, ctr.ContentStore(), desc) - require.NoError(t, err) - - var ociimg ocispec.Image - err = json.Unmarshal(dt, &ociimg) - require.NoError(t, err) - - require.Equal(t, []string(nil), ociimg.Config.Cmd) - require.Equal(t, []string{"ls", "my entrypoint"}, ociimg.Config.Entrypoint) -} - -func testPullScratch(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - var cdAddress string - if cd, ok := sb.(interface { - ContainerdAddress() string - }); !ok { - t.Skip("requires local image store") - } else { - cdAddress = cd.ContainerdAddress() - } - - dockerfile := []byte(` -FROM scratch -LABEL foo=bar -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - target := "docker.io/moby/testpullscratch:latest" - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterImage, - ExporterAttrs: map[string]string{ - "name": target, - }, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dockerfile = []byte(` -FROM docker.io/moby/testpullscratch:latest -LABEL bar=baz -COPY foo . 
-`) - - dir, err = tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("foo", []byte("foo-contents"), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - target = "docker.io/moby/testpullscratch2:latest" - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterImage, - ExporterAttrs: map[string]string{ - "name": target, - }, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - ctr, err := newContainerd(cdAddress) - require.NoError(t, err) - defer ctr.Close() - - ctx := namespaces.WithNamespace(context.Background(), "buildkit") - - img, err := ctr.ImageService().Get(ctx, target) - require.NoError(t, err) - - desc, err := img.Config(ctx, ctr.ContentStore(), platforms.Default()) - require.NoError(t, err) - - dt, err := content.ReadBlob(ctx, ctr.ContentStore(), desc) - require.NoError(t, err) - - var ociimg ocispec.Image - err = json.Unmarshal(dt, &ociimg) - require.NoError(t, err) - - require.Equal(t, "layers", ociimg.RootFS.Type) - require.Equal(t, 1, len(ociimg.RootFS.DiffIDs)) - v, ok := ociimg.Config.Labels["foo"] - require.True(t, ok) - require.Equal(t, "bar", v) - v, ok = ociimg.Config.Labels["bar"] - require.True(t, ok) - require.Equal(t, "baz", v) - - echo := llb.Image("busybox"). - Run(llb.Shlex(`sh -c "echo -n foo0 > /empty/foo"`)). - AddMount("/empty", llb.Image("docker.io/moby/testpullscratch:latest")) - - def, err := echo.Marshal() - require.NoError(t, err) - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = c.Solve(context.TODO(), def, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo")) - require.NoError(t, err) - require.Equal(t, "foo0", string(dt)) -} - -func testGlobalArg(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -ARG tag=nosuchtag -FROM busybox:${tag} -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "build-arg:tag": "latest", - }, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) -} - -func testDockerfileDirs(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - f.RequiresBuildctl(t) - - dockerfile := []byte(` - FROM busybox - COPY foo /foo2 - COPY foo / - RUN echo -n bar > foo3 - RUN test -f foo - RUN cmp -s foo foo2 - RUN cmp -s foo foo3 -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("foo", []byte("bar"), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - args, trace := f.DFCmdArgs(dir, dir) - defer os.RemoveAll(trace) - - cmd := sb.Cmd(args) - require.NoError(t, cmd.Run()) - - _, err = os.Stat(trace) - require.NoError(t, err) - - // relative urls - args, trace = f.DFCmdArgs(".", ".") - defer os.RemoveAll(trace) - - cmd = sb.Cmd(args) - cmd.Dir = dir - require.NoError(t, cmd.Run()) - - _, err = os.Stat(trace) - 
require.NoError(t, err) - - // different context and dockerfile directories - dir1, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir1) - - dir2, err := tmpdir( - fstest.CreateFile("foo", []byte("bar"), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir2) - - args, trace = f.DFCmdArgs(dir2, dir1) - defer os.RemoveAll(trace) - - cmd = sb.Cmd(args) - cmd.Dir = dir - require.NoError(t, cmd.Run()) - - _, err = os.Stat(trace) - require.NoError(t, err) - - // TODO: test trace file output, cache hits, logs etc. - // TODO: output metadata about original dockerfile command in trace -} - -func testDockerfileInvalidCommand(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - f.RequiresBuildctl(t) - dockerfile := []byte(` - FROM busybox - RUN invalidcmd -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - args, trace := f.DFCmdArgs(dir, dir) - defer os.RemoveAll(trace) - - cmd := sb.Cmd(args) - stdout := new(bytes.Buffer) - cmd.Stderr = stdout - err = cmd.Run() - require.Error(t, err) - require.Contains(t, stdout.String(), "/bin/sh -c invalidcmd") - require.Contains(t, stdout.String(), "executor failed running") -} - -func testDockerfileADDFromURL(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - f.RequiresBuildctl(t) - - modTime := time.Now().Add(-24 * time.Hour) // avoid false positive with current time - - resp := httpserver.Response{ - Etag: identity.NewID(), - Content: []byte("content1"), - } - - resp2 := httpserver.Response{ - Etag: identity.NewID(), - LastModified: &modTime, - Content: []byte("content2"), - } - - server := httpserver.NewTestServer(map[string]httpserver.Response{ - "/foo": resp, - "/": resp2, - }) - defer server.Close() - - dockerfile := []byte(fmt.Sprintf(` -FROM scratch -ADD %s /dest/ -`, server.URL+"/foo")) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - args, trace := f.DFCmdArgs(dir, dir) - defer os.RemoveAll(trace) - - destDir, err := tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) - - cmd := sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir)) - err = cmd.Run() - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest/foo")) - require.NoError(t, err) - require.Equal(t, []byte("content1"), dt) - - // test the default properties - dockerfile = []byte(fmt.Sprintf(` -FROM scratch -ADD %s /dest/ -`, server.URL+"/")) - - dir, err = tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - args, trace = f.DFCmdArgs(dir, dir) - defer os.RemoveAll(trace) - - destDir, err = tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) - - cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir)) - err = cmd.Run() - require.NoError(t, err) - - destFile := filepath.Join(destDir, "dest/__unnamed__") - dt, err = ioutil.ReadFile(destFile) - require.NoError(t, err) - require.Equal(t, []byte("content2"), dt) - - fi, err := os.Stat(destFile) - require.NoError(t, err) - require.Equal(t, modTime.Format(http.TimeFormat), fi.ModTime().Format(http.TimeFormat)) -} - -func testDockerfileAddArchive(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - f.RequiresBuildctl(t) - - buf := bytes.NewBuffer(nil) - tw := 
tar.NewWriter(buf) - expectedContent := []byte("content0") - err := tw.WriteHeader(&tar.Header{ - Name: "foo", - Typeflag: tar.TypeReg, - Size: int64(len(expectedContent)), - Mode: 0644, - }) - require.NoError(t, err) - _, err = tw.Write(expectedContent) - require.NoError(t, err) - err = tw.Close() - require.NoError(t, err) - - dockerfile := []byte(` -FROM scratch -ADD t.tar / -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("t.tar", buf.Bytes(), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - args, trace := f.DFCmdArgs(dir, dir) - defer os.RemoveAll(trace) - - destDir, err := tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) - - cmd := sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir)) - require.NoError(t, cmd.Run()) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) - require.NoError(t, err) - require.Equal(t, expectedContent, dt) - - // add gzip tar - buf2 := bytes.NewBuffer(nil) - gz := gzip.NewWriter(buf2) - _, err = gz.Write(buf.Bytes()) - require.NoError(t, err) - err = gz.Close() - require.NoError(t, err) - - dockerfile = []byte(` -FROM scratch -ADD t.tar.gz / -`) - - dir, err = tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("t.tar.gz", buf2.Bytes(), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - args, trace = f.DFCmdArgs(dir, dir) - defer os.RemoveAll(trace) - - destDir, err = tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) - - cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir)) - require.NoError(t, cmd.Run()) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo")) - require.NoError(t, err) - require.Equal(t, expectedContent, dt) - - // COPY doesn't extract - dockerfile = []byte(` -FROM scratch -COPY t.tar.gz / -`) - - dir, err = tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("t.tar.gz", buf2.Bytes(), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - args, trace = f.DFCmdArgs(dir, dir) - defer os.RemoveAll(trace) - - destDir, err = tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) - - cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir)) - require.NoError(t, cmd.Run()) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "t.tar.gz")) - require.NoError(t, err) - require.Equal(t, buf2.Bytes(), dt) - - // ADD from URL doesn't extract - resp := httpserver.Response{ - Etag: identity.NewID(), - Content: buf2.Bytes(), - } - - server := httpserver.NewTestServer(map[string]httpserver.Response{ - "/t.tar.gz": resp, - }) - defer server.Close() - - dockerfile = []byte(fmt.Sprintf(` -FROM scratch -ADD %s / -`, server.URL+"/t.tar.gz")) - - dir, err = tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - args, trace = f.DFCmdArgs(dir, dir) - defer os.RemoveAll(trace) - - destDir, err = tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) - - cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir)) - require.NoError(t, cmd.Run()) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "t.tar.gz")) - require.NoError(t, err) - require.Equal(t, buf2.Bytes(), dt) - - // https://github.com/moby/buildkit/issues/386 - dockerfile = []byte(fmt.Sprintf(` -FROM scratch -ADD %s /newname.tar.gz -`, server.URL+"/t.tar.gz")) - - dir, err = tmpdir( - 
fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - args, trace = f.DFCmdArgs(dir, dir) - defer os.RemoveAll(trace) - - destDir, err = tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) - - cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir)) - require.NoError(t, cmd.Run()) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "newname.tar.gz")) - require.NoError(t, err) - require.Equal(t, buf2.Bytes(), dt) -} - -func testSymlinkDestination(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - f.RequiresBuildctl(t) - - buf := bytes.NewBuffer(nil) - tw := tar.NewWriter(buf) - expectedContent := []byte("content0") - err := tw.WriteHeader(&tar.Header{ - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "../tmp/symlink-target", - Mode: 0755, - }) - require.NoError(t, err) - err = tw.Close() - require.NoError(t, err) - - dockerfile := []byte(` -FROM scratch -ADD t.tar / -COPY foo /symlink/ -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("foo", expectedContent, 0600), - fstest.CreateFile("t.tar", buf.Bytes(), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - args, trace := f.DFCmdArgs(dir, dir) - defer os.RemoveAll(trace) - - destDir, err := tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) - - cmd := sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir)) - require.NoError(t, cmd.Run()) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "tmp/symlink-target/foo")) - require.NoError(t, err) - require.Equal(t, expectedContent, dt) -} - -func testDockerfileScratchConfig(t *testing.T, sb integration.Sandbox) { - var cdAddress string - if cd, ok := sb.(interface { - ContainerdAddress() string - }); !ok { - t.Skip("only for containerd worker") - } else { - cdAddress = cd.ContainerdAddress() - } - - f := getFrontend(t, sb) - f.RequiresBuildctl(t) - dockerfile := []byte(` -FROM scratch -ENV foo=bar -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - args, trace := f.DFCmdArgs(dir, dir) - defer os.RemoveAll(trace) - - target := "example.com/moby/dockerfilescratch:test" - cmd := sb.Cmd(args + " --exporter=image --exporter-opt=name=" + target) - err = cmd.Run() - require.NoError(t, err) - - client, err := newContainerd(cdAddress) - require.NoError(t, err) - defer client.Close() - - ctx := namespaces.WithNamespace(context.Background(), "buildkit") - - img, err := client.ImageService().Get(ctx, target) - require.NoError(t, err) - - desc, err := img.Config(ctx, client.ContentStore(), platforms.Default()) - require.NoError(t, err) - - dt, err := content.ReadBlob(ctx, client.ContentStore(), desc) - require.NoError(t, err) - - var ociimg ocispec.Image - err = json.Unmarshal(dt, &ociimg) - require.NoError(t, err) - - require.NotEqual(t, "", ociimg.OS) - require.NotEqual(t, "", ociimg.Architecture) - require.NotEqual(t, "", ociimg.Config.WorkingDir) - require.Equal(t, "layers", ociimg.RootFS.Type) - require.Equal(t, 0, len(ociimg.RootFS.DiffIDs)) - - require.Equal(t, 1, len(ociimg.History)) - require.Contains(t, ociimg.History[0].CreatedBy, "ENV foo=bar") - require.Equal(t, true, ociimg.History[0].EmptyLayer) - - require.Contains(t, ociimg.Config.Env, "foo=bar") - require.Condition(t, func() bool { - for _, env := range ociimg.Config.Env { - if strings.HasPrefix(env, "PATH=") { - return 
true - } - } - return false - }) -} - -func testExposeExpansion(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM scratch -ARG PORTS="3000 4000/udp" -EXPOSE $PORTS -EXPOSE 5000 -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - target := "example.com/moby/dockerfileexpansion:test" - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterImage, - ExporterAttrs: map[string]string{ - "name": target, - }, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - var cdAddress string - if cd, ok := sb.(interface { - ContainerdAddress() string - }); !ok { - return - } else { - cdAddress = cd.ContainerdAddress() - } - - client, err := newContainerd(cdAddress) - require.NoError(t, err) - defer client.Close() - - ctx := namespaces.WithNamespace(context.Background(), "buildkit") - - img, err := client.ImageService().Get(ctx, target) - require.NoError(t, err) - - desc, err := img.Config(ctx, client.ContentStore(), platforms.Default()) - require.NoError(t, err) - - dt, err := content.ReadBlob(ctx, client.ContentStore(), desc) - require.NoError(t, err) - - var ociimg ocispec.Image - err = json.Unmarshal(dt, &ociimg) - require.NoError(t, err) - - require.Equal(t, 3, len(ociimg.Config.ExposedPorts)) - - var ports []string - for p := range ociimg.Config.ExposedPorts { - ports = append(ports, p) - } - - sort.Strings(ports) - - require.Equal(t, "3000/tcp", ports[0]) - require.Equal(t, "4000/udp", ports[1]) - require.Equal(t, "5000/tcp", ports[2]) -} - -func testDockerignore(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM scratch -COPY . . 
-`) - - dockerignore := []byte(` -ba* -Dockerfile -!bay -.dockerignore -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("foo", []byte(`foo-contents`), 0600), - fstest.CreateFile("bar", []byte(`bar-contents`), 0600), - fstest.CreateFile("baz", []byte(`baz-contents`), 0600), - fstest.CreateFile("bay", []byte(`bay-contents`), 0600), - fstest.CreateFile(".dockerignore", dockerignore, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) - require.NoError(t, err) - require.Equal(t, "foo-contents", string(dt)) - - _, err = os.Stat(filepath.Join(destDir, ".dockerignore")) - require.Error(t, err) - require.True(t, os.IsNotExist(err)) - - _, err = os.Stat(filepath.Join(destDir, "Dockerfile")) - require.Error(t, err) - require.True(t, os.IsNotExist(err)) - - _, err = os.Stat(filepath.Join(destDir, "bar")) - require.Error(t, err) - require.True(t, os.IsNotExist(err)) - - _, err = os.Stat(filepath.Join(destDir, "baz")) - require.Error(t, err) - require.True(t, os.IsNotExist(err)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "bay")) - require.NoError(t, err) - require.Equal(t, "bay-contents", string(dt)) -} - -func testDockerignoreInvalid(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM scratch -COPY . . 
-`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile(".dockerignore", []byte("!\n"), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - defer cancel() - - c, err := client.New(ctx, sb.Address()) - require.NoError(t, err) - defer c.Close() - - _, err = f.Solve(ctx, c, client.SolveOpt{ - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - // err is either the expected error due to invalid dockerignore or error from the timeout - require.Error(t, err) - select { - case <-ctx.Done(): - t.Fatal("timed out") - default: - } -} - -func testExportedHistory(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - f.RequiresBuildctl(t) - - // using multi-stage to test that history is scoped to one stage - dockerfile := []byte(` -FROM busybox AS base -ENV foo=bar -COPY foo /foo2 -FROM busybox -COPY --from=base foo2 foo3 -WORKDIR / -RUN echo bar > foo4 -RUN ["ls"] -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("foo", []byte("contents0"), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - args, trace := f.DFCmdArgs(dir, dir) - defer os.RemoveAll(trace) - - target := "example.com/moby/dockerfilescratch:test" - cmd := sb.Cmd(args + " --exporter=image --exporter-opt=name=" + target) - require.NoError(t, cmd.Run()) - - // TODO: expose this test to OCI worker - - var cdAddress string - if cd, ok := sb.(interface { - ContainerdAddress() string - }); !ok { - t.Skip("only for containerd worker") - } else { - cdAddress = cd.ContainerdAddress() - } - - client, err := newContainerd(cdAddress) - require.NoError(t, err) - defer client.Close() - - ctx := namespaces.WithNamespace(context.Background(), "buildkit") - - img, err := client.ImageService().Get(ctx, target) - require.NoError(t, err) - - desc, err := img.Config(ctx, client.ContentStore(), platforms.Default()) - require.NoError(t, err) - - dt, err := content.ReadBlob(ctx, client.ContentStore(), desc) - require.NoError(t, err) - - var ociimg ocispec.Image - err = json.Unmarshal(dt, &ociimg) - require.NoError(t, err) - - require.Equal(t, "layers", ociimg.RootFS.Type) - // this depends on busybox. 
should be ok after freezing images - require.Equal(t, 3, len(ociimg.RootFS.DiffIDs)) - - require.Equal(t, 6, len(ociimg.History)) - require.Contains(t, ociimg.History[2].CreatedBy, "COPY foo2 foo3") - require.Equal(t, false, ociimg.History[2].EmptyLayer) - require.Contains(t, ociimg.History[3].CreatedBy, "WORKDIR /") - require.Equal(t, true, ociimg.History[3].EmptyLayer) - require.Contains(t, ociimg.History[4].CreatedBy, "echo bar > foo4") - require.Equal(t, false, ociimg.History[4].EmptyLayer) - require.Contains(t, ociimg.History[5].CreatedBy, "RUN ls") - require.Equal(t, true, ociimg.History[5].EmptyLayer) -} - -func testUser(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM busybox AS base -RUN mkdir -m 0777 /out -RUN id -un > /out/rootuser - -# Make sure our defaults work -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] - -# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) -USER root -RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ] - -# Setup dockerio user and group -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd && \ - echo 'dockerio:x:1001:' >> /etc/group - -# Make sure we can switch to our user and all the information is exactly as we expect it to be -USER dockerio -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] - -# Switch back to root and double check that worked exactly as we might expect it to -USER root -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] && \ - # Add a "supplementary" group for our dockerio user - echo 'supplementary:x:1002:dockerio' >> /etc/group - -# ... 
and then go verify that we get it like we expect -USER dockerio -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] -USER 1001 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] - -# super test the new "user:group" syntax -USER dockerio:dockerio -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] -USER 1001:dockerio -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] -USER dockerio:1001 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] -USER 1001:1001 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] -USER dockerio:supplementary -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] -USER dockerio:1002 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] -USER 1001:supplementary -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] -USER 1001:1002 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] - -# make sure unknown uid/gid still works properly -USER 1042:1043 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ] -USER daemon -RUN id -un > /out/daemonuser -FROM scratch -COPY --from=base /out / -USER nobody -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "rootuser")) - require.NoError(t, err) - require.Equal(t, "root\n", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "daemonuser")) - require.NoError(t, err) - require.Equal(t, "daemon\n", string(dt)) - - // test user in exported - target := "example.com/moby/dockerfileuser:test" - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterImage, - ExporterAttrs: map[string]string{ - "name": target, - }, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - var cdAddress string - if cd, ok := sb.(interface { - ContainerdAddress() string - }); !ok { - return - } else { - cdAddress = cd.ContainerdAddress() - } - - client, err := newContainerd(cdAddress) - require.NoError(t, err) - defer client.Close() - - ctx := namespaces.WithNamespace(context.Background(), "buildkit") - - img, err := client.ImageService().Get(ctx, target) - require.NoError(t, err) - - desc, err := img.Config(ctx, client.ContentStore(), platforms.Default()) - require.NoError(t, err) - - dt, err = 
content.ReadBlob(ctx, client.ContentStore(), desc) - require.NoError(t, err) - - var ociimg ocispec.Image - err = json.Unmarshal(dt, &ociimg) - require.NoError(t, err) - - require.Equal(t, "nobody", ociimg.Config.User) -} - -func testCopyChown(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM busybox AS base -RUN mkdir -m 0777 /out -COPY --chown=daemon foo / -COPY --chown=1000:nogroup bar /baz -RUN stat -c "%U %G" /foo > /out/fooowner -RUN stat -c "%u %G" /baz/sub > /out/subowner -FROM scratch -COPY --from=base /out / -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("foo", []byte(`foo-contents`), 0600), - fstest.CreateDir("bar", 0700), - fstest.CreateFile("bar/sub", nil, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "fooowner")) - require.NoError(t, err) - require.Equal(t, "daemon daemon\n", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "subowner")) - require.NoError(t, err) - require.Equal(t, "1000 nogroup\n", string(dt)) -} - -func testCopyOverrideFiles(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM scratch AS base -COPY sub sub -COPY sub sub -COPY files/foo.go dest/foo.go -COPY files/foo.go dest/foo.go -COPY files dest -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateDir("sub", 0700), - fstest.CreateDir("sub/dir1", 0700), - fstest.CreateDir("sub/dir1/dir2", 0700), - fstest.CreateFile("sub/dir1/dir2/foo", []byte(`foo-contents`), 0600), - fstest.CreateDir("files", 0700), - fstest.CreateFile("files/foo.go", []byte(`foo.go-contents`), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "sub/dir1/dir2/foo")) - require.NoError(t, err) - require.Equal(t, "foo-contents", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "dest/foo.go")) - require.NoError(t, err) - require.Equal(t, "foo.go-contents", string(dt)) -} - -func testCopyVarSubstitution(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM scratch AS base -ENV FOO bar -COPY $FOO baz -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("bar", []byte(`bar-contents`), 0600), - ) - - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := 
ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "baz")) - require.NoError(t, err) - require.Equal(t, "bar-contents", string(dt)) -} - -func testCopyWildcards(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM scratch AS base -COPY *.go /gofiles/ -COPY f*.go foo2.go -COPY sub/* /subdest/ -COPY sub/*/dir2/foo /subdest2/ -COPY sub/*/dir2/foo /subdest3/bar -COPY . all/ -COPY sub/dir1/ subdest4 -COPY sub/dir1/. subdest5 -COPY sub/dir1 subdest6 -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("foo.go", []byte(`foo-contents`), 0600), - fstest.CreateFile("bar.go", []byte(`bar-contents`), 0600), - fstest.CreateDir("sub", 0700), - fstest.CreateDir("sub/dir1", 0700), - fstest.CreateDir("sub/dir1/dir2", 0700), - fstest.CreateFile("sub/dir1/dir2/foo", []byte(`foo-contents`), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "gofiles/foo.go")) - require.NoError(t, err) - require.Equal(t, "foo-contents", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "gofiles/bar.go")) - require.NoError(t, err) - require.Equal(t, "bar-contents", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo2.go")) - require.NoError(t, err) - require.Equal(t, "foo-contents", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest/dir1/dir2/foo")) - require.NoError(t, err) - require.Equal(t, "foo-contents", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest2/foo")) - require.NoError(t, err) - require.Equal(t, "foo-contents", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest3/bar")) - require.NoError(t, err) - require.Equal(t, "foo-contents", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "all/foo.go")) - require.NoError(t, err) - require.Equal(t, "foo-contents", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest4/dir2/foo")) - require.NoError(t, err) - require.Equal(t, "foo-contents", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest5/dir2/foo")) - require.NoError(t, err) - require.Equal(t, "foo-contents", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest6/dir2/foo")) - require.NoError(t, err) - require.Equal(t, "foo-contents", string(dt)) -} - -func testDockerfileFromGit(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - gitDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(gitDir) - - dockerfile := ` -FROM busybox AS build -RUN echo -n fromgit > foo -FROM scratch -COPY --from=build foo bar -` - - err = 
ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600) - require.NoError(t, err) - - err = runShell(gitDir, - "git init", - "git config --local user.email test", - "git config --local user.name test", - "git add Dockerfile", - "git commit -m initial", - "git branch first", - ) - require.NoError(t, err) - - dockerfile += ` -COPY --from=build foo bar2 -` - - err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600) - require.NoError(t, err) - - err = runShell(gitDir, - "git add Dockerfile", - "git commit -m second", - "git update-server-info", - ) - require.NoError(t, err) - - server := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(gitDir)))) - defer server.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "context": server.URL + "/.git#first", - }, - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar")) - require.NoError(t, err) - require.Equal(t, "fromgit", string(dt)) - - _, err = os.Stat(filepath.Join(destDir, "bar2")) - require.Error(t, err) - require.True(t, os.IsNotExist(err)) - - // second request from master branch contains both files - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "context": server.URL + "/.git", - }, - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - }, nil) - require.NoError(t, err) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "bar")) - require.NoError(t, err) - require.Equal(t, "fromgit", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "bar2")) - require.NoError(t, err) - require.Equal(t, "fromgit", string(dt)) -} - -func testDockerfileFromHTTP(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - buf := bytes.NewBuffer(nil) - w := tar.NewWriter(buf) - - writeFile := func(fn, dt string) { - err := w.WriteHeader(&tar.Header{ - Name: fn, - Mode: 0600, - Size: int64(len(dt)), - Typeflag: tar.TypeReg, - }) - require.NoError(t, err) - _, err = w.Write([]byte(dt)) - require.NoError(t, err) - } - - writeFile("mydockerfile", `FROM scratch -COPY foo bar -`) - - writeFile("foo", "foo-contents") - - require.NoError(t, w.Flush()) - - resp := httpserver.Response{ - Etag: identity.NewID(), - Content: buf.Bytes(), - } - - server := httpserver.NewTestServer(map[string]httpserver.Response{ - "/myurl": resp, - }) - defer server.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "context": server.URL + "/myurl", - "filename": "mydockerfile", - }, - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar")) - require.NoError(t, err) - require.Equal(t, "foo-contents", string(dt)) -} - -func testMultiStageImplicitFrom(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := 
[]byte(` -FROM scratch -COPY --from=busybox /etc/passwd test -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "test")) - require.NoError(t, err) - require.Contains(t, string(dt), "root") - - // testing masked image will load actual stage - - dockerfile = []byte(` -FROM busybox AS golang -RUN mkdir /usr/bin && echo -n foo > /usr/bin/go - -FROM scratch -COPY --from=golang /usr/bin/go go -`) - - dir, err = tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "go")) - require.NoError(t, err) - require.Contains(t, string(dt), "foo") -} - -func testMultiStageCaseInsensitive(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM scratch AS STAge0 -COPY foo bar -FROM scratch AS staGE1 -COPY --from=staGE0 bar baz -FROM scratch -COPY --from=stage1 baz bax -`) - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("foo", []byte("foo-contents"), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - FrontendAttrs: map[string]string{ - "target": "Stage1", - }, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "baz")) - require.NoError(t, err) - require.Contains(t, string(dt), "foo-contents") -} - -func testLabels(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM scratch -LABEL foo=bar -`) - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - target := "example.com/moby/dockerfilelabels:test" - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "label:bar": "baz", - }, - Exporter: client.ExporterImage, - ExporterAttrs: map[string]string{ - "name": target, - }, - LocalDirs: map[string]string{ - 
builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - var cdAddress string - if cd, ok := sb.(interface { - ContainerdAddress() string - }); !ok { - t.Skip("only for containerd worker") - } else { - cdAddress = cd.ContainerdAddress() - } - - client, err := newContainerd(cdAddress) - require.NoError(t, err) - defer client.Close() - - ctx := namespaces.WithNamespace(context.Background(), "buildkit") - - img, err := client.ImageService().Get(ctx, target) - require.NoError(t, err) - - desc, err := img.Config(ctx, client.ContentStore(), platforms.Default()) - require.NoError(t, err) - - dt, err := content.ReadBlob(ctx, client.ContentStore(), desc) - require.NoError(t, err) - - var ociimg ocispec.Image - err = json.Unmarshal(dt, &ociimg) - require.NoError(t, err) - - v, ok := ociimg.Config.Labels["foo"] - require.True(t, ok) - require.Equal(t, "bar", v) - - v, ok = ociimg.Config.Labels["bar"] - require.True(t, ok) - require.Equal(t, "baz", v) -} - -func testCacheImportExport(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - registry, err := sb.NewRegistry() - if errors.Cause(err) == integration.ErrorRequirements { - t.Skip(err.Error()) - } - require.NoError(t, err) - - dockerfile := []byte(` -FROM busybox AS base -COPY foo const -#RUN echo -n foobar > const -RUN cat /dev/urandom | head -c 100 | sha256sum > unique -FROM scratch -COPY --from=base const / -COPY --from=base unique / -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("foo", []byte("foobar"), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - target := registry + "/buildkit/testexportdf:latest" - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - ExportCache: target, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "const")) - require.NoError(t, err) - require.Equal(t, "foobar", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "unique")) - require.NoError(t, err) - - err = c.Prune(context.TODO(), nil, client.PruneAll) - require.NoError(t, err) - - checkAllRemoved(t, c, sb) - - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "cache-from": target, - }, - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dt2, err := ioutil.ReadFile(filepath.Join(destDir, "const")) - require.NoError(t, err) - require.Equal(t, "foobar", string(dt2)) - - dt2, err = ioutil.ReadFile(filepath.Join(destDir, "unique")) - require.NoError(t, err) - require.Equal(t, string(dt), string(dt2)) - - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) -} - -func testReproducibleIDs(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM busybox -ENV foo=bar -COPY foo / -RUN echo bar > bar -`) - dir, err := 
tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("foo", []byte("foo-contents"), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - target := "example.com/moby/dockerfileids:test" - opt := client.SolveOpt{ - FrontendAttrs: map[string]string{}, - Exporter: client.ExporterImage, - ExporterAttrs: map[string]string{ - "name": target, - }, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - } - - _, err = f.Solve(context.TODO(), c, opt, nil) - require.NoError(t, err) - - target2 := "example.com/moby/dockerfileids2:test" - opt.ExporterAttrs["name"] = target2 - - _, err = f.Solve(context.TODO(), c, opt, nil) - require.NoError(t, err) - - var cdAddress string - if cd, ok := sb.(interface { - ContainerdAddress() string - }); !ok { - t.Skip("only for containerd worker") - } else { - cdAddress = cd.ContainerdAddress() - } - - client, err := newContainerd(cdAddress) - require.NoError(t, err) - defer client.Close() - - ctx := namespaces.WithNamespace(context.Background(), "buildkit") - - img, err := client.ImageService().Get(ctx, target) - require.NoError(t, err) - img2, err := client.ImageService().Get(ctx, target2) - require.NoError(t, err) - - require.Equal(t, img.Target, img2.Target) -} - -func testImportExportReproducibleIDs(t *testing.T, sb integration.Sandbox) { - var cdAddress string - if cd, ok := sb.(interface { - ContainerdAddress() string - }); !ok { - t.Skip("only for containerd worker") - } else { - cdAddress = cd.ContainerdAddress() - } - - f := getFrontend(t, sb) - - registry, err := sb.NewRegistry() - if errors.Cause(err) == integration.ErrorRequirements { - t.Skip(err.Error()) - } - require.NoError(t, err) - - dockerfile := []byte(` -FROM busybox -ENV foo=bar -COPY foo / -RUN echo bar > bar -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("foo", []byte("foobar"), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - target := "example.com/moby/dockerfileexpids:test" - cacheTarget := registry + "/test/dockerfileexpids:cache" - opt := client.SolveOpt{ - FrontendAttrs: map[string]string{}, - Exporter: client.ExporterImage, - ExportCache: cacheTarget, - ExporterAttrs: map[string]string{ - "name": target, - }, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - } - - ctd, err := newContainerd(cdAddress) - require.NoError(t, err) - defer ctd.Close() - - ctx := namespaces.WithNamespace(context.Background(), "buildkit") - - _, err = f.Solve(context.TODO(), c, opt, nil) - require.NoError(t, err) - - img, err := ctd.ImageService().Get(ctx, target) - require.NoError(t, err) - - err = ctd.ImageService().Delete(ctx, target) - require.NoError(t, err) - - err = c.Prune(context.TODO(), nil, client.PruneAll) - require.NoError(t, err) - - checkAllRemoved(t, c, sb) - - target2 := "example.com/moby/dockerfileexpids2:test" - - opt.ExporterAttrs["name"] = target2 - opt.FrontendAttrs["cache-from"] = cacheTarget - - _, err = f.Solve(context.TODO(), c, opt, nil) - require.NoError(t, 
err) - - img2, err := ctd.ImageService().Get(ctx, target2) - require.NoError(t, err) - - require.Equal(t, img.Target, img2.Target) -} - -func testNoCache(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM busybox AS s0 -RUN cat /dev/urandom | head -c 100 | sha256sum | tee unique -FROM busybox AS s1 -RUN cat /dev/urandom | head -c 100 | sha256sum | tee unique2 -FROM scratch -COPY --from=s0 unique / -COPY --from=s1 unique2 / -`) - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - opt := client.SolveOpt{ - FrontendAttrs: map[string]string{}, - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - } - - _, err = f.Solve(context.TODO(), c, opt, nil) - require.NoError(t, err) - - destDir2, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - opt.FrontendAttrs["no-cache"] = "" - opt.ExporterOutputDir = destDir2 - - _, err = f.Solve(context.TODO(), c, opt, nil) - require.NoError(t, err) - - unique1Dir1, err := ioutil.ReadFile(filepath.Join(destDir, "unique")) - require.NoError(t, err) - - unique1Dir2, err := ioutil.ReadFile(filepath.Join(destDir2, "unique")) - require.NoError(t, err) - - unique2Dir1, err := ioutil.ReadFile(filepath.Join(destDir, "unique2")) - require.NoError(t, err) - - unique2Dir2, err := ioutil.ReadFile(filepath.Join(destDir2, "unique2")) - require.NoError(t, err) - - require.NotEqual(t, string(unique1Dir1), string(unique1Dir2)) - require.NotEqual(t, string(unique2Dir1), string(unique2Dir2)) - - destDir3, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - opt.FrontendAttrs["no-cache"] = "s1" - opt.ExporterOutputDir = destDir3 - - _, err = f.Solve(context.TODO(), c, opt, nil) - require.NoError(t, err) - - unique1Dir3, err := ioutil.ReadFile(filepath.Join(destDir3, "unique")) - require.NoError(t, err) - - unique2Dir3, err := ioutil.ReadFile(filepath.Join(destDir3, "unique2")) - require.NoError(t, err) - - require.Equal(t, string(unique1Dir2), string(unique1Dir3)) - require.NotEqual(t, string(unique2Dir1), string(unique2Dir3)) -} - -func testPlatformArgsImplicit(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(fmt.Sprintf(` -FROM scratch AS build-%s -COPY foo bar -FROM build-${TARGETOS} -COPY foo2 bar2 -`, runtime.GOOS)) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - fstest.CreateFile("foo", []byte("d0"), 0600), - fstest.CreateFile("foo2", []byte("d1"), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - opt := client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - } - - _, err = f.Solve(context.TODO(), c, opt, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar")) - require.NoError(t, 
err) - require.Equal(t, "d0", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "bar2")) - require.NoError(t, err) - require.Equal(t, "d1", string(dt)) -} - -func testPlatformArgsExplicit(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM --platform=$BUILDPLATFORM busybox AS build -ARG TARGETPLATFORM -ARG TARGETOS -RUN mkdir /out && echo -n $TARGETPLATFORM > /out/platform && echo -n $TARGETOS > /out/os -FROM scratch -COPY --from=build out . -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - opt := client.SolveOpt{ - Exporter: client.ExporterLocal, - FrontendAttrs: map[string]string{ - "platform": "darwin/ppc64le", - "build-arg:TARGETOS": "freebsd", - }, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - } - - _, err = f.Solve(context.TODO(), c, opt, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "platform")) - require.NoError(t, err) - require.Equal(t, "darwin/ppc64le", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "os")) - require.NoError(t, err) - require.Equal(t, "freebsd", string(dt)) -} - -func testBuiltinArgs(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM busybox AS build -ARG FOO -ARG BAR -ARG BAZ=bazcontent -RUN echo -n $HTTP_PROXY::$NO_PROXY::$FOO::$BAR::$BAZ > /out -FROM scratch -COPY --from=build /out / - -`) - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(context.TODO(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - opt := client.SolveOpt{ - FrontendAttrs: map[string]string{ - "build-arg:FOO": "foocontents", - "build-arg:http_proxy": "hpvalue", - "build-arg:NO_PROXY": "npvalue", - }, - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - } - - _, err = f.Solve(context.TODO(), c, opt, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "out")) - require.NoError(t, err) - require.Equal(t, "hpvalue::npvalue::foocontents::::bazcontent", string(dt)) - - // repeat with changed default args should match the old cache - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - opt = client.SolveOpt{ - FrontendAttrs: map[string]string{ - "build-arg:FOO": "foocontents", - "build-arg:http_proxy": "hpvalue2", - }, - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - } - - _, err = f.Solve(context.TODO(), c, opt, nil) - require.NoError(t, err) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "out")) - require.NoError(t, err) - require.Equal(t, "hpvalue::npvalue::foocontents::::bazcontent", string(dt)) - - // changing actual value invalidates cache - destDir, err = ioutil.TempDir("", "buildkit") - 
require.NoError(t, err) - defer os.RemoveAll(destDir) - - opt = client.SolveOpt{ - FrontendAttrs: map[string]string{ - "build-arg:FOO": "foocontents2", - "build-arg:http_proxy": "hpvalue2", - }, - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, - LocalDirs: map[string]string{ - builder.LocalNameDockerfile: dir, - builder.LocalNameContext: dir, - }, - } - - _, err = f.Solve(context.TODO(), c, opt, nil) - require.NoError(t, err) - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "out")) - require.NoError(t, err) - require.Equal(t, "hpvalue2::::foocontents2::::bazcontent", string(dt)) -} - -func tmpdir(appliers ...fstest.Applier) (string, error) { - tmpdir, err := ioutil.TempDir("", "buildkit-dockerfile") - if err != nil { - return "", err - } - if err := fstest.Apply(appliers...).Apply(tmpdir); err != nil { - return "", err - } - return tmpdir, nil -} - -func runShell(dir string, cmds ...string) error { - for _, args := range cmds { - cmd := exec.Command("sh", "-c", args) - cmd.Dir = dir - if err := cmd.Run(); err != nil { - return errors.Wrapf(err, "error running %v", args) - } - } - return nil -} - -func checkAllRemoved(t *testing.T, c *client.Client, sb integration.Sandbox) { - retries := 0 - for { - require.True(t, 20 > retries) - retries++ - du, err := c.DiskUsage(context.TODO()) - require.NoError(t, err) - if len(du) > 0 { - time.Sleep(500 * time.Millisecond) - continue - } - break - } -} - -func newContainerd(cdAddress string) (*containerd.Client, error) { - return containerd.New(cdAddress, containerd.WithTimeout(60*time.Second)) -} - -func dfCmdArgs(ctx, dockerfile, args string) (string, string) { - traceFile := filepath.Join(os.TempDir(), "trace"+identity.NewID()) - return fmt.Sprintf("build --progress=plain %s --local context=%s --local dockerfile=%s --trace=%s", args, ctx, dockerfile, traceFile), traceFile -} - -type builtinFrontend struct{} - -var _ frontend = &builtinFrontend{} - -func (f *builtinFrontend) Solve(ctx context.Context, c *client.Client, opt client.SolveOpt, statusChan chan *client.SolveStatus) (*client.SolveResponse, error) { - opt.Frontend = "dockerfile.v0" - return c.Solve(ctx, nil, opt, statusChan) -} - -func (f *builtinFrontend) DFCmdArgs(ctx, dockerfile string) (string, string) { - return dfCmdArgs(ctx, dockerfile, "--frontend dockerfile.v0") -} - -func (f *builtinFrontend) RequiresBuildctl(t *testing.T) {} - -type clientFrontend struct{} - -var _ frontend = &clientFrontend{} - -func (f *clientFrontend) Solve(ctx context.Context, c *client.Client, opt client.SolveOpt, statusChan chan *client.SolveStatus) (*client.SolveResponse, error) { - return c.Build(ctx, opt, "", builder.Build, statusChan) -} - -func (f *clientFrontend) DFCmdArgs(ctx, dockerfile string) (string, string) { - return "", "" -} -func (f *clientFrontend) RequiresBuildctl(t *testing.T) { - t.Skip() -} - -type gatewayFrontend struct { - gw string -} - -var _ frontend = &gatewayFrontend{} - -func (f *gatewayFrontend) Solve(ctx context.Context, c *client.Client, opt client.SolveOpt, statusChan chan *client.SolveStatus) (*client.SolveResponse, error) { - opt.Frontend = "gateway.v0" - if opt.FrontendAttrs == nil { - opt.FrontendAttrs = make(map[string]string) - } - opt.FrontendAttrs["source"] = f.gw - return c.Solve(ctx, nil, opt, statusChan) -} - -func (f *gatewayFrontend) DFCmdArgs(ctx, dockerfile string) (string, string) { - return dfCmdArgs(ctx, dockerfile, "--frontend gateway.v0 --frontend-opt=source="+f.gw) -} - -func (f *gatewayFrontend) RequiresBuildctl(t *testing.T) {} - 
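For orientation, every test deleted above follows the same shape: resolve the active frontend variant from the sandbox, stage a temporary build context, and drive the build through the shared `frontend` interface, so one test body exercises `dockerfile.v0`, the client-side `builder.Build` path, or an external gateway image unchanged. A minimal sketch of that pattern, assuming the file's existing imports; `testExample` and its Dockerfile content are hypothetical:

```go
// hypothetical test, sketching the pattern the deleted tests above share
func testExample(t *testing.T, sb integration.Sandbox) {
	f := getFrontend(t, sb) // builtin, client, or gateway, per the sandbox "frontend" value

	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", []byte("FROM scratch\nCOPY foo bar\n"), 0600),
		fstest.CreateFile("foo", []byte("data"), 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)

	c, err := client.New(context.TODO(), sb.Address())
	require.NoError(t, err)
	defer c.Close()

	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)

	// Solve routes through whichever frontend implementation is under test.
	_, err = f.Solve(context.TODO(), c, client.SolveOpt{
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
}
```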
-func getFrontend(t *testing.T, sb integration.Sandbox) frontend { - v := sb.Value("frontend") - require.NotNil(t, v) - fn, ok := v.(frontend) - require.True(t, ok) - return fn -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/docs/experimental.md b/vendor/github.com/moby/buildkit/frontend/dockerfile/docs/experimental.md deleted file mode 100644 index 47dc6ae77f91..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/docs/experimental.md +++ /dev/null @@ -1,129 +0,0 @@ -# Dockerfile frontend experimental syntaxes - -## Note for Docker users - -If you are using Docker v18.06 or later, BuildKit mode can be enabled by setting `export DOCKER_BUILDKIT=1` on the client side. -Docker v18.06 also requires the daemon to be [running in experimental mode](https://docs.docker.com/engine/reference/commandline/dockerd/#description). - -You need to use `docker build` CLI instead of `buildctl` CLI mentioned in this document. -See [the `docker build` document](https://docs.docker.com/engine/reference/commandline/build/) for the usage. - -## Use experimental Dockerfile frontend -The features mentioned in this document are experimentally available as [`docker/dockerfile-upstream:experimental`](https://hub.docker.com/r/docker/dockerfile-upstream/tags/) image. - -To use the experimental features, the first line of your Dockerfile needs to be `# syntax=docker/dockerfile-upstream:experimental`. -As the experimental syntaxes may change in future revisions, you may want to pin the image to a specific revision. - -See also [#528](https://github.com/moby/buildkit/issues/528) for further information about planned `docker/dockerfile` releases. - -## Experimental syntaxes - -### `RUN --mount=type=bind` (the default mount type) - -This mount type allows binding directories (read-only) in the context or in an image to the build container. - -|Option |Description| -|---------------------|-----------| -|`target` (required) | Mount path.| -|`source` | Source path in the `from`. Defaults to the root of the `from`.| -|`from` | Build stage or image name for the root of the source. Defaults to the build context.| - - -### `RUN --mount=type=cache` - -This mount type allows the build container to cache directories for compilers and package managers. - -|Option |Description| -|---------------------|-----------| -|`id` | Optional ID to identify separate/different caches| -|`target` (required) | Mount path.| -|`ro`,`readonly` | Read-only if set.| -|`sharing` | One of `shared`, `private`, or `locked`. Defaults to `shared`. A `shared` cache mount can be used concurrently by multiple writers. `private` creates a new mount if there are multiple writers. `locked` pauses the second writer until the first one releases the mount.| - - -#### Example: cache Go packages - -```dockerfile -# syntax = docker/dockerfile-upstream:experimental -FROM golang -... -RUN --mount=type=cache,target=/root/.cache/go-build go build ... -``` - -#### Example: cache apt packages - -```dockerfile -# syntax = docker/dockerfile-upstream:experimental -FROM ubuntu -RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache -RUN --mount=type=cache,target=/var/cache/apt --mount=type=cache,target=/var/lib/apt \ - apt update && apt install -y gcc -``` - -### `RUN --mount=type=tmpfs` - -This mount type allows mounting tmpfs in the build container. 
- -|Option |Description| -|---------------------|-----------| -|`target` (required) | Mount path.| - - -### `RUN --mount=type=secret` - -This mount type allows the build container to access secure files such as private keys without baking them into the image. - -|Option |Description| -|---------------------|-----------| -|`id` | ID of the secret. Defaults to basename of the target path.| -|`target` | Mount path. Defaults to `/run/secrets/` + `id`.| -|`required` | If set to `true`, the instruction errors out when the secret is unavailable. Defaults to `false`.| - - -#### Example: access to S3 - -```dockerfile -# syntax = docker/dockerfile-upstream:experimental -FROM python:3 -RUN pip install awscli -RUN --mount=type=secret,id=aws,target=/root/.aws/credentials aws s3 cp s3://... ... -``` - -```console -$ buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=. \ - --secret id=aws,src=$HOME/.aws/credentials -``` - -### `RUN --mount=type=ssh` - -This mount type allows the build container to access SSH keys via SSH agents, with support for passphrases. - -|Option |Description| -|---------------------|-----------| -|`id` | ID of SSH agent socket or key. Defaults to "default".| -|`target` | SSH agent socket path. Defaults to `/run/buildkit/ssh_agent.${N}`.| -|`required` | If set to `true`, the instruction errors out when the key is unavailable. Defaults to `false`.| - - -#### Example: access to Gitlab - -```dockerfile -# syntax = docker/dockerfile-upstream:experimental -FROM alpine -RUN apk add --no-cache openssh-client -RUN mkdir -p -m 0700 ~/.ssh && ssh-keyscan gitlab.com >> ~/.ssh/known_hosts -RUN --mount=type=ssh ssh git@gitlab.com | tee /hello -# "Welcome to GitLab, @GITLAB_USERNAME_ASSOCIATED_WITH_SSHKEY" should be printed here -``` - -```console -$ eval $(ssh-agent) -$ ssh-add ~/.ssh/id_rsa -(Input your passphrase here) -$ buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=. \ - --ssh default=$SSH_AUTH_SOCK -``` - -You can also specify a path to `*.pem` file on the host directly instead of `$SSH_AUTH_SOCK`. -However, pem files with passphrases are not supported. - diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go deleted file mode 100644 index d8bf74739499..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go +++ /dev/null @@ -1,200 +0,0 @@ -package instructions - -import ( - "fmt" - "strings" -) - -// FlagType is the type of the build flag -type FlagType int - -const ( - boolType FlagType = iota - stringType - stringsType -) - -// BFlags contains all flags information for the builder -type BFlags struct { - Args []string // actual flags/args from cmd line - flags map[string]*Flag - used map[string]*Flag - Err error -} - -// Flag contains all information for a flag -type Flag struct { - bf *BFlags - name string - flagType FlagType - Value string - StringValues []string -} - -// NewBFlags returns the new BFlags struct -func NewBFlags() *BFlags { - return &BFlags{ - flags: make(map[string]*Flag), - used: make(map[string]*Flag), - } -} - -// NewBFlagsWithArgs returns the new BFlags struct with Args set to args -func NewBFlagsWithArgs(args []string) *BFlags { - flags := NewBFlags() - flags.Args = args - return flags -} - -// AddBool adds a bool flag to BFlags -// Note, any error will be generated when Parse() is called (see Parse). 
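Taken together, `NewBFlagsWithArgs` plus the `Add*` definitions defer all validation to `Parse()`, which keeps the call sites flat. A short sketch of typical use, mirroring how COPY wires up its `--chown`/`--from` flags later in this diff; `exampleCopyFlags` and the argument values are hypothetical:

```go
// hypothetical helper, not part of the original file
func exampleCopyFlags() (chown, from string, err error) {
	bf := NewBFlagsWithArgs([]string{"--chown=0:0", "--from=build"})
	flChown := bf.AddString("chown", "")
	flFrom := bf.AddString("from", "")
	if err := bf.Parse(); err != nil {
		return "", "", err // unknown flags, duplicates, and definition errors all surface here
	}
	return flChown.Value, flFrom.Value, nil // "0:0", "build"
}
```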
-func (bf *BFlags) AddBool(name string, def bool) *Flag { - flag := bf.addFlag(name, boolType) - if flag == nil { - return nil - } - if def { - flag.Value = "true" - } else { - flag.Value = "false" - } - return flag -} - -// AddString adds a string flag to BFlags -// Note, any error will be generated when Parse() is called (see Parse). -func (bf *BFlags) AddString(name string, def string) *Flag { - flag := bf.addFlag(name, stringType) - if flag == nil { - return nil - } - flag.Value = def - return flag -} - -// AddStrings adds a string flag to BFlags that can match multiple values -func (bf *BFlags) AddStrings(name string) *Flag { - flag := bf.addFlag(name, stringsType) - if flag == nil { - return nil - } - return flag -} - -// addFlag is a generic func used by the other AddXXX() func -// to add a new flag to the BFlags struct. -// Note, any error will be generated when Parse() is called (see Parse). -func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { - if _, ok := bf.flags[name]; ok { - bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) - return nil - } - - newFlag := &Flag{ - bf: bf, - name: name, - flagType: flagType, - } - bf.flags[name] = newFlag - - return newFlag -} - -// IsUsed checks if the flag is used -func (fl *Flag) IsUsed() bool { - if _, ok := fl.bf.used[fl.name]; ok { - return true - } - return false -} - -// IsTrue checks if a bool flag is true -func (fl *Flag) IsTrue() bool { - if fl.flagType != boolType { - // Should never get here - panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) - } - return fl.Value == "true" -} - -// Parse parses and checks if the BFlags is valid. -// Any error noticed during the AddXXX() funcs will be generated/returned -// here. We do this because an error during AddXXX() is more like a -// compile time error so it doesn't matter too much when we stop our -// processing as long as we do stop it, so this allows the code -// around AddXXX() to be just: -// defFlag := AddString("description", "") -// w/o needing to add an if-statement around each one. 
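`Parse` accepts `--name`, `--name=value`, and a bare `--` terminator; a bool flag given with no value is stored as `"true"`, and only `stringsType` flags may be repeated. A hypothetical walk-through of those rules:

```go
// hypothetical helper, not part of the original file
func exampleParseRules() ([]string, bool, error) {
	bf := NewBFlags()
	flMount := bf.AddStrings("mount")     // stringsType: may be given more than once
	flQuiet := bf.AddBool("quiet", false) // hypothetical flag name
	bf.Args = []string{"--mount=type=cache,target=/go", "--mount=type=ssh", "--quiet"}
	if err := bf.Parse(); err != nil {
		return nil, false, err
	}
	// StringValues is now {"type=cache,target=/go", "type=ssh"}; IsTrue() reports
	// true because a bare bool flag (no "=value") is treated as "true".
	return flMount.StringValues, flQuiet.IsTrue(), nil
}
```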
-func (bf *BFlags) Parse() error { - // If there was an error while defining the possible flags - // go ahead and bubble it back up here since we didn't do it - // earlier in the processing - if bf.Err != nil { - return fmt.Errorf("Error setting up flags: %s", bf.Err) - } - - for _, arg := range bf.Args { - if !strings.HasPrefix(arg, "--") { - return fmt.Errorf("Arg should start with -- : %s", arg) - } - - if arg == "--" { - return nil - } - - arg = arg[2:] - value := "" - - index := strings.Index(arg, "=") - if index >= 0 { - value = arg[index+1:] - arg = arg[:index] - } - - flag, ok := bf.flags[arg] - if !ok { - return fmt.Errorf("Unknown flag: %s", arg) - } - - if _, ok = bf.used[arg]; ok && flag.flagType != stringsType { - return fmt.Errorf("Duplicate flag specified: %s", arg) - } - - bf.used[arg] = flag - - switch flag.flagType { - case boolType: - // value == "" is only ok if no "=" was specified - if index >= 0 && value == "" { - return fmt.Errorf("Missing a value on flag: %s", arg) - } - - lower := strings.ToLower(value) - if lower == "" { - flag.Value = "true" - } else if lower == "true" || lower == "false" { - flag.Value = lower - } else { - return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value) - } - - case stringType: - if index < 0 { - return fmt.Errorf("Missing a value on flag: %s", arg) - } - flag.Value = value - - case stringsType: - if index < 0 { - return fmt.Errorf("Missing a value on flag: %s", arg) - } - flag.StringValues = append(flag.StringValues, value) - - default: - panic("No idea what kind of flag we have! Should never get here!") - } - - } - - return nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag_test.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag_test.go deleted file mode 100644 index b194ba785f2c..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag_test.go +++ /dev/null @@ -1,187 +0,0 @@ -package instructions - -import ( - "testing" -) - -func TestBuilderFlags(t *testing.T) { - var expected string - var err error - - // --- - - bf := NewBFlags() - bf.Args = []string{} - if err := bf.Parse(); err != nil { - t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err) - } - - // --- - - bf = NewBFlags() - bf.Args = []string{"--"} - if err := bf.Parse(); err != nil { - t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err) - } - - // --- - - bf = NewBFlags() - flStr1 := bf.AddString("str1", "") - flBool1 := bf.AddBool("bool1", false) - bf.Args = []string{} - if err = bf.Parse(); err != nil { - t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err) - } - - if flStr1.IsUsed() { - t.Fatal("Test3 - str1 was not used!") - } - if flBool1.IsUsed() { - t.Fatal("Test3 - bool1 was not used!") - } - - // --- - - bf = NewBFlags() - flStr1 = bf.AddString("str1", "HI") - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err) - } - - if flStr1.Value != "HI" { - t.Fatal("Str1 was supposed to default to: HI") - } - if flBool1.IsTrue() { - t.Fatal("Bool1 was supposed to default to: false") - } - if flStr1.IsUsed() { - t.Fatal("Str1 was not used!") - } - if flBool1.IsUsed() { - t.Fatal("Bool1 was not used!") - } - - // --- - - bf = NewBFlags() - flStr1 = bf.AddString("str1", "HI") - bf.Args = []string{"--str1"} - - if err = bf.Parse(); err == nil { - t.Fatalf("Test %q was supposed to fail", bf.Args) - } - - // --- - - bf = 
NewBFlags() - flStr1 = bf.AddString("str1", "HI") - bf.Args = []string{"--str1="} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - expected = "" - if flStr1.Value != expected { - t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) - } - - // --- - - bf = NewBFlags() - flStr1 = bf.AddString("str1", "HI") - bf.Args = []string{"--str1=BYE"} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - expected = "BYE" - if flStr1.Value != expected { - t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) - } - - // --- - - bf = NewBFlags() - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool1"} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - if !flBool1.IsTrue() { - t.Fatal("Test-b1 Bool1 was supposed to be true") - } - - // --- - - bf = NewBFlags() - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool1=true"} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - if !flBool1.IsTrue() { - t.Fatal("Test-b2 Bool1 was supposed to be true") - } - - // --- - - bf = NewBFlags() - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool1=false"} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - if flBool1.IsTrue() { - t.Fatal("Test-b3 Bool1 was supposed to be false") - } - - // --- - - bf = NewBFlags() - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool1=false1"} - - if err = bf.Parse(); err == nil { - t.Fatalf("Test %q was supposed to fail", bf.Args) - } - - // --- - - bf = NewBFlags() - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool2"} - - if err = bf.Parse(); err == nil { - t.Fatalf("Test %q was supposed to fail", bf.Args) - } - - // --- - - bf = NewBFlags() - flStr1 = bf.AddString("str1", "HI") - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool1", "--str1=BYE"} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - if flStr1.Value != "BYE" { - t.Fatalf("Test %s, str1 should be BYE", bf.Args) - } - if !flBool1.IsTrue() { - t.Fatalf("Test %s, bool1 should be true", bf.Args) - } -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go deleted file mode 100644 index 28b34f681b01..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go +++ /dev/null @@ -1,446 +0,0 @@ -package instructions - -import ( - "errors" - "strings" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/strslice" -) - -// KeyValuePair represent an arbitrary named value (useful in slice instead of map[string] string to preserve ordering) -type KeyValuePair struct { - Key string - Value string -} - -func (kvp *KeyValuePair) String() string { - return kvp.Key + "=" + kvp.Value -} - -// KeyValuePairOptional is the same as KeyValuePair but Value is optional -type KeyValuePairOptional struct { - Key string - Value *string -} - -func (kvpo *KeyValuePairOptional) ValueString() string { - v := "" - if kvpo.Value != nil { - v = *kvpo.Value - } - return v -} - -// Command is implemented by every command present in a dockerfile -type Command interface { - Name() string -} - -// KeyValuePairs is a slice of KeyValuePair -type 
KeyValuePairs []KeyValuePair - -// withNameAndCode is the base of every command in a Dockerfile (String() returns its source code) -type withNameAndCode struct { - code string - name string -} - -func (c *withNameAndCode) String() string { - return c.code -} - -// Name of the command -func (c *withNameAndCode) Name() string { - return c.name -} - -func newWithNameAndCode(req parseRequest) withNameAndCode { - return withNameAndCode{code: strings.TrimSpace(req.original), name: req.command} -} - -// SingleWordExpander is a provider for variable expansion where 1 word => 1 output -type SingleWordExpander func(word string) (string, error) - -// SupportsSingleWordExpansion interface marks a command as supporting variable expansion -type SupportsSingleWordExpansion interface { - Expand(expander SingleWordExpander) error -} - -// PlatformSpecific adds platform checks to a command -type PlatformSpecific interface { - CheckPlatform(platform string) error -} - -func expandKvp(kvp KeyValuePair, expander SingleWordExpander) (KeyValuePair, error) { - key, err := expander(kvp.Key) - if err != nil { - return KeyValuePair{}, err - } - value, err := expander(kvp.Value) - if err != nil { - return KeyValuePair{}, err - } - return KeyValuePair{Key: key, Value: value}, nil -} -func expandKvpsInPlace(kvps KeyValuePairs, expander SingleWordExpander) error { - for i, kvp := range kvps { - newKvp, err := expandKvp(kvp, expander) - if err != nil { - return err - } - kvps[i] = newKvp - } - return nil -} - -func expandSliceInPlace(values []string, expander SingleWordExpander) error { - for i, v := range values { - newValue, err := expander(v) - if err != nil { - return err - } - values[i] = newValue - } - return nil -} - -// EnvCommand : ENV key1 value1 [keyN valueN...] -type EnvCommand struct { - withNameAndCode - Env KeyValuePairs // kvp slice instead of map to preserve ordering -} - -// Expand variables -func (c *EnvCommand) Expand(expander SingleWordExpander) error { - return expandKvpsInPlace(c.Env, expander) -} - -// MaintainerCommand : MAINTAINER maintainer_name -type MaintainerCommand struct { - withNameAndCode - Maintainer string -} - -// NewLabelCommand creates a new 'LABEL' command -func NewLabelCommand(k string, v string, NoExp bool) *LabelCommand { - kvp := KeyValuePair{Key: k, Value: v} - c := "LABEL " - c += kvp.String() - nc := withNameAndCode{code: c, name: "label"} - cmd := &LabelCommand{ - withNameAndCode: nc, - Labels: KeyValuePairs{ - kvp, - }, - noExpand: NoExp, - } - return cmd -} - -// LabelCommand : LABEL some json data describing the image -// -// Sets the Label variable foo to bar, -// -type LabelCommand struct { - withNameAndCode - Labels KeyValuePairs // kvp slice instead of map to preserve ordering - noExpand bool -} - -// Expand variables -func (c *LabelCommand) Expand(expander SingleWordExpander) error { - if c.noExpand { - return nil - } - return expandKvpsInPlace(c.Labels, expander) -} - -// SourcesAndDest represent a list of source files and a destination -type SourcesAndDest []string - -// Sources list the source paths -func (s SourcesAndDest) Sources() []string { - res := make([]string, len(s)-1) - copy(res, s[:len(s)-1]) - return res -} - -// Dest path of the operation -func (s SourcesAndDest) Dest() string { - return s[len(s)-1] -} - -// AddCommand : ADD foo /path -// -// Add the file 'foo' to '/path'. Tarball and Remote URL (http, https) handling -// exist here. If you do not wish to have this automatic handling, use COPY. 
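Both ADD and COPY embed `SourcesAndDest`, a plain string slice whose final element is the destination and whose preceding elements are the sources. A tiny illustration; the helper and paths are hypothetical:

```go
// hypothetical helper, not part of the original file
func exampleSourcesAndDest() ([]string, string) {
	sd := SourcesAndDest{"a.txt", "b.txt", "/dst/"} // e.g. as parsed from: ADD a.txt b.txt /dst/
	return sd.Sources(), sd.Dest()                  // ["a.txt", "b.txt"], "/dst/"
}
```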
-// -type AddCommand struct { - withNameAndCode - SourcesAndDest - Chown string -} - -// Expand variables -func (c *AddCommand) Expand(expander SingleWordExpander) error { - return expandSliceInPlace(c.SourcesAndDest, expander) -} - -// CopyCommand : COPY foo /path -// -// Same as 'ADD' but without the tar and remote url handling. -// -type CopyCommand struct { - withNameAndCode - SourcesAndDest - From string - Chown string -} - -// Expand variables -func (c *CopyCommand) Expand(expander SingleWordExpander) error { - return expandSliceInPlace(c.SourcesAndDest, expander) -} - -// OnbuildCommand : ONBUILD -type OnbuildCommand struct { - withNameAndCode - Expression string -} - -// WorkdirCommand : WORKDIR /tmp -// -// Set the working directory for future RUN/CMD/etc statements. -// -type WorkdirCommand struct { - withNameAndCode - Path string -} - -// Expand variables -func (c *WorkdirCommand) Expand(expander SingleWordExpander) error { - p, err := expander(c.Path) - if err != nil { - return err - } - c.Path = p - return nil -} - -// ShellDependantCmdLine represents a cmdline optionally prepended with the shell -type ShellDependantCmdLine struct { - CmdLine strslice.StrSlice - PrependShell bool -} - -// RunCommand : RUN some command yo -// -// run a command and commit the image. Args are automatically prepended with -// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under -// Windows, in the event there is only one argument The difference in processing: -// -// RUN echo hi # sh -c echo hi (Linux) -// RUN echo hi # cmd /S /C echo hi (Windows) -// RUN [ "echo", "hi" ] # echo hi -// -type RunCommand struct { - withNameAndCode - withExternalData - ShellDependantCmdLine -} - -// CmdCommand : CMD foo -// -// Set the default command to run in the container (which may be empty). -// Argument handling is the same as RUN. -// -type CmdCommand struct { - withNameAndCode - ShellDependantCmdLine -} - -// HealthCheckCommand : HEALTHCHECK foo -// -// Set the default healthcheck command to run in the container (which may be empty). -// Argument handling is the same as RUN. -// -type HealthCheckCommand struct { - withNameAndCode - Health *container.HealthConfig -} - -// EntrypointCommand : ENTRYPOINT /usr/sbin/nginx -// -// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments -// to /usr/sbin/nginx. Uses the default shell if not in JSON format. -// -// Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint -// is initialized at newBuilder time instead of through argument parsing. -// -type EntrypointCommand struct { - withNameAndCode - ShellDependantCmdLine -} - -// ExposeCommand : EXPOSE 6667/tcp 7000/tcp -// -// Expose ports for links and port mappings. This all ends up in -// req.runConfig.ExposedPorts for runconfig. -// -type ExposeCommand struct { - withNameAndCode - Ports []string -} - -// UserCommand : USER foo -// -// Set the user to 'foo' for future commands and when running the -// ENTRYPOINT/CMD at container run time. -// -type UserCommand struct { - withNameAndCode - User string -} - -// Expand variables -func (c *UserCommand) Expand(expander SingleWordExpander) error { - p, err := expander(c.User) - if err != nil { - return err - } - c.User = p - return nil -} - -// VolumeCommand : VOLUME /foo -// -// Expose the volume /foo for use. Will also accept the JSON array form. 
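Commands that implement `SupportsSingleWordExpansion` receive a `SingleWordExpander` from the builder, which substitutes ARG/ENV values one word at a time. A toy expander for illustration only; the real one performs full shell-word processing against the build's variable state:

```go
// hypothetical helper, not part of the original file
func exampleExpandVolumes() ([]string, error) {
	c := &VolumeCommand{Volumes: []string{"$DATA_DIR", "/logs"}}
	expander := func(word string) (string, error) {
		if word == "$DATA_DIR" { // toy lookup table with a single entry
			return "/data", nil
		}
		return word, nil
	}
	if err := c.Expand(expander); err != nil {
		return nil, err
	}
	return c.Volumes, nil // {"/data", "/logs"}
}
```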
-// -type VolumeCommand struct { - withNameAndCode - Volumes []string -} - -// Expand variables -func (c *VolumeCommand) Expand(expander SingleWordExpander) error { - return expandSliceInPlace(c.Volumes, expander) -} - -// StopSignalCommand : STOPSIGNAL signal -// -// Set the signal that will be used to kill the container. -type StopSignalCommand struct { - withNameAndCode - Signal string -} - -// Expand variables -func (c *StopSignalCommand) Expand(expander SingleWordExpander) error { - p, err := expander(c.Signal) - if err != nil { - return err - } - c.Signal = p - return nil -} - -// CheckPlatform checks that the command is supported in the target platform -func (c *StopSignalCommand) CheckPlatform(platform string) error { - if platform == "windows" { - return errors.New("The daemon on this platform does not support the command stopsignal") - } - return nil -} - -// ArgCommand : ARG name[=value] -// -// Adds the variable foo to the trusted list of variables that can be passed -// to builder using the --build-arg flag for expansion/substitution or passing to 'run'. -// Dockerfile author may optionally set a default value of this variable. -type ArgCommand struct { - withNameAndCode - KeyValuePairOptional -} - -// Expand variables -func (c *ArgCommand) Expand(expander SingleWordExpander) error { - p, err := expander(c.Key) - if err != nil { - return err - } - c.Key = p - if c.Value != nil { - p, err = expander(*c.Value) - if err != nil { - return err - } - c.Value = &p - } - return nil -} - -// ShellCommand : SHELL powershell -command -// -// Set the non-default shell to use. -type ShellCommand struct { - withNameAndCode - Shell strslice.StrSlice -} - -// Stage represents a single stage in a multi-stage build -type Stage struct { - Name string - Commands []Command - BaseName string - SourceCode string - Platform string -} - -// AddCommand to the stage -func (s *Stage) AddCommand(cmd Command) { - // todo: validate cmd type - s.Commands = append(s.Commands, cmd) -} - -// IsCurrentStage check if the stage name is the current stage -func IsCurrentStage(s []Stage, name string) bool { - if len(s) == 0 { - return false - } - return s[len(s)-1].Name == name -} - -// CurrentStage return the last stage in a slice -func CurrentStage(s []Stage) (*Stage, error) { - if len(s) == 0 { - return nil, errors.New("No build stage in current context") - } - return &s[len(s)-1], nil -} - -// HasStage looks for the presence of a given stage name -func HasStage(s []Stage, name string) (int, bool) { - for i, stage := range s { - // Stage name is case-insensitive by design - if strings.EqualFold(stage.Name, name) { - return i, true - } - } - return -1, false -} - -type withExternalData struct { - m map[interface{}]interface{} -} - -func (c *withExternalData) getExternalValue(k interface{}) interface{} { - return c.m[k] -} - -func (c *withExternalData) setExternalValue(k, v interface{}) { - if c.m == nil { - c.m = map[interface{}]interface{}{} - } - c.m[k] = v -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nosecrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nosecrets.go deleted file mode 100644 index 58780648db82..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nosecrets.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !dfsecrets - -package instructions - -func isSecretMountsSupported() bool { - return false -} diff --git 
a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nossh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nossh.go deleted file mode 100644 index a131a273c3ef..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nossh.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !dfssh - -package instructions - -func isSSHMountsSupported() bool { - return false -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go deleted file mode 100644 index c5db70feca47..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go +++ /dev/null @@ -1,216 +0,0 @@ -// +build dfrunmount - -package instructions - -import ( - "encoding/csv" - "strconv" - "strings" - - "github.com/pkg/errors" -) - -const MountTypeBind = "bind" -const MountTypeCache = "cache" -const MountTypeTmpfs = "tmpfs" -const MountTypeSecret = "secret" -const MountTypeSSH = "ssh" - -var allowedMountTypes = map[string]struct{}{ - MountTypeBind: {}, - MountTypeCache: {}, - MountTypeTmpfs: {}, - MountTypeSecret: {}, - MountTypeSSH: {}, -} - -const MountSharingShared = "shared" -const MountSharingPrivate = "private" -const MountSharingLocked = "locked" - -var allowedSharingTypes = map[string]struct{}{ - MountSharingShared: {}, - MountSharingPrivate: {}, - MountSharingLocked: {}, -} - -type mountsKeyT string - -var mountsKey = mountsKeyT("dockerfile/run/mounts") - -func init() { - parseRunPreHooks = append(parseRunPreHooks, runMountPreHook) - parseRunPostHooks = append(parseRunPostHooks, runMountPostHook) -} - -func isValidMountType(s string) bool { - if s == "secret" { - if !isSecretMountsSupported() { - return false - } - } - if s == "ssh" { - if !isSSHMountsSupported() { - return false - } - } - _, ok := allowedMountTypes[s] - return ok -} - -func runMountPreHook(cmd *RunCommand, req parseRequest) error { - st := &mountState{} - st.flag = req.flags.AddStrings("mount") - cmd.setExternalValue(mountsKey, st) - return nil -} - -func runMountPostHook(cmd *RunCommand, req parseRequest) error { - st := getMountState(cmd) - if st == nil { - return errors.Errorf("no mount state") - } - var mounts []*Mount - for _, str := range st.flag.StringValues { - m, err := parseMount(str) - if err != nil { - return err - } - mounts = append(mounts, m) - } - st.mounts = mounts - return nil -} - -func getMountState(cmd *RunCommand) *mountState { - v := cmd.getExternalValue(mountsKey) - if v == nil { - return nil - } - return v.(*mountState) -} - -func GetMounts(cmd *RunCommand) []*Mount { - return getMountState(cmd).mounts -} - -type mountState struct { - flag *Flag - mounts []*Mount -} - -type Mount struct { - Type string - From string - Source string - Target string - ReadOnly bool - CacheID string - CacheSharing string - Required bool -} - -func parseMount(value string) (*Mount, error) { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return nil, errors.Wrap(err, "failed to parse csv mounts") - } - - m := &Mount{Type: MountTypeBind} - - roAuto := true - - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) == 1 { - switch key { - case "readonly", "ro": - m.ReadOnly = true - roAuto = false - continue - case "readwrite", "rw": - m.ReadOnly = false - roAuto = false - continue - 
case "required": - if m.Type == "secret" || m.Type == "ssh" { - m.Required = true - continue - } - } - } - - if len(parts) != 2 { - return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "type": - if !isValidMountType(strings.ToLower(value)) { - return nil, errors.Errorf("unsupported mount type %q", value) - } - m.Type = strings.ToLower(value) - case "from": - m.From = value - case "source", "src": - m.Source = value - case "target", "dst", "destination": - m.Target = value - case "readonly", "ro": - m.ReadOnly, err = strconv.ParseBool(value) - if err != nil { - return nil, errors.Errorf("invalid value for %s: %s", key, value) - } - roAuto = false - case "readwrite", "rw": - rw, err := strconv.ParseBool(value) - if err != nil { - return nil, errors.Errorf("invalid value for %s: %s", key, value) - } - m.ReadOnly = !rw - roAuto = false - case "id": - m.CacheID = value - case "sharing": - if _, ok := allowedSharingTypes[strings.ToLower(value)]; !ok { - return nil, errors.Errorf("unsupported sharing value %q", value) - } - m.CacheSharing = strings.ToLower(value) - default: - return nil, errors.Errorf("unexpected key '%s' in '%s'", key, field) - } - } - - if roAuto { - if m.Type == MountTypeCache { - m.ReadOnly = false - } else { - m.ReadOnly = true - } - } - - if m.CacheSharing != "" && m.Type != MountTypeCache { - return nil, errors.Errorf("invalid cache sharing set for %v mount", m.Type) - } - - if m.Type == MountTypeSecret { - if m.From != "" { - return nil, errors.Errorf("secret mount should not have a from") - } - if m.CacheSharing != "" { - return nil, errors.Errorf("secret mount should not define sharing") - } - if m.Source == "" && m.Target == "" && m.CacheID == "" { - return nil, errors.Errorf("invalid secret mount. 
one of source, target required") - } - if m.Source != "" && m.CacheID != "" { - return nil, errors.Errorf("both source and id can't be set") - } - } - - return m, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go deleted file mode 100644 index 6cce1191d735..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build dfsecrets - -package instructions - -func isSecretMountsSupported() bool { - return true -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go deleted file mode 100644 index 0b94a564470b..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build dfssh - -package instructions - -func isSSHMountsSupported() bool { - return true -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go deleted file mode 100644 index 0b03b34cd17b..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !windows - -package instructions - -import "fmt" - -func errNotJSON(command, _ string) error { - return fmt.Errorf("%s requires the arguments to be in JSON form", command) -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_windows.go deleted file mode 100644 index a4843c5b6ab5..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -package instructions - -import ( - "fmt" - "path/filepath" - "regexp" - "strings" -) - -func errNotJSON(command, original string) error { - // For Windows users, give a hint if it looks like it might contain - // a path which hasn't been escaped such as ["c:\windows\system32\prog.exe", "-param"], - // as JSON must be escaped. Unfortunate... - // - // Specifically looking for quote-driveletter-colon-backslash, there's no - // double backslash and a [] pair. No, this is not perfect, but it doesn't - // have to be. It's simply a hint to make life a little easier. - extra := "" - original = filepath.FromSlash(strings.ToLower(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1))) - if len(regexp.MustCompile(`"[a-z]:\\.*`).FindStringSubmatch(original)) > 0 && - !strings.Contains(original, `\\`) && - strings.Contains(original, "[") && - strings.Contains(original, "]") { - extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. 
JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original) - } - return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra) -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go deleted file mode 100644 index 0ce076a58be0..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go +++ /dev/null @@ -1,650 +0,0 @@ -package instructions - -import ( - "fmt" - "regexp" - "sort" - "strconv" - "strings" - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/strslice" - "github.com/moby/buildkit/frontend/dockerfile/command" - "github.com/moby/buildkit/frontend/dockerfile/parser" - "github.com/pkg/errors" -) - -type parseRequest struct { - command string - args []string - attributes map[string]bool - flags *BFlags - original string -} - -var parseRunPreHooks []func(*RunCommand, parseRequest) error -var parseRunPostHooks []func(*RunCommand, parseRequest) error - -func nodeArgs(node *parser.Node) []string { - result := []string{} - for ; node.Next != nil; node = node.Next { - arg := node.Next - if len(arg.Children) == 0 { - result = append(result, arg.Value) - } else if len(arg.Children) == 1 { - //sub command - result = append(result, arg.Children[0].Value) - result = append(result, nodeArgs(arg.Children[0])...) - } - } - return result -} - -func newParseRequestFromNode(node *parser.Node) parseRequest { - return parseRequest{ - command: node.Value, - args: nodeArgs(node), - attributes: node.Attributes, - original: node.Original, - flags: NewBFlagsWithArgs(node.Flags), - } -} - -// ParseInstruction converts an AST to a typed instruction (either a command or a build stage beginning when encountering a `FROM` statement) -func ParseInstruction(node *parser.Node) (interface{}, error) { - req := newParseRequestFromNode(node) - switch node.Value { - case command.Env: - return parseEnv(req) - case command.Maintainer: - return parseMaintainer(req) - case command.Label: - return parseLabel(req) - case command.Add: - return parseAdd(req) - case command.Copy: - return parseCopy(req) - case command.From: - return parseFrom(req) - case command.Onbuild: - return parseOnBuild(req) - case command.Workdir: - return parseWorkdir(req) - case command.Run: - return parseRun(req) - case command.Cmd: - return parseCmd(req) - case command.Healthcheck: - return parseHealthcheck(req) - case command.Entrypoint: - return parseEntrypoint(req) - case command.Expose: - return parseExpose(req) - case command.User: - return parseUser(req) - case command.Volume: - return parseVolume(req) - case command.StopSignal: - return parseStopSignal(req) - case command.Arg: - return parseArg(req) - case command.Shell: - return parseShell(req) - } - - return nil, &UnknownInstruction{Instruction: node.Value, Line: node.StartLine} -} - -// ParseCommand converts an AST to a typed Command -func ParseCommand(node *parser.Node) (Command, error) { - s, err := ParseInstruction(node) - if err != nil { - return nil, err - } - if c, ok := s.(Command); ok { - return c, nil - } - return nil, errors.Errorf("%T is not a command type", s) -} - -// UnknownInstruction represents an error occurring when a command is unresolvable -type UnknownInstruction struct { - Line int - Instruction string -} - -func (e *UnknownInstruction) Error() string { - return fmt.Sprintf("unknown instruction: %s", 
strings.ToUpper(e.Instruction)) -} - -// IsUnknownInstruction checks if the error is an UnknownInstruction or a parseError containing an UnknownInstruction -func IsUnknownInstruction(err error) bool { - _, ok := err.(*UnknownInstruction) - if !ok { - var pe *parseError - if pe, ok = err.(*parseError); ok { - _, ok = pe.inner.(*UnknownInstruction) - } - } - return ok -} - -type parseError struct { - inner error - node *parser.Node -} - -func (e *parseError) Error() string { - return fmt.Sprintf("Dockerfile parse error line %d: %v", e.node.StartLine, e.inner.Error()) -} - -// Parse a Dockerfile into a collection of buildable stages. -// metaArgs is a collection of ARG instructions that occur before the first FROM. -func Parse(ast *parser.Node) (stages []Stage, metaArgs []ArgCommand, err error) { - for _, n := range ast.Children { - cmd, err := ParseInstruction(n) - if err != nil { - return nil, nil, &parseError{inner: err, node: n} - } - if len(stages) == 0 { - // meta arg case - if a, isArg := cmd.(*ArgCommand); isArg { - metaArgs = append(metaArgs, *a) - continue - } - } - switch c := cmd.(type) { - case *Stage: - stages = append(stages, *c) - case Command: - stage, err := CurrentStage(stages) - if err != nil { - return nil, nil, err - } - stage.AddCommand(c) - default: - return nil, nil, errors.Errorf("%T is not a command type", cmd) - } - - } - return stages, metaArgs, nil -} - -func parseKvps(args []string, cmdName string) (KeyValuePairs, error) { - if len(args) == 0 { - return nil, errAtLeastOneArgument(cmdName) - } - if len(args)%2 != 0 { - // should never get here, but just in case - return nil, errTooManyArguments(cmdName) - } - var res KeyValuePairs - for j := 0; j < len(args); j += 2 { - if len(args[j]) == 0 { - return nil, errBlankCommandNames(cmdName) - } - name := args[j] - value := args[j+1] - res = append(res, KeyValuePair{Key: name, Value: value}) - } - return res, nil -} - -func parseEnv(req parseRequest) (*EnvCommand, error) { - - if err := req.flags.Parse(); err != nil { - return nil, err - } - envs, err := parseKvps(req.args, "ENV") - if err != nil { - return nil, err - } - return &EnvCommand{ - Env: envs, - withNameAndCode: newWithNameAndCode(req), - }, nil -} - -func parseMaintainer(req parseRequest) (*MaintainerCommand, error) { - if len(req.args) != 1 { - return nil, errExactlyOneArgument("MAINTAINER") - } - - if err := req.flags.Parse(); err != nil { - return nil, err - } - return &MaintainerCommand{ - Maintainer: req.args[0], - withNameAndCode: newWithNameAndCode(req), - }, nil -} - -func parseLabel(req parseRequest) (*LabelCommand, error) { - - if err := req.flags.Parse(); err != nil { - return nil, err - } - - labels, err := parseKvps(req.args, "LABEL") - if err != nil { - return nil, err - } - - return &LabelCommand{ - Labels: labels, - withNameAndCode: newWithNameAndCode(req), - }, nil -} - -func parseAdd(req parseRequest) (*AddCommand, error) { - if len(req.args) < 2 { - return nil, errNoDestinationArgument("ADD") - } - flChown := req.flags.AddString("chown", "") - if err := req.flags.Parse(); err != nil { - return nil, err - } - return &AddCommand{ - SourcesAndDest: SourcesAndDest(req.args), - withNameAndCode: newWithNameAndCode(req), - Chown: flChown.Value, - }, nil -} - -func parseCopy(req parseRequest) (*CopyCommand, error) { - if len(req.args) < 2 { - return nil, errNoDestinationArgument("COPY") - } - flChown := req.flags.AddString("chown", "") - flFrom := req.flags.AddString("from", "") - if err := req.flags.Parse(); err != nil { - return nil, err - } - 
return &CopyCommand{ - SourcesAndDest: SourcesAndDest(req.args), - From: flFrom.Value, - withNameAndCode: newWithNameAndCode(req), - Chown: flChown.Value, - }, nil -} - -func parseFrom(req parseRequest) (*Stage, error) { - stageName, err := parseBuildStageName(req.args) - if err != nil { - return nil, err - } - - flPlatform := req.flags.AddString("platform", "") - if err := req.flags.Parse(); err != nil { - return nil, err - } - - code := strings.TrimSpace(req.original) - return &Stage{ - BaseName: req.args[0], - Name: stageName, - SourceCode: code, - Commands: []Command{}, - Platform: flPlatform.Value, - }, nil - -} - -func parseBuildStageName(args []string) (string, error) { - stageName := "" - switch { - case len(args) == 3 && strings.EqualFold(args[1], "as"): - stageName = strings.ToLower(args[2]) - if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", stageName); !ok { - return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", stageName) - } - case len(args) != 1: - return "", errors.New("FROM requires either one or three arguments") - } - - return stageName, nil -} - -func parseOnBuild(req parseRequest) (*OnbuildCommand, error) { - if len(req.args) == 0 { - return nil, errAtLeastOneArgument("ONBUILD") - } - if err := req.flags.Parse(); err != nil { - return nil, err - } - - triggerInstruction := strings.ToUpper(strings.TrimSpace(req.args[0])) - switch strings.ToUpper(triggerInstruction) { - case "ONBUILD": - return nil, errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") - case "MAINTAINER", "FROM": - return nil, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) - } - - original := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(req.original, "") - return &OnbuildCommand{ - Expression: original, - withNameAndCode: newWithNameAndCode(req), - }, nil - -} - -func parseWorkdir(req parseRequest) (*WorkdirCommand, error) { - if len(req.args) != 1 { - return nil, errExactlyOneArgument("WORKDIR") - } - - err := req.flags.Parse() - if err != nil { - return nil, err - } - return &WorkdirCommand{ - Path: req.args[0], - withNameAndCode: newWithNameAndCode(req), - }, nil - -} - -func parseShellDependentCommand(req parseRequest, emptyAsNil bool) ShellDependantCmdLine { - args := handleJSONArgs(req.args, req.attributes) - cmd := strslice.StrSlice(args) - if emptyAsNil && len(cmd) == 0 { - cmd = nil - } - return ShellDependantCmdLine{ - CmdLine: cmd, - PrependShell: !req.attributes["json"], - } -} - -func parseRun(req parseRequest) (*RunCommand, error) { - cmd := &RunCommand{} - - for _, fn := range parseRunPreHooks { - if err := fn(cmd, req); err != nil { - return nil, err - } - } - - if err := req.flags.Parse(); err != nil { - return nil, err - } - - cmd.ShellDependantCmdLine = parseShellDependentCommand(req, false) - cmd.withNameAndCode = newWithNameAndCode(req) - - for _, fn := range parseRunPostHooks { - if err := fn(cmd, req); err != nil { - return nil, err - } - } - - return cmd, nil -} - -func parseCmd(req parseRequest) (*CmdCommand, error) { - if err := req.flags.Parse(); err != nil { - return nil, err - } - return &CmdCommand{ - ShellDependantCmdLine: parseShellDependentCommand(req, false), - withNameAndCode: newWithNameAndCode(req), - }, nil - -} - -func parseEntrypoint(req parseRequest) (*EntrypointCommand, error) { - if err := req.flags.Parse(); err != nil { - return nil, err - } - - cmd := &EntrypointCommand{ - ShellDependantCmdLine: parseShellDependentCommand(req, true), - 
withNameAndCode: newWithNameAndCode(req), - } - - return cmd, nil -} - -// parseOptInterval(flag) is the duration of flag.Value, or 0 if -// empty. An error is reported if the value is given and less than minimum duration. -func parseOptInterval(f *Flag) (time.Duration, error) { - s := f.Value - if s == "" { - return 0, nil - } - d, err := time.ParseDuration(s) - if err != nil { - return 0, err - } - if d < container.MinimumDuration { - return 0, fmt.Errorf("Interval %#v cannot be less than %s", f.name, container.MinimumDuration) - } - return d, nil -} -func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) { - if len(req.args) == 0 { - return nil, errAtLeastOneArgument("HEALTHCHECK") - } - cmd := &HealthCheckCommand{ - withNameAndCode: newWithNameAndCode(req), - } - - typ := strings.ToUpper(req.args[0]) - args := req.args[1:] - if typ == "NONE" { - if len(args) != 0 { - return nil, errors.New("HEALTHCHECK NONE takes no arguments") - } - test := strslice.StrSlice{typ} - cmd.Health = &container.HealthConfig{ - Test: test, - } - } else { - - healthcheck := container.HealthConfig{} - - flInterval := req.flags.AddString("interval", "") - flTimeout := req.flags.AddString("timeout", "") - flStartPeriod := req.flags.AddString("start-period", "") - flRetries := req.flags.AddString("retries", "") - - if err := req.flags.Parse(); err != nil { - return nil, err - } - - switch typ { - case "CMD": - cmdSlice := handleJSONArgs(args, req.attributes) - if len(cmdSlice) == 0 { - return nil, errors.New("Missing command after HEALTHCHECK CMD") - } - - if !req.attributes["json"] { - typ = "CMD-SHELL" - } - - healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...)) - default: - return nil, fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ) - } - - interval, err := parseOptInterval(flInterval) - if err != nil { - return nil, err - } - healthcheck.Interval = interval - - timeout, err := parseOptInterval(flTimeout) - if err != nil { - return nil, err - } - healthcheck.Timeout = timeout - - startPeriod, err := parseOptInterval(flStartPeriod) - if err != nil { - return nil, err - } - healthcheck.StartPeriod = startPeriod - - if flRetries.Value != "" { - retries, err := strconv.ParseInt(flRetries.Value, 10, 32) - if err != nil { - return nil, err - } - if retries < 1 { - return nil, fmt.Errorf("--retries must be at least 1 (not %d)", retries) - } - healthcheck.Retries = int(retries) - } else { - healthcheck.Retries = 0 - } - - cmd.Health = &healthcheck - } - return cmd, nil -} - -func parseExpose(req parseRequest) (*ExposeCommand, error) { - portsTab := req.args - - if len(req.args) == 0 { - return nil, errAtLeastOneArgument("EXPOSE") - } - - if err := req.flags.Parse(); err != nil { - return nil, err - } - - sort.Strings(portsTab) - return &ExposeCommand{ - Ports: portsTab, - withNameAndCode: newWithNameAndCode(req), - }, nil -} - -func parseUser(req parseRequest) (*UserCommand, error) { - if len(req.args) != 1 { - return nil, errExactlyOneArgument("USER") - } - - if err := req.flags.Parse(); err != nil { - return nil, err - } - return &UserCommand{ - User: req.args[0], - withNameAndCode: newWithNameAndCode(req), - }, nil -} - -func parseVolume(req parseRequest) (*VolumeCommand, error) { - if len(req.args) == 0 { - return nil, errAtLeastOneArgument("VOLUME") - } - - if err := req.flags.Parse(); err != nil { - return nil, err - } - - cmd := &VolumeCommand{ - withNameAndCode: newWithNameAndCode(req), - } - - for _, v := range req.args { - v = strings.TrimSpace(v) - if v == "" { 
- return nil, errors.New("VOLUME specified can not be an empty string") - } - cmd.Volumes = append(cmd.Volumes, v) - } - return cmd, nil - -} - -func parseStopSignal(req parseRequest) (*StopSignalCommand, error) { - if len(req.args) != 1 { - return nil, errExactlyOneArgument("STOPSIGNAL") - } - sig := req.args[0] - - cmd := &StopSignalCommand{ - Signal: sig, - withNameAndCode: newWithNameAndCode(req), - } - return cmd, nil - -} - -func parseArg(req parseRequest) (*ArgCommand, error) { - if len(req.args) != 1 { - return nil, errExactlyOneArgument("ARG") - } - - kvpo := KeyValuePairOptional{} - - arg := req.args[0] - // 'arg' can just be a name or name-value pair. Note that this is different - // from 'env' that handles the split of name and value at the parser level. - // The reason for doing it differently for 'arg' is that we support just - // defining an arg and not assign it a value (while 'env' always expects a - // name-value pair). If possible, it will be good to harmonize the two. - if strings.Contains(arg, "=") { - parts := strings.SplitN(arg, "=", 2) - if len(parts[0]) == 0 { - return nil, errBlankCommandNames("ARG") - } - - kvpo.Key = parts[0] - kvpo.Value = &parts[1] - } else { - kvpo.Key = arg - } - - return &ArgCommand{ - KeyValuePairOptional: kvpo, - withNameAndCode: newWithNameAndCode(req), - }, nil -} - -func parseShell(req parseRequest) (*ShellCommand, error) { - if err := req.flags.Parse(); err != nil { - return nil, err - } - shellSlice := handleJSONArgs(req.args, req.attributes) - switch { - case len(shellSlice) == 0: - // SHELL [] - return nil, errAtLeastOneArgument("SHELL") - case req.attributes["json"]: - // SHELL ["powershell", "-command"] - - return &ShellCommand{ - Shell: strslice.StrSlice(shellSlice), - withNameAndCode: newWithNameAndCode(req), - }, nil - default: - // SHELL powershell -command - not JSON - return nil, errNotJSON("SHELL", req.original) - } -} - -func errAtLeastOneArgument(command string) error { - return errors.Errorf("%s requires at least one argument", command) -} - -func errExactlyOneArgument(command string) error { - return errors.Errorf("%s requires exactly one argument", command) -} - -func errNoDestinationArgument(command string) error { - return errors.Errorf("%s requires at least two arguments, but only one was provided. 
Destination could not be determined.", command) -} - -func errBlankCommandNames(command string) error { - return errors.Errorf("%s names can not be blank", command) -} - -func errTooManyArguments(command string) error { - return errors.Errorf("Bad input to %s, too many arguments", command) -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse_test.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse_test.go deleted file mode 100644 index 3748796b45e2..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse_test.go +++ /dev/null @@ -1,198 +0,0 @@ -package instructions - -import ( - "strings" - "testing" - - "github.com/moby/buildkit/frontend/dockerfile/command" - "github.com/moby/buildkit/frontend/dockerfile/parser" - "gotest.tools/assert" - is "gotest.tools/assert/cmp" -) - -func TestCommandsExactlyOneArgument(t *testing.T) { - commands := []string{ - "MAINTAINER", - "WORKDIR", - "USER", - "STOPSIGNAL", - } - - for _, cmd := range commands { - ast, err := parser.Parse(strings.NewReader(cmd)) - assert.NilError(t, err) - _, err = ParseInstruction(ast.AST.Children[0]) - assert.Check(t, is.Error(err, errExactlyOneArgument(cmd).Error())) - } -} - -func TestCommandsAtLeastOneArgument(t *testing.T) { - commands := []string{ - "ENV", - "LABEL", - "ONBUILD", - "HEALTHCHECK", - "EXPOSE", - "VOLUME", - } - - for _, cmd := range commands { - ast, err := parser.Parse(strings.NewReader(cmd)) - assert.NilError(t, err) - _, err = ParseInstruction(ast.AST.Children[0]) - assert.Check(t, is.Error(err, errAtLeastOneArgument(cmd).Error())) - } -} - -func TestCommandsNoDestinationArgument(t *testing.T) { - commands := []string{ - "ADD", - "COPY", - } - - for _, cmd := range commands { - ast, err := parser.Parse(strings.NewReader(cmd + " arg1")) - assert.NilError(t, err) - _, err = ParseInstruction(ast.AST.Children[0]) - assert.Check(t, is.Error(err, errNoDestinationArgument(cmd).Error())) - } -} - -func TestCommandsTooManyArguments(t *testing.T) { - commands := []string{ - "ENV", - "LABEL", - } - - for _, command := range commands { - node := &parser.Node{ - Original: command + "arg1 arg2 arg3", - Value: strings.ToLower(command), - Next: &parser.Node{ - Value: "arg1", - Next: &parser.Node{ - Value: "arg2", - Next: &parser.Node{ - Value: "arg3", - }, - }, - }, - } - _, err := ParseInstruction(node) - assert.Check(t, is.Error(err, errTooManyArguments(command).Error())) - } -} - -func TestCommandsBlankNames(t *testing.T) { - commands := []string{ - "ENV", - "LABEL", - } - - for _, cmd := range commands { - node := &parser.Node{ - Original: cmd + " =arg2", - Value: strings.ToLower(cmd), - Next: &parser.Node{ - Value: "", - Next: &parser.Node{ - Value: "arg2", - }, - }, - } - _, err := ParseInstruction(node) - assert.Check(t, is.Error(err, errBlankCommandNames(cmd).Error())) - } -} - -func TestHealthCheckCmd(t *testing.T) { - node := &parser.Node{ - Value: command.Healthcheck, - Next: &parser.Node{ - Value: "CMD", - Next: &parser.Node{ - Value: "hello", - Next: &parser.Node{ - Value: "world", - }, - }, - }, - } - cmd, err := ParseInstruction(node) - assert.Check(t, err) - hc, ok := cmd.(*HealthCheckCommand) - assert.Check(t, ok) - expected := []string{"CMD-SHELL", "hello world"} - assert.Check(t, is.DeepEqual(expected, hc.Health.Test)) -} - -func TestParseOptInterval(t *testing.T) { - flInterval := &Flag{ - name: "interval", - flagType: stringType, - Value: "50ns", - } - _, err := parseOptInterval(flInterval) - assert.Check(t, 
is.ErrorContains(err, "cannot be less than 1ms")) - - flInterval.Value = "1ms" - _, err = parseOptInterval(flInterval) - assert.NilError(t, err) -} - -func TestErrorCases(t *testing.T) { - cases := []struct { - name string - dockerfile string - expectedError string - }{ - { - name: "copyEmptyWhitespace", - dockerfile: `COPY - quux \ - bar`, - expectedError: "COPY requires at least two arguments", - }, - { - name: "ONBUILD forbidden FROM", - dockerfile: "ONBUILD FROM scratch", - expectedError: "FROM isn't allowed as an ONBUILD trigger", - }, - { - name: "ONBUILD forbidden MAINTAINER", - dockerfile: "ONBUILD MAINTAINER docker.io", - expectedError: "MAINTAINER isn't allowed as an ONBUILD trigger", - }, - { - name: "ARG two arguments", - dockerfile: "ARG foo bar", - expectedError: "ARG requires exactly one argument", - }, - { - name: "MAINTAINER unknown flag", - dockerfile: "MAINTAINER --boo joe@example.com", - expectedError: "Unknown flag: boo", - }, - { - name: "Chaining ONBUILD", - dockerfile: `ONBUILD ONBUILD RUN touch foobar`, - expectedError: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed", - }, - { - name: "Invalid instruction", - dockerfile: `foo bar`, - expectedError: "unknown instruction: FOO", - }, - } - for _, c := range cases { - r := strings.NewReader(c.dockerfile) - ast, err := parser.Parse(r) - - if err != nil { - t.Fatalf("Error when parsing Dockerfile: %s", err) - } - n := ast.AST.Children[0] - _, err = ParseInstruction(n) - assert.Check(t, is.ErrorContains(err, c.expectedError)) - } -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/support.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/support.go deleted file mode 100644 index beefe775cefa..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/support.go +++ /dev/null @@ -1,19 +0,0 @@ -package instructions - -import "strings" - -// handleJSONArgs parses command passed to CMD, ENTRYPOINT, RUN and SHELL instruction in Dockerfile -// for exec form it returns untouched args slice -// for shell form it returns concatenated args as the first element of a slice -func handleJSONArgs(args []string, attributes map[string]bool) []string { - if len(args) == 0 { - return []string{} - } - - if attributes != nil && attributes["json"] { - return args - } - - // literal string command, not an exec array - return []string{strings.Join(args, " ")} -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/support_test.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/support_test.go deleted file mode 100644 index a294422da5f6..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/support_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package instructions - -import "testing" - -type testCase struct { - name string - args []string - attributes map[string]bool - expected []string -} - -func initTestCases() []testCase { - var testCases []testCase - - testCases = append(testCases, testCase{ - name: "empty args", - args: []string{}, - attributes: make(map[string]bool), - expected: []string{}, - }) - - jsonAttributes := make(map[string]bool) - jsonAttributes["json"] = true - - testCases = append(testCases, testCase{ - name: "json attribute with one element", - args: []string{"foo"}, - attributes: jsonAttributes, - expected: []string{"foo"}, - }) - - testCases = append(testCases, testCase{ - name: "json attribute with two elements", - args: []string{"foo", "bar"}, - attributes: jsonAttributes, - 
expected: []string{"foo", "bar"}, - }) - - testCases = append(testCases, testCase{ - name: "no attributes", - args: []string{"foo", "bar"}, - attributes: nil, - expected: []string{"foo bar"}, - }) - - return testCases -} - -func TestHandleJSONArgs(t *testing.T) { - testCases := initTestCases() - - for _, test := range testCases { - arguments := handleJSONArgs(test.args, test.attributes) - - if len(arguments) != len(test.expected) { - t.Fatalf("In test \"%s\": length of returned slice is incorrect. Expected: %d, got: %d", test.name, len(test.expected), len(arguments)) - } - - for i := range test.expected { - if arguments[i] != test.expected[i] { - t.Fatalf("In test \"%s\": element as position %d is incorrect. Expected: %s, got: %s", test.name, i, test.expected[i], arguments[i]) - } - } - } -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/dumper/main.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/dumper/main.go deleted file mode 100644 index 461066b7319a..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/dumper/main.go +++ /dev/null @@ -1,32 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/moby/buildkit/frontend/dockerfile/parser" -) - -func main() { - var f *os.File - var err error - - if len(os.Args) < 2 { - fmt.Println("please supply filename(s)") - os.Exit(1) - } - - for _, fn := range os.Args[1:] { - f, err = os.Open(fn) - if err != nil { - panic(err) - } - defer f.Close() - - result, err := parser.Parse(f) - if err != nil { - panic(err) - } - fmt.Println(result.AST.Dump()) - } -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/json_test.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/json_test.go deleted file mode 100644 index d4489191da90..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/json_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package parser - -import ( - "testing" -) - -var invalidJSONArraysOfStrings = []string{ - `["a",42,"b"]`, - `["a",123.456,"b"]`, - `["a",{},"b"]`, - `["a",{"c": "d"},"b"]`, - `["a",["c"],"b"]`, - `["a",true,"b"]`, - `["a",false,"b"]`, - `["a",null,"b"]`, -} - -var validJSONArraysOfStrings = map[string][]string{ - `[]`: {}, - `[""]`: {""}, - `["a"]`: {"a"}, - `["a","b"]`: {"a", "b"}, - `[ "a", "b" ]`: {"a", "b"}, - `[ "a", "b" ]`: {"a", "b"}, - ` [ "a", "b" ] `: {"a", "b"}, - `["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"}, -} - -func TestJSONArraysOfStrings(t *testing.T) { - for json, expected := range validJSONArraysOfStrings { - d := NewDefaultDirective() - - if node, _, err := parseJSON(json, d); err != nil { - t.Fatalf("%q should be a valid JSON array of strings, but wasn't! 
(err: %q)", json, err) - } else { - i := 0 - for node != nil { - if i >= len(expected) { - t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json) - } - if node.Value != expected[i] { - t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i) - } - node = node.Next - i++ - } - if i != len(expected) { - t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json) - } - } - } - for _, json := range invalidJSONArraysOfStrings { - d := NewDefaultDirective() - - if _, _, err := parseJSON(json, d); err != errDockerfileNotStringArray { - t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json) - } - } -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go deleted file mode 100644 index 15f00ce79241..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go +++ /dev/null @@ -1,368 +0,0 @@ -package parser - -// line parsers are dispatch calls that parse a single unit of text into a -// Node object which contains the whole statement. Dockerfiles have varied -// (but not usually unique, see ONBUILD for a unique example) parsing rules -// per-command, and these unify the processing in a way that makes it -// manageable. - -import ( - "encoding/json" - "errors" - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -var ( - errDockerfileNotStringArray = errors.New("when using JSON array syntax, arrays must be comprised of strings only") -) - -const ( - commandLabel = "LABEL" -) - -// ignore the current argument. This will still leave a command parsed, but -// will not incorporate the arguments into the ast. -func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) { - return &Node{}, nil, nil -} - -// used for onbuild. Could potentially be used for anything that represents a -// statement with sub-statements. -// -// ONBUILD RUN foo bar -> (onbuild (run foo bar)) -// -func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - - child, err := newNodeFromLine(rest, d) - if err != nil { - return nil, nil, err - } - - return &Node{Children: []*Node{child}}, nil, nil -} - -// helper to parse words (i.e space delimited or quoted strings) in a statement. -// The quotes are preserved as part of this function and they are stripped later -// as part of processWords(). 
-func parseWords(rest string, d *Directive) []string { - const ( - inSpaces = iota // looking for start of a word - inWord - inQuote - ) - - words := []string{} - phase := inSpaces - word := "" - quote := '\000' - blankOK := false - var ch rune - var chWidth int - - for pos := 0; pos <= len(rest); pos += chWidth { - if pos != len(rest) { - ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) - } - - if phase == inSpaces { // Looking for start of word - if pos == len(rest) { // end of input - break - } - if unicode.IsSpace(ch) { // skip spaces - continue - } - phase = inWord // found it, fall through - } - if (phase == inWord || phase == inQuote) && (pos == len(rest)) { - if blankOK || len(word) > 0 { - words = append(words, word) - } - break - } - if phase == inWord { - if unicode.IsSpace(ch) { - phase = inSpaces - if blankOK || len(word) > 0 { - words = append(words, word) - } - word = "" - blankOK = false - continue - } - if ch == '\'' || ch == '"' { - quote = ch - blankOK = true - phase = inQuote - } - if ch == d.escapeToken { - if pos+chWidth == len(rest) { - continue // just skip an escape token at end of line - } - // If we're not quoted and we see an escape token, then always just - // add the escape token plus the char to the word, even if the char - // is a quote. - word += string(ch) - pos += chWidth - ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) - } - word += string(ch) - continue - } - if phase == inQuote { - if ch == quote { - phase = inWord - } - // The escape token is special except for ' quotes - can't escape anything for ' - if ch == d.escapeToken && quote != '\'' { - if pos+chWidth == len(rest) { - phase = inWord - continue // just skip the escape token at end - } - pos += chWidth - word += string(ch) - ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) - } - word += string(ch) - } - } - - return words -} - -// parse environment like statements. Note that this does *not* handle -// variable interpolation, which will be handled in the evaluator. -func parseNameVal(rest string, key string, d *Directive) (*Node, error) { - // This is kind of tricky because we need to support the old - // variant: KEY name value - // as well as the new one: KEY name=value ... - // The trigger to know which one is being used will be whether we hit - // a space or = first. space ==> old, "=" ==> new - - words := parseWords(rest, d) - if len(words) == 0 { - return nil, nil - } - - // Old format (KEY name value) - if !strings.Contains(words[0], "=") { - parts := tokenWhitespace.Split(rest, 2) - if len(parts) < 2 { - return nil, fmt.Errorf(key + " must have two arguments") - } - return newKeyValueNode(parts[0], parts[1]), nil - } - - var rootNode *Node - var prevNode *Node - for _, word := range words { - if !strings.Contains(word, "=") { - return nil, fmt.Errorf("Syntax error - can't find = in %q. 
Must be of the form: name=value", word) - } - - parts := strings.SplitN(word, "=", 2) - node := newKeyValueNode(parts[0], parts[1]) - rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode) - } - - return rootNode, nil -} - -func newKeyValueNode(key, value string) *Node { - return &Node{ - Value: key, - Next: &Node{Value: value}, - } -} - -func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) { - if rootNode == nil { - rootNode = node - } - if prevNode != nil { - prevNode.Next = node - } - - prevNode = node.Next - return rootNode, prevNode -} - -func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) { - node, err := parseNameVal(rest, "ENV", d) - return node, nil, err -} - -func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) { - node, err := parseNameVal(rest, commandLabel, d) - return node, nil, err -} - -// parses a statement containing one or more keyword definition(s) and/or -// value assignments, like `name1 name2= name3="" name4=value`. -// Note that this is a stricter format than the old format of assignment, -// allowed by parseNameVal(), in a way that this only allows assignment of the -// form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above. -// In addition, a keyword definition alone is of the form `keyword` like `name1` -// above. And the assignments `name2=` and `name3=""` are equivalent and -// assign an empty value to the respective keywords. -func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) { - words := parseWords(rest, d) - if len(words) == 0 { - return nil, nil, nil - } - - var ( - rootnode *Node - prevNode *Node - ) - for i, word := range words { - node := &Node{} - node.Value = word - if i == 0 { - rootnode = node - } else { - prevNode.Next = node - } - prevNode = node - } - - return rootnode, nil, nil -} - -// parses a whitespace-delimited set of arguments. The result is effectively a -// linked list of string arguments. -func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - - node := &Node{} - rootnode := node - prevnode := node - for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp - prevnode = node - node.Value = str - node.Next = &Node{} - node = node.Next - } - - // XXX to get around regexp.Split *always* providing an empty string at the - // end due to how our loop is constructed, nil out the last node in the - // chain. - prevnode.Next = nil - - return rootnode, nil, nil -} - -// parseString just wraps the string in quotes and returns a working node. -func parseString(rest string, d *Directive) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - n := &Node{} - n.Value = rest - return n, nil, nil -} - -// parseJSON converts JSON arrays to an AST. 
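A compact sketch of the technique parseJSON (below) uses: decode the argument as a JSON array, reject any element that is not a string, and chain the values into the parser's linked-node form. The node type, errNotStringArray value, and parseJSONWords name here are illustrative stand-ins, not the vendored identifiers.

    package main

    import (
        "encoding/json"
        "errors"
        "fmt"
        "strings"
    )

    type node struct {
        Value string
        Next  *node
    }

    var errNotStringArray = errors.New("arrays must be comprised of strings only")

    // parseJSONWords decodes a JSON array of strings into a singly
    // linked list of nodes, mirroring the shape parseJSON returns.
    func parseJSONWords(rest string) (*node, error) {
        var raw []interface{}
        if err := json.NewDecoder(strings.NewReader(rest)).Decode(&raw); err != nil {
            return nil, err
        }
        var head, prev *node
        for _, v := range raw {
            s, ok := v.(string)
            if !ok {
                return nil, errNotStringArray
            }
            n := &node{Value: s}
            if prev == nil {
                head = n
            } else {
                prev.Next = n
            }
            prev = n
        }
        return head, nil
    }

    func main() {
        head, err := parseJSONWords(`["echo", "hello world"]`)
        for n := head; err == nil && n != nil; n = n.Next {
            fmt.Println(n.Value) // echo, then: hello world
        }
    }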
-func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) { - rest = strings.TrimLeftFunc(rest, unicode.IsSpace) - if !strings.HasPrefix(rest, "[") { - return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) - } - - var myJSON []interface{} - if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil { - return nil, nil, err - } - - var top, prev *Node - for _, str := range myJSON { - s, ok := str.(string) - if !ok { - return nil, nil, errDockerfileNotStringArray - } - - node := &Node{Value: s} - if prev == nil { - top = node - } else { - prev.Next = node - } - prev = node - } - - return top, map[string]bool{"json": true}, nil -} - -// parseMaybeJSON determines if the argument appears to be a JSON array. If -// so, passes to parseJSON; if not, quotes the result and returns a single -// node. -func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - - node, attrs, err := parseJSON(rest, d) - - if err == nil { - return node, attrs, nil - } - if err == errDockerfileNotStringArray { - return nil, nil, err - } - - node = &Node{} - node.Value = rest - return node, nil, nil -} - -// parseMaybeJSONToList determines if the argument appears to be a JSON array. If -// so, passes to parseJSON; if not, attempts to parse it as a whitespace -// delimited string. -func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) { - node, attrs, err := parseJSON(rest, d) - - if err == nil { - return node, attrs, nil - } - if err == errDockerfileNotStringArray { - return nil, nil, err - } - - return parseStringsWhitespaceDelimited(rest, d) -} - -// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument. 
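The extra type argument is carved off with a plain whitespace scan before the rest is handed to parseMaybeJSON. A standalone sketch of that split, for readers comparing against parseHealthConfig below (splitHealthType is a hypothetical helper name):

    package main

    import (
        "fmt"
        "unicode"
    )

    // splitHealthType scans to the first space to isolate the
    // HEALTHCHECK type token (CMD or NONE), then skips the run of
    // spaces to find where the sub-command begins.
    func splitHealthType(rest string) (typ, cmd string) {
        sep := 0
        for ; sep < len(rest); sep++ {
            if unicode.IsSpace(rune(rest[sep])) {
                break
            }
        }
        next := sep
        for ; next < len(rest); next++ {
            if !unicode.IsSpace(rune(rest[next])) {
                break
            }
        }
        return rest[:sep], rest[next:]
    }

    func main() {
        typ, cmd := splitHealthType("CMD curl -f http://localhost/")
        fmt.Printf("type=%q cmd=%q\n", typ, cmd)
        // type="CMD" cmd="curl -f http://localhost/"
    }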
-func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) { - // Find end of first argument - var sep int - for ; sep < len(rest); sep++ { - if unicode.IsSpace(rune(rest[sep])) { - break - } - } - next := sep - for ; next < len(rest); next++ { - if !unicode.IsSpace(rune(rest[next])) { - break - } - } - - if sep == 0 { - return nil, nil, nil - } - - typ := rest[:sep] - cmd, attrs, err := parseMaybeJSON(rest[next:], d) - if err != nil { - return nil, nil, err - } - - return &Node{Value: typ, Next: cmd}, attrs, err -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers_test.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers_test.go deleted file mode 100644 index 04d3516457b7..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package parser - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - "gotest.tools/assert" - is "gotest.tools/assert/cmp" -) - -func TestParseNameValOldFormat(t *testing.T) { - directive := Directive{} - node, err := parseNameVal("foo bar", "LABEL", &directive) - assert.Check(t, err) - - expected := &Node{ - Value: "foo", - Next: &Node{Value: "bar"}, - } - assert.DeepEqual(t, expected, node, cmpNodeOpt) -} - -var cmpNodeOpt = cmp.AllowUnexported(Node{}) - -func TestParseNameValNewFormat(t *testing.T) { - directive := Directive{} - node, err := parseNameVal("foo=bar thing=star", "LABEL", &directive) - assert.Check(t, err) - - expected := &Node{ - Value: "foo", - Next: &Node{ - Value: "bar", - Next: &Node{ - Value: "thing", - Next: &Node{ - Value: "star", - }, - }, - }, - } - assert.DeepEqual(t, expected, node, cmpNodeOpt) -} - -func TestParseNameValWithoutVal(t *testing.T) { - directive := Directive{} - // In Config.Env, a variable without `=` is removed from the environment. (#31634) - // However, in Dockerfile, we don't allow "unsetting" an environment variable. (#11922) - _, err := parseNameVal("foo", "ENV", &directive) - assert.Check(t, is.ErrorContains(err, ""), "ENV must have two arguments") -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go deleted file mode 100644 index 0453f3a9a2ae..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go +++ /dev/null @@ -1,327 +0,0 @@ -// Package parser implements a parser and parse tree dumper for Dockerfiles. -package parser - -import ( - "bufio" - "bytes" - "fmt" - "io" - "regexp" - "strconv" - "strings" - "unicode" - - "github.com/moby/buildkit/frontend/dockerfile/command" - "github.com/pkg/errors" -) - -// Node is a structure used to represent a parse tree. -// -// In the node there are three fields, Value, Next, and Children. Value is the -// current token's string value. Next is always the next non-child token, and -// children contains all the children. Here's an example: -// -// (value next (child child-next child-next-next) next-next) -// -// This data structure is frankly pretty lousy for handling complex languages, -// but lucky for us the Dockerfile isn't very complicated. This structure -// works a little more effectively than a "proper" parse tree for our needs. 
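To make the sexp dumps in the testfiles later in this diff easier to read against the structure described above, here is a reduced sketch of the Value/Next chain and its quoted dump. It omits Children, Flags, and Attributes, and the lower-case names are illustrative, not the vendored types.

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    type node struct {
        Value string
        Next  *node
    }

    // dump renders a node chain in the same sexp style as the
    // deleted Node.Dump: the instruction bare, each argument quoted.
    func dump(n *node) string {
        parts := []string{n.Value}
        for cur := n.Next; cur != nil; cur = cur.Next {
            parts = append(parts, strconv.Quote(cur.Value))
        }
        return "(" + strings.Join(parts, " ") + ")"
    }

    func main() {
        // ENV GOPATH /go  ->  (env "GOPATH" "/go")
        n := &node{Value: "env", Next: &node{Value: "GOPATH", Next: &node{Value: "/go"}}}
        fmt.Println(dump(n))
    }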
-// -type Node struct { - Value string // actual content - Next *Node // the next item in the current sexp - Children []*Node // the children of this sexp - Attributes map[string]bool // special attributes for this node - Original string // original line used before parsing - Flags []string // only top Node should have this set - StartLine int // the line in the original dockerfile where the node begins - endLine int // the line in the original dockerfile where the node ends -} - -// Dump dumps the AST defined by `node` as a list of sexps. -// Returns a string suitable for printing. -func (node *Node) Dump() string { - str := "" - str += node.Value - - if len(node.Flags) > 0 { - str += fmt.Sprintf(" %q", node.Flags) - } - - for _, n := range node.Children { - str += "(" + n.Dump() + ")\n" - } - - for n := node.Next; n != nil; n = n.Next { - if len(n.Children) > 0 { - str += " " + n.Dump() - } else { - str += " " + strconv.Quote(n.Value) - } - } - - return strings.TrimSpace(str) -} - -func (node *Node) lines(start, end int) { - node.StartLine = start - node.endLine = end -} - -// AddChild adds a new child node, and updates line information -func (node *Node) AddChild(child *Node, startLine, endLine int) { - child.lines(startLine, endLine) - if node.StartLine < 0 { - node.StartLine = startLine - } - node.endLine = endLine - node.Children = append(node.Children, child) -} - -var ( - dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error) - tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) - tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P.).*$`) - tokenComment = regexp.MustCompile(`^#.*$`) -) - -// DefaultEscapeToken is the default escape token -const DefaultEscapeToken = '\\' - -// Directive is the structure used during a build run to hold the state of -// parsing directives. -type Directive struct { - escapeToken rune // Current escape token - lineContinuationRegex *regexp.Regexp // Current line continuation regex - processingComplete bool // Whether we are done looking for directives - escapeSeen bool // Whether the escape directive has been seen -} - -// setEscapeToken sets the default token for escaping characters in a Dockerfile. -func (d *Directive) setEscapeToken(s string) error { - if s != "`" && s != "\\" { - return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s) - } - d.escapeToken = rune(s[0]) - d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`) - return nil -} - -// possibleParserDirective looks for parser directives, eg '# escapeToken='. -// Parser directives must precede any builder instruction or other comments, -// and cannot be repeated. -func (d *Directive) possibleParserDirective(line string) error { - if d.processingComplete { - return nil - } - - tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line)) - if len(tecMatch) != 0 { - for i, n := range tokenEscapeCommand.SubexpNames() { - if n == "escapechar" { - if d.escapeSeen { - return errors.New("only one escape parser directive can be used") - } - d.escapeSeen = true - return d.setEscapeToken(tecMatch[i]) - } - } - } - - d.processingComplete = true - return nil -} - -// NewDefaultDirective returns a new Directive with the default escapeToken token -func NewDefaultDirective() *Directive { - directive := Directive{} - directive.setEscapeToken(string(DefaultEscapeToken)) - return &directive -} - -func init() { - // Dispatch Table. see line_parsers.go for the parse functions. - // The command is parsed and mapped to the line parser. 
The line parser - // receives the arguments but not the command, and returns an AST after - // reformulating the arguments according to the rules in the parser - // functions. Errors are propagated up by Parse() and the resulting AST can - // be incorporated directly into the existing AST as a next. - dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){ - command.Add: parseMaybeJSONToList, - command.Arg: parseNameOrNameVal, - command.Cmd: parseMaybeJSON, - command.Copy: parseMaybeJSONToList, - command.Entrypoint: parseMaybeJSON, - command.Env: parseEnv, - command.Expose: parseStringsWhitespaceDelimited, - command.From: parseStringsWhitespaceDelimited, - command.Healthcheck: parseHealthConfig, - command.Label: parseLabel, - command.Maintainer: parseString, - command.Onbuild: parseSubCommand, - command.Run: parseMaybeJSON, - command.Shell: parseMaybeJSON, - command.StopSignal: parseString, - command.User: parseString, - command.Volume: parseMaybeJSONToList, - command.Workdir: parseString, - } -} - -// newNodeFromLine splits the line into parts, and dispatches to a function -// based on the command and command arguments. A Node is created from the -// result of the dispatch. -func newNodeFromLine(line string, directive *Directive) (*Node, error) { - cmd, flags, args, err := splitCommand(line) - if err != nil { - return nil, err - } - - fn := dispatch[cmd] - // Ignore invalid Dockerfile instructions - if fn == nil { - fn = parseIgnore - } - next, attrs, err := fn(args, directive) - if err != nil { - return nil, err - } - - return &Node{ - Value: cmd, - Original: line, - Flags: flags, - Next: next, - Attributes: attrs, - }, nil -} - -// Result is the result of parsing a Dockerfile -type Result struct { - AST *Node - EscapeToken rune - Warnings []string -} - -// PrintWarnings to the writer -func (r *Result) PrintWarnings(out io.Writer) { - if len(r.Warnings) == 0 { - return - } - fmt.Fprintf(out, strings.Join(r.Warnings, "\n")+"\n") -} - -// Parse reads lines from a Reader, parses the lines into an AST and returns -// the AST and escape token -func Parse(rwc io.Reader) (*Result, error) { - d := NewDefaultDirective() - currentLine := 0 - root := &Node{StartLine: -1} - scanner := bufio.NewScanner(rwc) - warnings := []string{} - - var err error - for scanner.Scan() { - bytesRead := scanner.Bytes() - if currentLine == 0 { - // First line, strip the byte-order-marker if present - bytesRead = bytes.TrimPrefix(bytesRead, utf8bom) - } - bytesRead, err = processLine(d, bytesRead, true) - if err != nil { - return nil, err - } - currentLine++ - - startLine := currentLine - line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d) - if isEndOfLine && line == "" { - continue - } - - var hasEmptyContinuationLine bool - for !isEndOfLine && scanner.Scan() { - bytesRead, err := processLine(d, scanner.Bytes(), false) - if err != nil { - return nil, err - } - currentLine++ - - if isComment(scanner.Bytes()) { - // original line was a comment (processLine strips comments) - continue - } - if isEmptyContinuationLine(bytesRead) { - hasEmptyContinuationLine = true - continue - } - - continuationLine := string(bytesRead) - continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d) - line += continuationLine - } - - if hasEmptyContinuationLine { - warnings = append(warnings, "[WARNING]: Empty continuation line found in:\n "+line) - } - - child, err := newNodeFromLine(line, d) - if err != nil { - return nil, err - } - root.AddChild(child, startLine, currentLine) - 
} - - if len(warnings) > 0 { - warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.") - } - return &Result{ - AST: root, - Warnings: warnings, - EscapeToken: d.escapeToken, - }, handleScannerError(scanner.Err()) -} - -func trimComments(src []byte) []byte { - return tokenComment.ReplaceAll(src, []byte{}) -} - -func trimWhitespace(src []byte) []byte { - return bytes.TrimLeftFunc(src, unicode.IsSpace) -} - -func isComment(line []byte) bool { - return tokenComment.Match(trimWhitespace(line)) -} - -func isEmptyContinuationLine(line []byte) bool { - return len(trimWhitespace(line)) == 0 -} - -var utf8bom = []byte{0xEF, 0xBB, 0xBF} - -func trimContinuationCharacter(line string, d *Directive) (string, bool) { - if d.lineContinuationRegex.MatchString(line) { - line = d.lineContinuationRegex.ReplaceAllString(line, "") - return line, false - } - return line, true -} - -// TODO: remove stripLeftWhitespace after deprecation period. It seems silly -// to preserve whitespace on continuation lines. Why is that done? -func processLine(d *Directive, token []byte, stripLeftWhitespace bool) ([]byte, error) { - if stripLeftWhitespace { - token = trimWhitespace(token) - } - return trimComments(token), d.possibleParserDirective(string(token)) -} - -func handleScannerError(err error) error { - switch err { - case bufio.ErrTooLong: - return errors.Errorf("dockerfile line greater than max allowed size of %d", bufio.MaxScanTokenSize-1) - default: - return err - } -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser_test.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser_test.go deleted file mode 100644 index 7185c4b37b8a..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser_test.go +++ /dev/null @@ -1,174 +0,0 @@ -package parser - -import ( - "bufio" - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - "testing" - - "gotest.tools/assert" - is "gotest.tools/assert/cmp" -) - -const testDir = "testfiles" -const negativeTestDir = "testfiles-negative" -const testFileLineInfo = "testfile-line/Dockerfile" - -func getDirs(t *testing.T, dir string) []string { - f, err := os.Open(dir) - assert.NilError(t, err) - defer f.Close() - - dirs, err := f.Readdirnames(0) - assert.NilError(t, err) - return dirs -} - -func TestParseErrorCases(t *testing.T) { - for _, dir := range getDirs(t, negativeTestDir) { - dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") - - df, err := os.Open(dockerfile) - assert.NilError(t, err, dockerfile) - defer df.Close() - - _, err = Parse(df) - assert.Check(t, is.ErrorContains(err, ""), dockerfile) - } -} - -func TestParseCases(t *testing.T) { - for _, dir := range getDirs(t, testDir) { - dockerfile := filepath.Join(testDir, dir, "Dockerfile") - resultfile := filepath.Join(testDir, dir, "result") - - df, err := os.Open(dockerfile) - assert.NilError(t, err, dockerfile) - defer df.Close() - - result, err := Parse(df) - assert.NilError(t, err, dockerfile) - - content, err := ioutil.ReadFile(resultfile) - assert.NilError(t, err, resultfile) - - if runtime.GOOS == "windows" { - // CRLF --> CR to match Unix behavior - content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1) - } - assert.Check(t, is.Equal(result.AST.Dump()+"\n", string(content)), "In "+dockerfile) - } -} - -func TestParseWords(t *testing.T) { - tests := []map[string][]string{ - { - "input": {"foo"}, - "expect": {"foo"}, - }, - { - "input": {"foo bar"}, 
- "expect": {"foo", "bar"}, - }, - { - "input": {"foo\\ bar"}, - "expect": {"foo\\ bar"}, - }, - { - "input": {"foo=bar"}, - "expect": {"foo=bar"}, - }, - { - "input": {"foo bar 'abc xyz'"}, - "expect": {"foo", "bar", "'abc xyz'"}, - }, - { - "input": {`foo bar "abc xyz"`}, - "expect": {"foo", "bar", `"abc xyz"`}, - }, - { - "input": {"àöû"}, - "expect": {"àöû"}, - }, - { - "input": {`föo bàr "âbc xÿz"`}, - "expect": {"föo", "bàr", `"âbc xÿz"`}, - }, - } - - for _, test := range tests { - words := parseWords(test["input"][0], NewDefaultDirective()) - assert.Check(t, is.DeepEqual(test["expect"], words)) - } -} - -func TestParseIncludesLineNumbers(t *testing.T) { - df, err := os.Open(testFileLineInfo) - assert.NilError(t, err) - defer df.Close() - - result, err := Parse(df) - assert.NilError(t, err) - - ast := result.AST - assert.Check(t, is.Equal(5, ast.StartLine)) - assert.Check(t, is.Equal(31, ast.endLine)) - assert.Check(t, is.Len(ast.Children, 3)) - expected := [][]int{ - {5, 5}, - {11, 12}, - {17, 31}, - } - for i, child := range ast.Children { - msg := fmt.Sprintf("Child %d", i) - assert.Check(t, is.DeepEqual(expected[i], []int{child.StartLine, child.endLine}), msg) - } -} - -func TestParseWarnsOnEmptyContinutationLine(t *testing.T) { - dockerfile := bytes.NewBufferString(` -FROM alpine:3.6 - -RUN something \ - - following \ - - more - -RUN another \ - - thing -RUN non-indented \ -# this is a comment - after-comment - -RUN indented \ - # this is an indented comment - comment - `) - - result, err := Parse(dockerfile) - assert.NilError(t, err) - warnings := result.Warnings - assert.Check(t, is.Len(warnings, 3)) - assert.Check(t, is.Contains(warnings[0], "Empty continuation line found in")) - assert.Check(t, is.Contains(warnings[0], "RUN something following more")) - assert.Check(t, is.Contains(warnings[1], "RUN another thing")) - assert.Check(t, is.Contains(warnings[2], "will become errors in a future release")) -} - -func TestParseReturnsScannerErrors(t *testing.T) { - label := strings.Repeat("a", bufio.MaxScanTokenSize) - - dockerfile := strings.NewReader(fmt.Sprintf(` - FROM image - LABEL test=%s -`, label)) - _, err := Parse(dockerfile) - assert.Check(t, is.Error(err, "dockerfile line greater than max allowed size of 65535")) -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go deleted file mode 100644 index 171f454f6da2..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go +++ /dev/null @@ -1,118 +0,0 @@ -package parser - -import ( - "strings" - "unicode" -) - -// splitCommand takes a single line of text and parses out the cmd and args, -// which are used for dispatching to more exact parsing functions. 
-func splitCommand(line string) (string, []string, string, error) { - var args string - var flags []string - - // Make sure we get the same results irrespective of leading/trailing spaces - cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) - cmd := strings.ToLower(cmdline[0]) - - if len(cmdline) == 2 { - var err error - args, flags, err = extractBuilderFlags(cmdline[1]) - if err != nil { - return "", nil, "", err - } - } - - return cmd, flags, strings.TrimSpace(args), nil -} - -func extractBuilderFlags(line string) (string, []string, error) { - // Parses the BuilderFlags and returns the remaining part of the line - - const ( - inSpaces = iota // looking for start of a word - inWord - inQuote - ) - - words := []string{} - phase := inSpaces - word := "" - quote := '\000' - blankOK := false - var ch rune - - for pos := 0; pos <= len(line); pos++ { - if pos != len(line) { - ch = rune(line[pos]) - } - - if phase == inSpaces { // Looking for start of word - if pos == len(line) { // end of input - break - } - if unicode.IsSpace(ch) { // skip spaces - continue - } - - // Only keep going if the next word starts with -- - if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { - return line[pos:], words, nil - } - - phase = inWord // found something with "--", fall through - } - if (phase == inWord || phase == inQuote) && (pos == len(line)) { - if word != "--" && (blankOK || len(word) > 0) { - words = append(words, word) - } - break - } - if phase == inWord { - if unicode.IsSpace(ch) { - phase = inSpaces - if word == "--" { - return line[pos:], words, nil - } - if blankOK || len(word) > 0 { - words = append(words, word) - } - word = "" - blankOK = false - continue - } - if ch == '\'' || ch == '"' { - quote = ch - blankOK = true - phase = inQuote - continue - } - if ch == '\\' { - if pos+1 == len(line) { - continue // just skip \ at end - } - pos++ - ch = rune(line[pos]) - } - word += string(ch) - continue - } - if phase == inQuote { - if ch == quote { - phase = inWord - continue - } - if ch == '\\' { - if pos+1 == len(line) { - phase = inWord - continue // just skip \ at end - } - pos++ - ch = rune(line[pos]) - } - word += string(ch) - } - } - - return "", words, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfile-line/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfile-line/Dockerfile deleted file mode 100644 index c7601c9f692b..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfile-line/Dockerfile +++ /dev/null @@ -1,35 +0,0 @@ -# ESCAPE=\ - - - -FROM brimstone/ubuntu:14.04 - - -# TORUN -v /var/run/docker.sock:/var/run/docker.sock - - -ENV GOPATH \ -/go - - - -# Install the packages we need, clean up after them and us -RUN apt-get update \ - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ - - - && apt-get install -y --no-install-recommends git golang ca-certificates \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists \ - - && go get -v github.com/brimstone/consuldock \ - && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ - - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ - && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ - && rm /tmp/dpkg.* \ - && rm -rf $GOPATH - - - - diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile deleted file mode 100644 
index 1d6557879465..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM busybox - -ENV PATH diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile deleted file mode 100644 index d1be4596c7b0..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD [ "echo", [ "nested json" ] ] diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile deleted file mode 100644 index 035b4e8bb571..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM ubuntu:14.04 -LABEL maintainer Seongyeol Lim - -COPY . /go/src/github.com/docker/docker -ADD . / -ADD null / -COPY nullfile /tmp -ADD [ "vimrc", "/tmp" ] -COPY [ "bashrc", "/tmp" ] -COPY [ "test file", "/tmp" ] -ADD [ "test file", "/tmp/test file" ] diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result deleted file mode 100644 index d1f71ecc5afc..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result +++ /dev/null @@ -1,10 +0,0 @@ -(from "ubuntu:14.04") -(label "maintainer" "Seongyeol Lim ") -(copy "." "/go/src/github.com/docker/docker") -(add "." 
"/") -(add "null" "/") -(copy "nullfile" "/tmp") -(add "vimrc" "/tmp") -(copy "bashrc" "/tmp") -(copy "test file" "/tmp") -(add "test file" "/tmp/test file") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile deleted file mode 100644 index 9c0952acb0bd..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -#escape=\ -FROM brimstone/ubuntu:14.04 - -LABEL maintainer brimstone@the.narro.ws - -# TORUN -v /var/run/docker.sock:/var/run/docker.sock - -ENV GOPATH /go - -# Set our command -ENTRYPOINT ["/usr/local/bin/consuldock"] - -# Install the packages we need, clean up after them and us -RUN apt-get update \ - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ - && apt-get install -y --no-install-recommends git golang ca-certificates \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists \ - - && go get -v github.com/brimstone/consuldock \ - && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ - - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ - && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ - && rm /tmp/dpkg.* \ - && rm -rf $GOPATH diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/brimstone-consuldock/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/brimstone-consuldock/result deleted file mode 100644 index 3b45db62b466..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/brimstone-consuldock/result +++ /dev/null @@ -1,5 +0,0 @@ -(from "brimstone/ubuntu:14.04") -(label "maintainer" "brimstone@the.narro.ws") -(env "GOPATH" "/go") -(entrypoint "/usr/local/bin/consuldock") -(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile deleted file mode 100644 index 25ae352166c9..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile +++ /dev/null @@ -1,52 +0,0 @@ -FROM brimstone/ubuntu:14.04 - -CMD [] - -ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] - -EXPOSE 8500 8600 8400 8301 8302 - -RUN apt-get update \ - && apt-get install -y unzip wget \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists - -RUN cd /tmp \ - && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ - -O web_ui.zip \ - && unzip web_ui.zip \ - && mv dist /webui \ - && rm web_ui.zip - -RUN apt-get update \ - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ - && apt-get install -y --no-install-recommends unzip wget \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists \ - - && cd /tmp \ - && wget 
https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ - -O web_ui.zip \ - && unzip web_ui.zip \ - && mv dist /webui \ - && rm web_ui.zip \ - - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ - && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ - && rm /tmp/dpkg.* - -ENV GOPATH /go - -RUN apt-get update \ - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ - && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists \ - - && go get -v github.com/hashicorp/consul \ - && mv $GOPATH/bin/consul /usr/bin/consul \ - - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ - && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ - && rm /tmp/dpkg.* \ - && rm -rf $GOPATH diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/brimstone-docker-consul/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/brimstone-docker-consul/result deleted file mode 100644 index 16492e516ade..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/brimstone-docker-consul/result +++ /dev/null @@ -1,9 +0,0 @@ -(from "brimstone/ubuntu:14.04") -(cmd) -(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") -(expose "8500" "8600" "8400" "8301" "8302") -(run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists") -(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") -(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*") -(env "GOPATH" "/go") -(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/continue-at-eof/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/continue-at-eof/Dockerfile deleted file mode 100644 index a8ec369ad1b9..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/continue-at-eof/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM alpine:3.5 - -RUN something \ \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/continue-at-eof/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/continue-at-eof/result deleted file mode 100644 index 14e4f0932168..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/continue-at-eof/result +++ 
/dev/null @@ -1,2 +0,0 @@ -(from "alpine:3.5") -(run "something") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/continueIndent/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/continueIndent/Dockerfile deleted file mode 100644 index 42b324e77b54..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/continueIndent/Dockerfile +++ /dev/null @@ -1,36 +0,0 @@ -FROM ubuntu:14.04 - -RUN echo hello\ - world\ - goodnight \ - moon\ - light\ -ning -RUN echo hello \ - world -RUN echo hello \ -world -RUN echo hello \ -goodbye\ -frog -RUN echo hello \ -world -RUN echo hi \ - \ - world \ -\ - good\ -\ -night -RUN echo goodbye\ -frog -RUN echo good\ -bye\ -frog - -RUN echo hello \ -# this is a comment - -# this is a comment with a blank line surrounding it - -this is some more useful stuff diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/continueIndent/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/continueIndent/result deleted file mode 100644 index 268ae073c830..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/continueIndent/result +++ /dev/null @@ -1,10 +0,0 @@ -(from "ubuntu:14.04") -(run "echo hello world goodnight moon lightning") -(run "echo hello world") -(run "echo hello world") -(run "echo hello goodbyefrog") -(run "echo hello world") -(run "echo hi world goodnight") -(run "echo goodbyefrog") -(run "echo goodbyefrog") -(run "echo hello this is some more useful stuff") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile deleted file mode 100644 index 8ccb71a57894..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile +++ /dev/null @@ -1,54 +0,0 @@ -FROM cpuguy83/ubuntu -ENV NAGIOS_HOME /opt/nagios -ENV NAGIOS_USER nagios -ENV NAGIOS_GROUP nagios -ENV NAGIOS_CMDUSER nagios -ENV NAGIOS_CMDGROUP nagios -ENV NAGIOSADMIN_USER nagiosadmin -ENV NAGIOSADMIN_PASS nagios -ENV APACHE_RUN_USER nagios -ENV APACHE_RUN_GROUP nagios -ENV NAGIOS_TIMEZONE UTC - -RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list -RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx -RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP ) -RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) - -ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz -RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf 
-ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ -RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install - -RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars -RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default - -RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo - -RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf - -RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs -RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg -RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg -RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf - -RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ - sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg -RUN cp /etc/services /var/spool/postfix/etc/ - -RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix -ADD nagios.init /etc/sv/nagios/run -ADD apache.init /etc/sv/apache/run -ADD postfix.init /etc/sv/postfix/run -ADD postfix.stop /etc/sv/postfix/finish - -ADD start.sh /usr/local/bin/start_nagios - -ENV APACHE_LOCK_DIR /var/run -ENV APACHE_LOG_DIR /var/log/apache2 - -EXPOSE 80 - -VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] - -CMD ["/usr/local/bin/start_nagios"] diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/cpuguy83-nagios/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/cpuguy83-nagios/result deleted file mode 100644 index 25dd3ddfe58a..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/cpuguy83-nagios/result +++ /dev/null @@ -1,40 +0,0 @@ -(from "cpuguy83/ubuntu") -(env "NAGIOS_HOME" "/opt/nagios") -(env "NAGIOS_USER" "nagios") -(env "NAGIOS_GROUP" "nagios") -(env "NAGIOS_CMDUSER" "nagios") -(env "NAGIOS_CMDGROUP" "nagios") -(env "NAGIOSADMIN_USER" "nagiosadmin") -(env "NAGIOSADMIN_PASS" "nagios") -(env "APACHE_RUN_USER" "nagios") -(env "APACHE_RUN_GROUP" "nagios") -(env "NAGIOS_TIMEZONE" "UTC") -(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") -(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") -(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )") -(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") -(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") -(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} 
--enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf") -(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") -(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install") -(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") -(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") -(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo") -(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf") -(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs") -(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg") -(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg") -(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") -(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") -(run "cp /etc/services /var/spool/postfix/etc/") -(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") -(add "nagios.init" "/etc/sv/nagios/run") -(add "apache.init" "/etc/sv/apache/run") -(add "postfix.init" "/etc/sv/postfix/run") -(add "postfix.stop" "/etc/sv/postfix/finish") -(add "start.sh" "/usr/local/bin/start_nagios") -(env "APACHE_LOCK_DIR" "/var/run") -(env "APACHE_LOG_DIR" "/var/log/apache2") -(expose "80") -(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") -(cmd "/usr/local/bin/start_nagios") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/docker/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/docker/Dockerfile deleted file mode 100644 index cd327aa499e0..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/docker/Dockerfile +++ /dev/null @@ -1,94 +0,0 @@ -# This file describes the standard way to build Docker, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker . -# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test-unit test-integration test-docker-py -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. 
-# - -FROM ubuntu:14.04 -LABEL maintainer Tianon Gravi (@tianon) - -# Packaged dependencies -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ - apt-utils \ - aufs-tools \ - automake \ - btrfs-tools \ - build-essential \ - curl \ - dpkg-sig \ - git \ - iptables \ - libapparmor-dev \ - libcap-dev \ - mercurial \ - pandoc \ - parallel \ - reprepro \ - ruby1.9.1 \ - ruby1.9.1-dev \ - s3cmd=1.1.0* \ - --no-install-recommends - -# Get lvm2 source for compiling statically -RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 -# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags -# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly - -# Compile and install lvm2 -RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper -# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL - -# Install Go -RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz -ENV PATH /usr/local/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor -RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 - -# Compile Go for cross compilation -ENV DOCKER_CROSSPLATFORMS \ - linux/386 linux/arm \ - darwin/amd64 darwin/386 \ - freebsd/amd64 freebsd/386 freebsd/arm -# (set an explicit GOARM of 5 for maximum compatibility) -ENV GOARM 5 -RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' - -# Grab Go's cover tool for dead-simple code coverage testing -RUN go get golang.org/x/tools/cmd/cover - -# TODO replace FPM with some very minimal debhelper stuff -RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 - -# Get the "busybox" image source so we can build locally instead of pulling -RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox - -# Setup s3cmd config -RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor selinux - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . 
/go/src/github.com/docker/docker diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/docker/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/docker/result deleted file mode 100644 index 0c2f22991621..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/docker/result +++ /dev/null @@ -1,24 +0,0 @@ -(from "ubuntu:14.04") -(label "maintainer" "Tianon Gravi (@tianon)") -(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends") -(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") -(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") -(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") -(env "PATH" "/usr/local/go/bin:$PATH") -(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") -(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") -(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm") -(env "GOARM" "5") -(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") -(run "go get golang.org/x/tools/cmd/cover") -(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") -(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") -(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") -(run "git config --global user.email 'docker-dummy@example.com'") -(run "groupadd -r docker") -(run "useradd --create-home --gid docker unprivilegeduser") -(volume "/var/lib/docker") -(workdir "/go/src/github.com/docker/docker") -(env "DOCKER_BUILDTAGS" "apparmor selinux") -(entrypoint "hack/dind") -(copy "." 
"/go/src/github.com/docker/docker") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/env/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/env/Dockerfile deleted file mode 100644 index 08fa18acecdb..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/env/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM ubuntu -ENV name value -ENV name=value -ENV name=value name2=value2 -ENV name="value value1" -ENV name=value\ value2 -ENV name="value'quote space'value2" -ENV name='value"double quote"value2' -ENV name=value\ value2 name2=value2\ value3 -ENV name="a\"b" -ENV name="a\'b" -ENV name='a\'b' -ENV name='a\'b'' -ENV name='a\"b' -ENV name="''" -# don't put anything after the next line - it must be the last line of the -# Dockerfile and it must end with \ -ENV name=value \ - name1=value1 \ - name2="value2a \ - value2b" \ - name3="value3a\n\"value3b\"" \ - name4="value4a\\nvalue4b" \ diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/env/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/env/result deleted file mode 100644 index ba0a6dd7cb42..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/env/result +++ /dev/null @@ -1,16 +0,0 @@ -(from "ubuntu") -(env "name" "value") -(env "name" "value") -(env "name" "value" "name2" "value2") -(env "name" "\"value value1\"") -(env "name" "value\\ value2") -(env "name" "\"value'quote space'value2\"") -(env "name" "'value\"double quote\"value2'") -(env "name" "value\\ value2" "name2" "value2\\ value3") -(env "name" "\"a\\\"b\"") -(env "name" "\"a\\'b\"") -(env "name" "'a\\'b'") -(env "name" "'a\\'b''") -(env "name" "'a\\\"b'") -(env "name" "\"''\"") -(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape-after-comment/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape-after-comment/Dockerfile deleted file mode 100644 index 18e9a474f2db..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape-after-comment/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -# Comment here. Should not be looking for the following parser directive. -# Hence the following line will be ignored, and the subsequent backslash -# continuation will be the default. 
-# escape = ` - -FROM image -LABEL maintainer foo@bar.com -ENV GOPATH \ -\go \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape-after-comment/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape-after-comment/result deleted file mode 100644 index 9ab119c414af..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape-after-comment/result +++ /dev/null @@ -1,3 +0,0 @@ -(from "image") -(label "maintainer" "foo@bar.com") -(env "GOPATH" "\\go") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape-nonewline/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape-nonewline/Dockerfile deleted file mode 100644 index 366ee3c36b62..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape-nonewline/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -# escape = `` -# There is no white space line after the directives. This still succeeds, but goes -# against best practices. -FROM image -LABEL maintainer foo@bar.com -ENV GOPATH ` -\go \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape-nonewline/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape-nonewline/result deleted file mode 100644 index 9ab119c414af..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape-nonewline/result +++ /dev/null @@ -1,3 +0,0 @@ -(from "image") -(label "maintainer" "foo@bar.com") -(env "GOPATH" "\\go") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape/Dockerfile deleted file mode 100644 index a515af152d7b..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -#escape = ` - -FROM image -LABEL maintainer foo@bar.com -ENV GOPATH ` -\go \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape/result deleted file mode 100644 index 9ab119c414af..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escape/result +++ /dev/null @@ -1,3 +0,0 @@ -(from "image") -(label "maintainer" "foo@bar.com") -(env "GOPATH" "\\go") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escapes/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escapes/Dockerfile deleted file mode 100644 index 03062394ae25..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escapes/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM ubuntu:14.04 -LABEL maintainer Erik \\Hollensbe \" - -RUN apt-get \update && \ - apt-get \"install znc -y -ADD \conf\\" /.znc - -RUN foo \ - -bar \ - -baz - -CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escapes/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escapes/result deleted file mode 100644 index 98e3e3b73732..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/escapes/result +++ /dev/null @@ -1,6 +0,0 @@ -(from "ubuntu:14.04") -(label "maintainer" "Erik 
\\\\Hollensbe \\\"") -(run "apt-get \\update && apt-get \\\"install znc -y") -(add "\\conf\\\\\"" "/.znc") -(run "foo bar baz") -(cmd "/usr\\\"/bin/znc" "-f" "-r") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/flags/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/flags/Dockerfile deleted file mode 100644 index 2418e0f069b9..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/flags/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM scratch -COPY foo /tmp/ -COPY --user=me foo /tmp/ -COPY --doit=true foo /tmp/ -COPY --user=me --doit=true foo /tmp/ -COPY --doit=true -- foo /tmp/ -COPY -- foo /tmp/ -CMD --doit [ "a", "b" ] -CMD --doit=true -- [ "a", "b" ] -CMD --doit -- [ ] diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/flags/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/flags/result deleted file mode 100644 index 4578f4cba4b8..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/flags/result +++ /dev/null @@ -1,10 +0,0 @@ -(from "scratch") -(copy "foo" "/tmp/") -(copy ["--user=me"] "foo" "/tmp/") -(copy ["--doit=true"] "foo" "/tmp/") -(copy ["--user=me" "--doit=true"] "foo" "/tmp/") -(copy ["--doit=true"] "foo" "/tmp/") -(copy "foo" "/tmp/") -(cmd ["--doit"] "a" "b") -(cmd ["--doit=true"] "a" "b") -(cmd ["--doit"]) diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/health/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/health/Dockerfile deleted file mode 100644 index 081e4428820a..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/health/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM debian -ADD check.sh main.sh /app/ -CMD /app/main.sh -HEALTHCHECK -HEALTHCHECK --interval=5s --timeout=3s --retries=3 \ - CMD /app/check.sh --quiet -HEALTHCHECK CMD -HEALTHCHECK CMD a b -HEALTHCHECK --timeout=3s CMD ["foo"] -HEALTHCHECK CONNECT TCP 7000 diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/health/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/health/result deleted file mode 100644 index 092924f88c5c..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/health/result +++ /dev/null @@ -1,9 +0,0 @@ -(from "debian") -(add "check.sh" "main.sh" "/app/") -(cmd "/app/main.sh") -(healthcheck) -(healthcheck ["--interval=5s" "--timeout=3s" "--retries=3"] "CMD" "/app/check.sh --quiet") -(healthcheck "CMD") -(healthcheck "CMD" "a b") -(healthcheck ["--timeout=3s"] "CMD" "foo") -(healthcheck "CONNECT" "TCP 7000") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/influxdb/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/influxdb/Dockerfile deleted file mode 100644 index 587fb9b54bed..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/influxdb/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM ubuntu:14.04 - -RUN apt-get update && apt-get install wget -y -RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb -RUN dpkg -i influxdb_latest_amd64.deb -RUN rm -r /opt/influxdb/shared - -VOLUME /opt/influxdb/shared - -CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml - -EXPOSE 8083 -EXPOSE 8086 -EXPOSE 8090 -EXPOSE 8099 diff --git 
a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/influxdb/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/influxdb/result deleted file mode 100644 index 0998e87e638b..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/influxdb/result +++ /dev/null @@ -1,11 +0,0 @@ -(from "ubuntu:14.04") -(run "apt-get update && apt-get install wget -y") -(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") -(run "dpkg -i influxdb_latest_amd64.deb") -(run "rm -r /opt/influxdb/shared") -(volume "/opt/influxdb/shared") -(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") -(expose "8083") -(expose "8086") -(expose "8090") -(expose "8099") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile deleted file mode 100644 index 39fe27d99c67..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result deleted file mode 100644 index afc220c2a7bd..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile deleted file mode 100644 index eaae081a06b5..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD '["echo", "Well, JSON in a string is JSON too?"]' diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result deleted file mode 100644 index 484804e2b2e9..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile deleted file mode 100644 index c3ac63c07a04..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD ['echo','single quotes are invalid JSON'] diff --git 
a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result deleted file mode 100644 index 614789120746..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "['echo','single quotes are invalid JSON']") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile deleted file mode 100644 index 5fd4afa52204..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD ["echo", "Please, close the brackets when you're done" diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result deleted file mode 100644 index 1ffbb8ff85bf..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "[\"echo\", \"Please, close the brackets when you're done\"") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile deleted file mode 100644 index 30cc4bb48fee..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD ["echo", "look ma, no quote!] 
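[Context for the jeztah-invalid-json-* fixtures: each "result" file here is the S-expression dump the Dockerfile parser emits for the matching test Dockerfile, and these cases pin down the fallback behavior, where a CMD argument that is not well-formed JSON (single quotes, an unterminated bracket, an unterminated string) is kept as a single shell-form word instead of being parsed into an exec-form array. Below is a minimal sketch of how such a fixture is produced; it is illustrative only, not part of this change, and assumes the vendored buildkit parser API (parser.Parse, Node.Dump, and the "json" node attribute):

package main

import (
	"fmt"
	"strings"

	"github.com/moby/buildkit/frontend/dockerfile/parser"
)

func main() {
	for _, df := range []string{
		`CMD ["echo", "hi"]`,                            // well-formed JSON: exec form
		`CMD ['echo','single quotes are invalid JSON']`, // not JSON: one shell-form word
	} {
		res, err := parser.Parse(strings.NewReader(df))
		if err != nil {
			panic(err)
		}
		// Dump renders the same "(cmd ...)" S-expressions as the result
		// fixtures; the "json" attribute records whether exec form was detected.
		fmt.Println(res.AST.Dump(), "json:", res.AST.Children[0].Attributes["json"])
	}
}

Run against the vendored parser, this should print (cmd "echo" "hi") with json: true for the first line and (cmd "['echo','single quotes are invalid JSON']") with json: false for the second, matching the fixture contents recorded in this diff.]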
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result deleted file mode 100644 index 32048147b516..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "[\"echo\", \"look ma, no quote!]") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/json/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/json/Dockerfile deleted file mode 100644 index a586917110f4..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/json/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -CMD [] -CMD [""] -CMD ["a"] -CMD ["a","b"] -CMD [ "a", "b" ] -CMD [ "a", "b" ] -CMD [ "a", "b" ] -CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"] diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/json/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/json/result deleted file mode 100644 index c6553e6e1af2..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/json/result +++ /dev/null @@ -1,8 +0,0 @@ -(cmd) -(cmd "") -(cmd "a") -(cmd "a" "b") -(cmd "a" "b") -(cmd "a" "b") -(cmd "a" "b") -(cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile deleted file mode 100644 index 728ec9a78782..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM ubuntu:14.04 -LABEL maintainer James Turnbull "james@example.com" -ENV REFRESHED_AT 2014-06-01 -RUN apt-get update -RUN apt-get -y install redis-server redis-tools -EXPOSE 6379 -ENTRYPOINT [ "/usr/bin/redis-server" ] diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result deleted file mode 100644 index e774bc4f972c..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result +++ /dev/null @@ -1,7 +0,0 @@ -(from "ubuntu:14.04") -(label "maintainer" "James Turnbull \"james@example.com\"") -(env "REFRESHED_AT" "2014-06-01") -(run "apt-get update") -(run "apt-get -y install redis-server redis-tools") -(expose "6379") -(entrypoint "/usr/bin/redis-server") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile deleted file mode 100644 index 27f28cb92150..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile +++ /dev/null @@ -1,48 +0,0 @@ -FROM busybox:buildroot-2014.02 - -LABEL maintainer docker - -ONBUILD RUN ["echo", "test"] -ONBUILD RUN echo test -ONBUILD COPY . 
/ - - -# RUN Commands \ -# linebreak in comment \ -RUN ["ls", "-la"] -RUN ["echo", "'1234'"] -RUN echo "1234" -RUN echo 1234 -RUN echo '1234' && \ - echo "456" && \ - echo 789 -RUN sh -c 'echo root:testpass \ - > /tmp/passwd' -RUN mkdir -p /test /test2 /test3/test - -# ENV \ -ENV SCUBA 1 DUBA 3 -ENV SCUBA "1 DUBA 3" - -# CMD \ -CMD ["echo", "test"] -CMD echo test -CMD echo "test" -CMD echo 'test' -CMD echo 'test' | wc - - -#EXPOSE\ -EXPOSE 3000 -EXPOSE 9000 5000 6000 - -USER docker -USER docker:root - -VOLUME ["/test"] -VOLUME ["/test", "/test2"] -VOLUME /test3 - -WORKDIR /test - -ADD . / -COPY . copy diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result deleted file mode 100644 index 8a499ff9488b..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result +++ /dev/null @@ -1,29 +0,0 @@ -(from "busybox:buildroot-2014.02") -(label "maintainer" "docker ") -(onbuild (run "echo" "test")) -(onbuild (run "echo test")) -(onbuild (copy "." "/")) -(run "ls" "-la") -(run "echo" "'1234'") -(run "echo \"1234\"") -(run "echo 1234") -(run "echo '1234' && echo \"456\" && echo 789") -(run "sh -c 'echo root:testpass > /tmp/passwd'") -(run "mkdir -p /test /test2 /test3/test") -(env "SCUBA" "1 DUBA 3") -(env "SCUBA" "\"1 DUBA 3\"") -(cmd "echo" "test") -(cmd "echo test") -(cmd "echo \"test\"") -(cmd "echo 'test'") -(cmd "echo 'test' | wc -") -(expose "3000") -(expose "9000" "5000" "6000") -(user "docker") -(user "docker:root") -(volume "/test") -(volume "/test" "/test2") -(volume "/test3") -(workdir "/test") -(add "." "/") -(copy "." 
"copy") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/mail/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/mail/Dockerfile deleted file mode 100644 index f64c1168c18b..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/mail/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM ubuntu:14.04 - -RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y -ADD .muttrc / -ADD .offlineimaprc / -ADD .tmux.conf / -ADD mutt /.mutt -ADD vim /.vim -ADD vimrc /.vimrc -ADD crontab /etc/crontab -RUN chmod 644 /etc/crontab -RUN mkdir /Mail -RUN mkdir /.offlineimap -RUN echo "export TERM=screen-256color" >/.zshenv - -CMD setsid cron; tmux -2 diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/mail/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/mail/result deleted file mode 100644 index a0efcf04b699..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/mail/result +++ /dev/null @@ -1,14 +0,0 @@ -(from "ubuntu:14.04") -(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") -(add ".muttrc" "/") -(add ".offlineimaprc" "/") -(add ".tmux.conf" "/") -(add "mutt" "/.mutt") -(add "vim" "/.vim") -(add "vimrc" "/.vimrc") -(add "crontab" "/etc/crontab") -(run "chmod 644 /etc/crontab") -(run "mkdir /Mail") -(run "mkdir /.offlineimap") -(run "echo \"export TERM=screen-256color\" >/.zshenv") -(cmd "setsid cron; tmux -2") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/multiple-volumes/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/multiple-volumes/Dockerfile deleted file mode 100644 index 57bb5976a3f1..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/multiple-volumes/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM foo - -VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/multiple-volumes/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/multiple-volumes/result deleted file mode 100644 index 18dbdeeaa048..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/multiple-volumes/result +++ /dev/null @@ -1,2 +0,0 @@ -(from "foo") -(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/mumble/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/mumble/Dockerfile deleted file mode 100644 index 5b9ec06a6c38..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/mumble/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM ubuntu:14.04 - -RUN apt-get update && apt-get install libcap2-bin mumble-server -y - -ADD ./mumble-server.ini /etc/mumble-server.ini - -CMD /usr/sbin/murmurd diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/mumble/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/mumble/result deleted file mode 100644 index a0036a943e86..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/mumble/result +++ /dev/null @@ -1,4 +0,0 @@ -(from "ubuntu:14.04") -(run "apt-get 
update && apt-get install libcap2-bin mumble-server -y") -(add "./mumble-server.ini" "/etc/mumble-server.ini") -(cmd "/usr/sbin/murmurd") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/nginx/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/nginx/Dockerfile deleted file mode 100644 index 0a35e2c6b2f0..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/nginx/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM ubuntu:14.04 -LABEL maintainer Erik Hollensbe - -RUN apt-get update && apt-get install nginx-full -y -RUN rm -rf /etc/nginx -ADD etc /etc/nginx -RUN chown -R root:root /etc/nginx -RUN /usr/sbin/nginx -qt -RUN mkdir /www - -CMD ["/usr/sbin/nginx"] - -VOLUME /www -EXPOSE 80 diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/nginx/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/nginx/result deleted file mode 100644 index a895fadbbe3b..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/nginx/result +++ /dev/null @@ -1,11 +0,0 @@ -(from "ubuntu:14.04") -(label "maintainer" "Erik Hollensbe ") -(run "apt-get update && apt-get install nginx-full -y") -(run "rm -rf /etc/nginx") -(add "etc" "/etc/nginx") -(run "chown -R root:root /etc/nginx") -(run "/usr/sbin/nginx -qt") -(run "mkdir /www") -(cmd "/usr/sbin/nginx") -(volume "/www") -(expose "80") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/tf2/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/tf2/Dockerfile deleted file mode 100644 index 72b79bdd7d4d..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/tf2/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM ubuntu:12.04 - -EXPOSE 27015 -EXPOSE 27005 -EXPOSE 26901 -EXPOSE 27020 - -RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y -RUN mkdir -p /steam -RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam -ADD ./script /steam/script -RUN /steam/steamcmd.sh +runscript /steam/script -RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf -RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf -ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg -ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg -ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg -RUN rm -r /steam/tf2/tf/addons/sourcemod/configs -ADD ./configs /steam/tf2/tf/addons/sourcemod/configs -RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en -RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en - -CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/tf2/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/tf2/result deleted file mode 100644 index d4f94cd8be01..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/tf2/result +++ /dev/null @@ -1,20 +0,0 @@ -(from "ubuntu:12.04") -(expose "27015") -(expose "27005") -(expose "26901") -(expose "27020") -(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") -(run "mkdir -p /steam") -(run "curl 
http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") -(add "./script" "/steam/script") -(run "/steam/steamcmd.sh +runscript /steam/script") -(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") -(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") -(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") -(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") -(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") -(run "rm -r /steam/tf2/tf/addons/sourcemod/configs") -(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") -(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") -(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") -(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/weechat/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/weechat/Dockerfile deleted file mode 100644 index 4842088166ce..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/weechat/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM ubuntu:14.04 - -RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y - -ADD .weechat /.weechat -ADD .tmux.conf / -RUN echo "export TERM=screen-256color" >/.zshenv - -CMD zsh -c weechat diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/weechat/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/weechat/result deleted file mode 100644 index c3abb4c54f15..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/weechat/result +++ /dev/null @@ -1,6 +0,0 @@ -(from "ubuntu:14.04") -(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y") -(add ".weechat" "/.weechat") -(add ".tmux.conf" "/") -(run "echo \"export TERM=screen-256color\" >/.zshenv") -(cmd "zsh -c weechat") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/znc/Dockerfile b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/znc/Dockerfile deleted file mode 100644 index 626b126d8acb..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/znc/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM ubuntu:14.04 -LABEL maintainer Erik Hollensbe - -RUN apt-get update && apt-get install znc -y -ADD conf /.znc - -CMD [ "/usr/bin/znc", "-f", "-r" ] diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/znc/result b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/znc/result deleted file mode 100644 index bfc7f65135a9..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/testfiles/znc/result +++ /dev/null @@ -1,5 +0,0 @@ -(from "ubuntu:14.04") -(label "maintainer" "Erik Hollensbe ") -(run "apt-get update && apt-get install znc -y") -(add "conf" "/.znc") -(cmd "/usr/bin/znc" "-f" "-r") diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/release/experimental/tags b/vendor/github.com/moby/buildkit/frontend/dockerfile/release/experimental/tags deleted file mode 100644 index 194ee033ddad..000000000000 --- 
a/vendor/github.com/moby/buildkit/frontend/dockerfile/release/experimental/tags +++ /dev/null @@ -1 +0,0 @@ -dfrunmount dfsecrets dfssh \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/release/mainline/tags b/vendor/github.com/moby/buildkit/frontend/dockerfile/release/mainline/tags deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/release/mounts/tags b/vendor/github.com/moby/buildkit/frontend/dockerfile/release/mounts/tags deleted file mode 100644 index ea8a0085b958..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/release/mounts/tags +++ /dev/null @@ -1 +0,0 @@ -dfrunmount \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/release/secrets/tags b/vendor/github.com/moby/buildkit/frontend/dockerfile/release/secrets/tags deleted file mode 100644 index e31407fa78e5..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/release/secrets/tags +++ /dev/null @@ -1 +0,0 @@ -dfrunmount dfsecrets \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/release/ssh/tags b/vendor/github.com/moby/buildkit/frontend/dockerfile/release/ssh/tags deleted file mode 100644 index 4d46af2b8330..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/release/ssh/tags +++ /dev/null @@ -1 +0,0 @@ -dfrunmount dfssh \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/envVarTest b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/envVarTest deleted file mode 100644 index 08011801c565..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/envVarTest +++ /dev/null @@ -1,232 +0,0 @@ -A|hello | hello -A|he'll'o | hello -A|he'llo | error -A|he\'llo | he'llo -A|he\\'llo | error -A|abc\tdef | abctdef -A|"abc\tdef" | abc\tdef -A|"abc\\tdef" | abc\tdef -A|'abc\tdef' | abc\tdef -A|hello\ | hello -A|hello\\ | hello\ -A|"hello | error -A|"hello\" | error -A|"hel'lo" | hel'lo -A|'hello | error -A|'hello\' | hello\ -A|'hello\there' | hello\there -A|'hello\\there' | hello\\there -A|"''" | '' -A|$. | $. -A|he$1x | hex -A|he$.x | he$.x -# Next one is different on Windows as $pwd==$PWD -U|he$pwd. | he. -W|he$pwd. | he/home. 
-A|he$PWD | he/home -A|he\$PWD | he$PWD -A|he\\$PWD | he\/home -A|"he\$PWD" | he$PWD -A|"he\\$PWD" | he\/home -A|\${} | ${} -A|\${}aaa | ${}aaa -A|he\${} | he${} -A|he\${}xx | he${}xx -A|${} | error -A|${}aaa | error -A|he${} | error -A|he${}xx | error -A|he${hi} | he -A|he${hi}xx | hexx -A|he${PWD} | he/home -A|he${.} | error -A|he${XXX:-000}xx | he000xx -A|he${PWD:-000}xx | he/homexx -A|he${XXX:-$PWD}xx | he/homexx -A|he${XXX:-${PWD:-yyy}}xx | he/homexx -A|he${XXX:-${YYY:-yyy}}xx | heyyyxx -A|he${XXX:YYY} | error -A|he${XXX:+${PWD}}xx | hexx -A|he${PWD:+${XXX}}xx | hexx -A|he${PWD:+${SHELL}}xx | hebashxx -A|he${XXX:+000}xx | hexx -A|he${PWD:+000}xx | he000xx -A|'he${XX}' | he${XX} -A|"he${PWD}" | he/home -A|"he'$PWD'" | he'/home' -A|"$PWD" | /home -A|'$PWD' | $PWD -A|'\$PWD' | \$PWD -A|'"hello"' | "hello" -A|he\$PWD | he$PWD -A|"he\$PWD" | he$PWD -A|'he\$PWD' | he\$PWD -A|he${PWD | error -A|he${PWD:=000}xx | error -A|he${PWD:+${PWD}:}xx | he/home:xx -A|he${XXX:-\$PWD:}xx | he$PWD:xx -A|he${XXX:-\${PWD}z}xx | he${PWDz}xx -A|안녕하세요 | 안녕하세요 -A|안'녕'하세요 | 안녕하세요 -A|안'녕하세요 | error -A|안녕\'하세요 | 안녕'하세요 -A|안\\'녕하세요 | error -A|안녕\t하세요 | 안녕t하세요 -A|"안녕\t하세요" | 안녕\t하세요 -A|'안녕\t하세요 | error -A|안녕하세요\ | 안녕하세요 -A|안녕하세요\\ | 안녕하세요\ -A|"안녕하세요 | error -A|"안녕하세요\" | error -A|"안녕'하세요" | 안녕'하세요 -A|'안녕하세요 | error -A|'안녕하세요\' | 안녕하세요\ -A|안녕$1x | 안녕x -A|안녕$.x | 안녕$.x -# Next one is different on Windows as $pwd==$PWD -U|안녕$pwd. | 안녕. -W|안녕$pwd. | 안녕/home. -A|안녕$PWD | 안녕/home -A|안녕\$PWD | 안녕$PWD -A|안녕\\$PWD | 안녕\/home -A|안녕\${} | 안녕${} -A|안녕\${}xx | 안녕${}xx -A|안녕${} | error -A|안녕${}xx | error -A|안녕${hi} | 안녕 -A|안녕${hi}xx | 안녕xx -A|안녕${PWD} | 안녕/home -A|안녕${.} | error -A|안녕${XXX:-000}xx | 안녕000xx -A|안녕${PWD:-000}xx | 안녕/homexx -A|안녕${XXX:-$PWD}xx | 안녕/homexx -A|안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx -A|안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx -A|안녕${XXX:YYY} | error -A|안녕${XXX:+${PWD}}xx | 안녕xx -A|안녕${PWD:+${XXX}}xx | 안녕xx -A|안녕${PWD:+${SHELL}}xx | 안녕bashxx -A|안녕${XXX:+000}xx | 안녕xx -A|안녕${PWD:+000}xx | 안녕000xx -A|'안녕${XX}' | 안녕${XX} -A|"안녕${PWD}" | 안녕/home -A|"안녕'$PWD'" | 안녕'/home' -A|'"안녕"' | "안녕" -A|안녕\$PWD | 안녕$PWD -A|"안녕\$PWD" | 안녕$PWD -A|'안녕\$PWD' | 안녕\$PWD -A|안녕${PWD | error -A|안녕${PWD:=000}xx | error -A|안녕${PWD:+${PWD}:}xx | 안녕/home:xx -A|안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx -A|안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx -A|$KOREAN | 한국어 -A|안녕$KOREAN | 안녕한국어 -A|${{aaa} | error -A|${aaa}} | } -A|${aaa | error -A|${{aaa:-bbb} | error -A|${aaa:-bbb}} | bbb} -A|${aaa:-bbb | error -A|${aaa:-bbb} | bbb -A|${aaa:-${bbb:-ccc}} | ccc -A|${aaa:-bbb ${foo} | error -A|${aaa:-bbb {foo} | bbb {foo -A|${:} | error -A|${:-bbb} | error -A|${:+bbb} | error - -# Positional parameters won't be set: -# http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_01 -A|$1 | -A|${1} | -A|${1:+bbb} | -A|${1:-bbb} | bbb -A|$2 | -A|${2} | -A|${2:+bbb} | -A|${2:-bbb} | bbb -A|$3 | -A|${3} | -A|${3:+bbb} | -A|${3:-bbb} | bbb -A|$4 | -A|${4} | -A|${4:+bbb} | -A|${4:-bbb} | bbb -A|$5 | -A|${5} | -A|${5:+bbb} | -A|${5:-bbb} | bbb -A|$6 | -A|${6} | -A|${6:+bbb} | -A|${6:-bbb} | bbb -A|$7 | -A|${7} | -A|${7:+bbb} | -A|${7:-bbb} | bbb -A|$8 | -A|${8} | -A|${8:+bbb} | -A|${8:-bbb} | bbb -A|$9 | -A|${9} | -A|${9:+bbb} | -A|${9:-bbb} | bbb -A|$999 | -A|${999} | -A|${999:+bbb} | -A|${999:-bbb} | bbb -A|$999aaa | aaa -A|${999}aaa | aaa -A|${999:+bbb}aaa | aaa -A|${999:-bbb}aaa | bbbaaa -A|$001 | -A|${001} | -A|${001:+bbb} | -A|${001:-bbb} | bbb -A|$001aaa | aaa -A|${001}aaa | aaa -A|${001:+bbb}aaa | aaa -A|${001:-bbb}aaa | bbbaaa - -# Special 
parameters won't be set in the Dockerfile: -# http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02 -A|$@ | -A|${@} | -A|${@:+bbb} | -A|${@:-bbb} | bbb -A|$@@@ | @@ -A|$@aaa | aaa -A|${@}aaa | aaa -A|${@:+bbb}aaa | aaa -A|${@:-bbb}aaa | bbbaaa -A|$* | -A|${*} | -A|${*:+bbb} | -A|${*:-bbb} | bbb -A|$# | -A|${#} | -A|${#:+bbb} | -A|${#:-bbb} | bbb -A|$? | -A|${?} | -A|${?:+bbb} | -A|${?:-bbb} | bbb -A|$- | -A|${-} | -A|${-:+bbb} | -A|${-:-bbb} | bbb -A|$$ | -A|${$} | -A|${$:+bbb} | -A|${$:-bbb} | bbb -A|$! | -A|${!} | -A|${!:+bbb} | -A|${!:-bbb} | bbb -A|$0 | -A|${0} | -A|${0:+bbb} | -A|${0:-bbb} | bbb diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go deleted file mode 100644 index 36903ec58d9a..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows - -package shell - -// EqualEnvKeys compare two strings and returns true if they are equal. -// On Unix this comparison is case sensitive. -// On Windows this comparison is case insensitive. -func EqualEnvKeys(from, to string) bool { - return from == to -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go deleted file mode 100644 index 010569bbaa06..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go +++ /dev/null @@ -1,10 +0,0 @@ -package shell - -import "strings" - -// EqualEnvKeys compare two strings and returns true if they are equal. -// On Unix this comparison is case sensitive. -// On Windows this comparison is case insensitive. -func EqualEnvKeys(from, to string) bool { - return strings.ToUpper(from) == strings.ToUpper(to) -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go deleted file mode 100644 index 6153f50d63a4..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go +++ /dev/null @@ -1,429 +0,0 @@ -package shell - -import ( - "bytes" - "fmt" - "strings" - "text/scanner" - "unicode" - - "github.com/pkg/errors" -) - -// Lex performs shell word splitting and variable expansion. -// -// Lex takes a string and an array of env variables and -// process all quotes (" and ') as well as $xxx and ${xxx} env variable -// tokens. Tries to mimic bash shell process. -// It doesn't support all flavors of ${xx:...} formats but new ones can -// be added by adding code to the "special ${} format processing" section -type Lex struct { - escapeToken rune - RawQuotes bool - SkipUnsetEnv bool -} - -// NewLex creates a new Lex which uses escapeToken to escape quotes. -func NewLex(escapeToken rune) *Lex { - return &Lex{escapeToken: escapeToken} -} - -// ProcessWord will use the 'env' list of environment variables, -// and replace any env var references in 'word'. -func (s *Lex) ProcessWord(word string, env []string) (string, error) { - word, _, err := s.process(word, BuildEnvs(env)) - return word, err -} - -// ProcessWords will use the 'env' list of environment variables, -// and replace any env var references in 'word' then it will also -// return a slice of strings which represents the 'word' -// split up based on spaces - taking into account quotes. 
Note that -// this splitting is done **after** the env var substitutions are done. -// Note, each one is trimmed to remove leading and trailing spaces (unless -// they are quoted", but ProcessWord retains spaces between words. -func (s *Lex) ProcessWords(word string, env []string) ([]string, error) { - _, words, err := s.process(word, BuildEnvs(env)) - return words, err -} - -// ProcessWordWithMap will use the 'env' list of environment variables, -// and replace any env var references in 'word'. -func (s *Lex) ProcessWordWithMap(word string, env map[string]string) (string, error) { - word, _, err := s.process(word, env) - return word, err -} - -func (s *Lex) ProcessWordsWithMap(word string, env map[string]string) ([]string, error) { - _, words, err := s.process(word, env) - return words, err -} - -func (s *Lex) process(word string, env map[string]string) (string, []string, error) { - sw := &shellWord{ - envs: env, - escapeToken: s.escapeToken, - skipUnsetEnv: s.SkipUnsetEnv, - rawQuotes: s.RawQuotes, - } - sw.scanner.Init(strings.NewReader(word)) - return sw.process(word) -} - -type shellWord struct { - scanner scanner.Scanner - envs map[string]string - escapeToken rune - rawQuotes bool - skipUnsetEnv bool -} - -func (sw *shellWord) process(source string) (string, []string, error) { - word, words, err := sw.processStopOn(scanner.EOF) - if err != nil { - err = errors.Wrapf(err, "failed to process %q", source) - } - return word, words, err -} - -type wordsStruct struct { - word string - words []string - inWord bool -} - -func (w *wordsStruct) addChar(ch rune) { - if unicode.IsSpace(ch) && w.inWord { - if len(w.word) != 0 { - w.words = append(w.words, w.word) - w.word = "" - w.inWord = false - } - } else if !unicode.IsSpace(ch) { - w.addRawChar(ch) - } -} - -func (w *wordsStruct) addRawChar(ch rune) { - w.word += string(ch) - w.inWord = true -} - -func (w *wordsStruct) addString(str string) { - for _, ch := range str { - w.addChar(ch) - } -} - -func (w *wordsStruct) addRawString(str string) { - w.word += str - w.inWord = true -} - -func (w *wordsStruct) getWords() []string { - if len(w.word) > 0 { - w.words = append(w.words, w.word) - - // Just in case we're called again by mistake - w.word = "" - w.inWord = false - } - return w.words -} - -// Process the word, starting at 'pos', and stop when we get to the -// end of the word or the 'stopChar' character -func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { - var result bytes.Buffer - var words wordsStruct - - var charFuncMapping = map[rune]func() (string, error){ - '\'': sw.processSingleQuote, - '"': sw.processDoubleQuote, - '$': sw.processDollar, - } - - for sw.scanner.Peek() != scanner.EOF { - ch := sw.scanner.Peek() - - if stopChar != scanner.EOF && ch == stopChar { - sw.scanner.Next() - return result.String(), words.getWords(), nil - } - if fn, ok := charFuncMapping[ch]; ok { - // Call special processing func for certain chars - tmp, err := fn() - if err != nil { - return "", []string{}, err - } - result.WriteString(tmp) - - if ch == rune('$') { - words.addString(tmp) - } else { - words.addRawString(tmp) - } - } else { - // Not special, just add it to the result - ch = sw.scanner.Next() - - if ch == sw.escapeToken { - // '\' (default escape token, but ` allowed) escapes, except end of line - ch = sw.scanner.Next() - - if ch == scanner.EOF { - break - } - - words.addRawChar(ch) - } else { - words.addChar(ch) - } - - result.WriteRune(ch) - } - } - if stopChar != scanner.EOF { - return "", []string{}, 
errors.Errorf("unexpected end of statement while looking for matching %s", string(stopChar)) - } - return result.String(), words.getWords(), nil -} - -func (sw *shellWord) processSingleQuote() (string, error) { - // All chars between single quotes are taken as-is - // Note, you can't escape ' - // - // From the "sh" man page: - // Single Quotes - // Enclosing characters in single quotes preserves the literal meaning of - // all the characters (except single quotes, making it impossible to put - // single-quotes in a single-quoted string). - - var result bytes.Buffer - - ch := sw.scanner.Next() - if sw.rawQuotes { - result.WriteRune(ch) - } - - for { - ch = sw.scanner.Next() - switch ch { - case scanner.EOF: - return "", errors.New("unexpected end of statement while looking for matching single-quote") - case '\'': - if sw.rawQuotes { - result.WriteRune(ch) - } - return result.String(), nil - } - result.WriteRune(ch) - } -} - -func (sw *shellWord) processDoubleQuote() (string, error) { - // All chars up to the next " are taken as-is, even ', except any $ chars - // But you can escape " with a \ (or ` if escape token set accordingly) - // - // From the "sh" man page: - // Double Quotes - // Enclosing characters within double quotes preserves the literal meaning - // of all characters except dollarsign ($), backquote (`), and backslash - // (\). The backslash inside double quotes is historically weird, and - // serves to quote only the following characters: - // $ ` " \ . - // Otherwise it remains literal. - - var result bytes.Buffer - - ch := sw.scanner.Next() - if sw.rawQuotes { - result.WriteRune(ch) - } - - for { - switch sw.scanner.Peek() { - case scanner.EOF: - return "", errors.New("unexpected end of statement while looking for matching double-quote") - case '"': - ch := sw.scanner.Next() - if sw.rawQuotes { - result.WriteRune(ch) - } - return result.String(), nil - case '$': - value, err := sw.processDollar() - if err != nil { - return "", err - } - result.WriteString(value) - default: - ch := sw.scanner.Next() - if ch == sw.escapeToken { - switch sw.scanner.Peek() { - case scanner.EOF: - // Ignore \ at end of word - continue - case '"', '$', sw.escapeToken: - // These chars can be escaped, all other \'s are left as-is - // Note: for now don't do anything special with ` chars. - // Not sure what to do with them anyway since we're not going - // to execute the text in there (not now anyway). - ch = sw.scanner.Next() - } - } - result.WriteRune(ch) - } - } -} - -func (sw *shellWord) processDollar() (string, error) { - sw.scanner.Next() - - // $xxx case - if sw.scanner.Peek() != '{' { - name := sw.processName() - if name == "" { - return "$", nil - } - value, found := sw.getEnv(name) - if !found && sw.skipUnsetEnv { - return "$" + name, nil - } - return value, nil - } - - sw.scanner.Next() - switch sw.scanner.Peek() { - case scanner.EOF: - return "", errors.New("syntax error: missing '}'") - case '{', '}', ':': - // Invalid ${{xx}, ${:xx}, ${:}. ${} case - return "", errors.New("syntax error: bad substitution") - } - name := sw.processName() - ch := sw.scanner.Next() - switch ch { - case '}': - // Normal ${xx} case - value, found := sw.getEnv(name) - if !found && sw.skipUnsetEnv { - return fmt.Sprintf("${%s}", name), nil - } - return value, nil - case ':': - // Special ${xx:...} format processing - // Yes it allows for recursive $'s in the ... 
-		modifier := sw.scanner.Next()
-
-		word, _, err := sw.processStopOn('}')
-		if err != nil {
-			if sw.scanner.Peek() == scanner.EOF {
-				return "", errors.New("syntax error: missing '}'")
-			}
-			return "", err
-		}
-
-		// Grab the current value of the variable in question so we
-		// can use it to determine what to do based on the modifier
-		newValue, found := sw.getEnv(name)
-
-		switch modifier {
-		case '+':
-			if newValue != "" {
-				newValue = word
-			}
-			if !found && sw.skipUnsetEnv {
-				return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil
-			}
-			return newValue, nil
-
-		case '-':
-			if newValue == "" {
-				newValue = word
-			}
-			if !found && sw.skipUnsetEnv {
-				return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil
-			}
-
-			return newValue, nil
-
-		default:
-			return "", errors.Errorf("unsupported modifier (%c) in substitution", modifier)
-		}
-	}
-	return "", errors.Errorf("missing ':' in substitution")
-}
-
-func (sw *shellWord) processName() string {
-	// Read in a name (alphanumeric or _)
-	// If it starts with a digit, consume and return the full run of digits
-	// (a positional parameter such as the '0' in $0)
-	var name bytes.Buffer
-
-	for sw.scanner.Peek() != scanner.EOF {
-		ch := sw.scanner.Peek()
-		if name.Len() == 0 && unicode.IsDigit(ch) {
-			for sw.scanner.Peek() != scanner.EOF && unicode.IsDigit(sw.scanner.Peek()) {
-				// Keep reading until the first non-digit character, or EOF
-				ch = sw.scanner.Next()
-				name.WriteRune(ch)
-			}
-			return name.String()
-		}
-		if name.Len() == 0 && isSpecialParam(ch) {
-			ch = sw.scanner.Next()
-			return string(ch)
-		}
-		if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' {
-			break
-		}
-		ch = sw.scanner.Next()
-		name.WriteRune(ch)
-	}
-
-	return name.String()
-}
-
-// isSpecialParam checks if the provided character is a special parameter,
-// as defined in http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02
-func isSpecialParam(char rune) bool {
-	switch char {
-	case '@', '*', '#', '?', '-', '$', '!', '0':
-		// Special parameters
-		// http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02
-		return true
-	}
-	return false
-}
-
-func (sw *shellWord) getEnv(name string) (string, bool) {
-	for key, value := range sw.envs {
-		if EqualEnvKeys(name, key) {
-			return value, true
-		}
-	}
-	return "", false
-}
-
-func BuildEnvs(env []string) map[string]string {
-	envs := map[string]string{}
-
-	for _, e := range env {
-		i := strings.Index(e, "=")
-
-		if i < 0 {
-			envs[e] = ""
-		} else {
-			k := e[:i]
-			v := e[i+1:]
-
-			// If key already exists, keep previous value.
-			if _, ok := envs[k]; ok {
-				continue
-			}
-			envs[k] = v
-		}
-	}
-
-	return envs
-}
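Note: BuildEnvs keeps the first occurrence when a key is duplicated (TestGetEnv case 8 below depends on this) and maps entries without '=' to the empty string. A quick illustration against the same package:

package main

import (
	"fmt"

	"github.com/moby/buildkit/frontend/dockerfile/shell"
)

func main() {
	envs := shell.BuildEnvs([]string{"car=hat", "car=bike", "novalue"})
	fmt.Println(envs["car"])     // "hat": the first occurrence wins
	fmt.Println(envs["novalue"]) // "": entries without '=' map to ""
}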
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex_test.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex_test.go
deleted file mode 100644
index c092a255f3ec..000000000000
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex_test.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package shell
-
-import (
-	"bufio"
-	"os"
-	"runtime"
-	"strings"
-	"testing"
-
-	"gotest.tools/assert"
-	is "gotest.tools/assert/cmp"
-)
-
-func TestShellParser4EnvVars(t *testing.T) {
-	fn := "envVarTest"
-	lineCount := 0
-
-	file, err := os.Open(fn)
-	assert.Check(t, err)
-	defer file.Close()
-
-	shlex := NewLex('\\')
-	scanner := bufio.NewScanner(file)
-	envs := []string{"PWD=/home", "SHELL=bash", "KOREAN=한국어"}
-	envsMap := BuildEnvs(envs)
-	for scanner.Scan() {
-		line := scanner.Text()
-		lineCount++
-
-		// Skip comments and blank lines
-		if strings.HasPrefix(line, "#") {
-			continue
-		}
-		line = strings.TrimSpace(line)
-		if line == "" {
-			continue
-		}
-
-		words := strings.Split(line, "|")
-		assert.Check(t, is.Len(words, 3))
-
-		platform := strings.TrimSpace(words[0])
-		source := strings.TrimSpace(words[1])
-		expected := strings.TrimSpace(words[2])
-
-		// Key: W=Windows; A=All; U=Unix
-		if platform != "W" && platform != "A" && platform != "U" {
-			t.Fatalf("Invalid tag %s at line %d of %s. Must be W, A or U", platform, lineCount, fn)
-		}
-
-		if ((platform == "W" || platform == "A") && runtime.GOOS == "windows") ||
-			((platform == "U" || platform == "A") && runtime.GOOS != "windows") {
-			newWord, err := shlex.ProcessWord(source, envs)
-			if expected == "error" {
-				assert.Check(t, is.ErrorContains(err, ""), "input: %q, result: %q", source, newWord)
-			} else {
-				assert.Check(t, err, "at line %d of %s", lineCount, fn)
-				assert.Check(t, is.Equal(newWord, expected), "at line %d of %s", lineCount, fn)
-			}
-
-			newWord, err = shlex.ProcessWordWithMap(source, envsMap)
-			if expected == "error" {
-				assert.Check(t, is.ErrorContains(err, ""), "input: %q, result: %q", source, newWord)
-			} else {
-				assert.Check(t, err, "at line %d of %s", lineCount, fn)
-				assert.Check(t, is.Equal(newWord, expected), "at line %d of %s", lineCount, fn)
-			}
-		}
-	}
-}
-
-func TestShellParser4Words(t *testing.T) {
-	fn := "wordsTest"
-
-	file, err := os.Open(fn)
-	if err != nil {
-		t.Fatalf("Can't open '%s': %s", fn, err)
-	}
-	defer file.Close()
-
-	const (
-		modeNormal = iota
-		modeOnlySetEnv
-	)
-	for _, mode := range []int{modeNormal, modeOnlySetEnv} {
-		var envs []string
-		shlex := NewLex('\\')
-		if mode == modeOnlySetEnv {
-			shlex.RawQuotes = true
-			shlex.SkipUnsetEnv = true
-		}
-		scanner := bufio.NewScanner(file)
-		lineNum := 0
-		for scanner.Scan() {
-			line := scanner.Text()
-			lineNum = lineNum + 1
-
-			if strings.HasPrefix(line, "#") {
-				continue
-			}
-
-			if strings.HasPrefix(line, "ENV ") {
-				line = strings.TrimLeft(line[3:], " ")
-				envs = append(envs, line)
-				continue
-			}
-
-			words := strings.Split(line, "|")
-			if len(words) != 2 {
-				t.Fatalf("Error in '%s' (line %d) - should be exactly one | in: %q", fn, lineNum, line)
-			}
-			test := strings.TrimSpace(words[0])
-			expected := strings.Split(strings.TrimLeft(words[1], " "), ",")
-
-			// test for ProcessWords
-			result, err := shlex.ProcessWords(test, envs)
-
-			if err != nil {
-				result = []string{"error"}
-			}
-
-			if len(result) != len(expected) {
-				t.Fatalf("Error on line %d. %q was supposed to result in %q, but got %q instead", lineNum, test, expected, result)
-			}
-			for i, w := range expected {
-				if w != result[i] {
-					t.Fatalf("Error on line %d. %q was supposed to result in %q, but got %q instead", lineNum, test, expected, result)
-				}
-			}
-
-			// test for ProcessWordsWithMap
-			result, err = shlex.ProcessWordsWithMap(test, BuildEnvs(envs))
-
-			if err != nil {
-				result = []string{"error"}
-			}
-
-			if len(result) != len(expected) {
-				t.Fatalf("Error on line %d. %q was supposed to result in %q, but got %q instead", lineNum, test, expected, result)
-			}
-			for i, w := range expected {
-				if w != result[i] {
-					t.Fatalf("Error on line %d. %q was supposed to result in %q, but got %q instead", lineNum, test, expected, result)
				}
-			}
-		}
-	}
-}
-
-func TestGetEnv(t *testing.T) {
-	sw := &shellWord{envs: nil}
-
-	getEnv := func(name string) string {
-		value, _ := sw.getEnv(name)
-		return value
-	}
-	sw.envs = BuildEnvs([]string{})
-	if getEnv("foo") != "" {
-		t.Fatal("2 - 'foo' should map to ''")
-	}
-
-	sw.envs = BuildEnvs([]string{"foo"})
-	if getEnv("foo") != "" {
-		t.Fatal("3 - 'foo' should map to ''")
-	}
-
-	sw.envs = BuildEnvs([]string{"foo="})
-	if getEnv("foo") != "" {
-		t.Fatal("4 - 'foo' should map to ''")
-	}
-
-	sw.envs = BuildEnvs([]string{"foo=bar"})
-	if getEnv("foo") != "bar" {
-		t.Fatal("5 - 'foo' should map to 'bar'")
-	}
-
-	sw.envs = BuildEnvs([]string{"foo=bar", "car=hat"})
-	if getEnv("foo") != "bar" {
-		t.Fatal("6 - 'foo' should map to 'bar'")
-	}
-	if getEnv("car") != "hat" {
-		t.Fatal("7 - 'car' should map to 'hat'")
-	}
-
-	// Make sure we grab the first 'car' in the list
-	sw.envs = BuildEnvs([]string{"foo=bar", "car=hat", "car=bike"})
-	if getEnv("car") != "hat" {
-		t.Fatal("8 - 'car' should map to 'hat'")
-	}
-}
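Note on the fixture below: each wordsTest line has the form `input | expected1,expected2,...`, lines starting with `ENV ` append a variable to the environment used from that point on, and `#` lines are comments. The trailing-space warnings matter because unquoted whitespace is collapsed during word splitting, so significant spaces have to live inside the stored values.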
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/wordsTest b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/wordsTest
deleted file mode 100644
index 1fd9f19433d9..000000000000
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/wordsTest
+++ /dev/null
@@ -1,30 +0,0 @@
-hello | hello
-hello${hi}bye | hellobye
-ENV hi=hi
-hello${hi}bye | hellohibye
-ENV space=abc def
-hello${space}bye | helloabc,defbye
-hello"${space}"bye | helloabc defbye
-hello "${space}"bye | hello,abc defbye
-ENV leading= ab c
-hello${leading}def | hello,ab,cdef
-hello"${leading}" def | hello ab c,def
-hello"${leading}" | hello ab c
-hello${leading} | hello,ab,c
-# next line MUST have 3 trailing spaces, don't erase them!
-ENV trailing=ab c   
-hello${trailing} | helloab,c
-hello${trailing}d | helloab,c,d
-hello"${trailing}"d | helloab c d
-# next line MUST have 3 trailing spaces, don't erase them!
-hel"lo${trailing}" | helloab c -hello" there " | hello there -hello there | hello,there -hello\ there | hello there -hello" there | error -hello\" there | hello",there -hello"\\there" | hello\there -hello"\there" | hello\there -hello'\\there' | hello\\there -hello'\there' | hello\there -hello'$there' | hello$there diff --git a/vendor/github.com/moby/buildkit/frontend/frontend.go b/vendor/github.com/moby/buildkit/frontend/frontend.go deleted file mode 100644 index 26b861f85e58..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/frontend.go +++ /dev/null @@ -1,28 +0,0 @@ -package frontend - -import ( - "context" - "io" - - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/executor" - gw "github.com/moby/buildkit/frontend/gateway/client" - digest "github.com/opencontainers/go-digest" -) - -type Frontend interface { - Solve(ctx context.Context, llb FrontendLLBBridge, opt map[string]string) (*Result, error) -} - -type FrontendLLBBridge interface { - Solve(ctx context.Context, req SolveRequest) (*Result, error) - ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error) - Exec(ctx context.Context, meta executor.Meta, rootfs cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error -} - -type SolveRequest = gw.SolveRequest - -type WorkerInfos interface { - WorkerInfos() []client.WorkerInfo -} diff --git a/vendor/github.com/moby/buildkit/frontend/frontend_test.go b/vendor/github.com/moby/buildkit/frontend/frontend_test.go deleted file mode 100644 index 4405650c67e6..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/frontend_test.go +++ /dev/null @@ -1,277 +0,0 @@ -package frontend - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/containerd/continuity/fs/fstest" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/client/llb" - gateway "github.com/moby/buildkit/frontend/gateway/client" - "github.com/moby/buildkit/util/testutil/integration" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tonistiigi/fsutil" - fstypes "github.com/tonistiigi/fsutil/types" -) - -var ( - errFailed = errors.New("test failed") -) - -func TestFrontendIntegration(t *testing.T) { - integration.Run(t, []integration.Test{ - testRefReadFile, - testRefReadDir, - testRefStatFile, - }) -} - -func testRefReadFile(t *testing.T, sb integration.Sandbox) { - ctx := context.TODO() - - c, err := client.New(ctx, sb.Address()) - require.NoError(t, err) - defer c.Close() - - testcontent := []byte(`foobar`) - - dir, err := tmpdir( - fstest.CreateFile("test", testcontent, 0666), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { - def, err := llb.Local("mylocal").Marshal() - if err != nil { - return nil, err - } - - res, err := c.Solve(ctx, gateway.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return nil, err - } - - ref, err := res.SingleRef() - if err != nil { - return nil, err - } - - for _, tc := range []struct { - name string - exp []byte - r *gateway.FileRange - }{ - {"fullfile", testcontent, nil}, - {"prefix", []byte(`foo`), &gateway.FileRange{Offset: 0, Length: 3}}, - {"suffix", []byte(`ar`), &gateway.FileRange{Offset: 4, Length: 2}}, - {"mid", []byte(`oba`), &gateway.FileRange{Offset: 2, Length: 3}}, - {"overrun", []byte(`bar`), &gateway.FileRange{Offset: 3, 
Length: 10}}, - } { - t.Run(tc.name, func(t *testing.T) { - r, err := ref.ReadFile(ctx, gateway.ReadRequest{ - Filename: "test", - Range: tc.r, - }) - require.NoError(t, err) - assert.Equal(t, tc.exp, r) - }) - } - - return gateway.NewResult(), nil - } - - _, err = c.Build(ctx, client.SolveOpt{ - LocalDirs: map[string]string{ - "mylocal": dir, - }, - }, "", frontend, nil) - require.NoError(t, err) -} - -func testRefReadDir(t *testing.T, sb integration.Sandbox) { - ctx := context.TODO() - - c, err := client.New(ctx, sb.Address()) - require.NoError(t, err) - defer c.Close() - - dir, err := tmpdir( - fstest.CreateDir("somedir", 0777), - fstest.CreateFile("somedir/foo1.txt", []byte(`foo1`), 0666), - fstest.CreateFile("somedir/foo2.txt", []byte{}, 0666), - fstest.CreateFile("somedir/bar.log", []byte(`somethingsomething`), 0666), - fstest.Symlink("bar.log", "somedir/link.log"), - fstest.CreateDir("somedir/baz.dir", 0777), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - expMap := make(map[string]*fstypes.Stat) - - fsutil.Walk(ctx, dir, nil, func(path string, info os.FileInfo, err error) error { - require.NoError(t, err) - stat, ok := info.Sys().(*fstypes.Stat) - require.True(t, ok) - stat.ModTime = 0 // this will inevitably differ, we clear it during the tests below too - stat.Path = filepath.Base(stat.Path) // we are only testing reading a single directory here - expMap[path] = stat - return nil - }) - - frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { - def, err := llb.Local("mylocal").Marshal() - if err != nil { - return nil, err - } - - res, err := c.Solve(ctx, gateway.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return nil, err - } - - ref, err := res.SingleRef() - if err != nil { - return nil, err - } - - for _, tc := range []struct { - name string - req gateway.ReadDirRequest - exp []*fstypes.Stat - }{ - { - name: "toplevel", - req: gateway.ReadDirRequest{Path: "/"}, - exp: []*fstypes.Stat{ - expMap["somedir"], - }, - }, - { - name: "subdir", - req: gateway.ReadDirRequest{Path: "/somedir"}, - exp: []*fstypes.Stat{ - expMap["somedir/bar.log"], - expMap["somedir/baz.dir"], - expMap["somedir/foo1.txt"], - expMap["somedir/foo2.txt"], - expMap["somedir/link.log"], - }, - }, - { - name: "globtxt", - req: gateway.ReadDirRequest{Path: "/somedir", IncludePattern: "*.txt"}, - exp: []*fstypes.Stat{ - expMap["somedir/foo1.txt"], - expMap["somedir/foo2.txt"], - }, - }, - { - name: "globlog", - req: gateway.ReadDirRequest{Path: "/somedir", IncludePattern: "*.log"}, - exp: []*fstypes.Stat{ - expMap["somedir/bar.log"], - expMap["somedir/link.log"], - }, - }, - { - name: "subsubdir", - req: gateway.ReadDirRequest{Path: "/somedir", IncludePattern: "*.dir"}, - exp: []*fstypes.Stat{ - expMap["somedir/baz.dir"], - }, - }, - } { - t.Run(tc.name, func(t *testing.T) { - dirents, err := ref.ReadDir(ctx, tc.req) - require.NoError(t, err) - for _, s := range dirents { - s.ModTime = 0 // this will inevitably differ, we cleared it in the expected versions above. 
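// (The assertion below therefore compares every Stat field except ModTime,
// which depends on when the fixture directory was created.)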
- } - assert.Equal(t, tc.exp, dirents) - }) - } - - return gateway.NewResult(), nil - } - - _, err = c.Build(ctx, client.SolveOpt{ - LocalDirs: map[string]string{ - "mylocal": dir, - }, - }, "", frontend, nil) - require.NoError(t, err) -} - -func testRefStatFile(t *testing.T, sb integration.Sandbox) { - ctx := context.TODO() - - c, err := client.New(ctx, sb.Address()) - require.NoError(t, err) - defer c.Close() - - testcontent := []byte(`foobar`) - - dir, err := tmpdir( - fstest.CreateFile("test", testcontent, 0666), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - exp, err := fsutil.Stat(filepath.Join(dir, "test")) - require.NoError(t, err) - - frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { - def, err := llb.Local("mylocal").Marshal() - if err != nil { - return nil, err - } - - res, err := c.Solve(ctx, gateway.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return nil, err - } - - ref, err := res.SingleRef() - if err != nil { - return nil, err - } - - st, err := ref.StatFile(ctx, gateway.StatRequest{ - Path: "test", - }) - require.NoError(t, err) - require.NotNil(t, st) - assert.Equal(t, exp, st) - return gateway.NewResult(), nil - } - - _, err = c.Build(ctx, client.SolveOpt{ - LocalDirs: map[string]string{ - "mylocal": dir, - }, - }, "", frontend, nil) - require.NoError(t, err) -} - -func tmpdir(appliers ...fstest.Applier) (string, error) { - tmpdir, err := ioutil.TempDir("", "buildkit-frontend") - if err != nil { - return "", err - } - if err := fstest.Apply(appliers...).Apply(tmpdir); err != nil { - return "", err - } - return tmpdir, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go deleted file mode 100644 index 372353879458..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go +++ /dev/null @@ -1,71 +0,0 @@ -package client - -import ( - "context" - - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" - fstypes "github.com/tonistiigi/fsutil/types" -) - -type Client interface { - Solve(ctx context.Context, req SolveRequest) (*Result, error) - ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (digest.Digest, []byte, error) - BuildOpts() BuildOpts -} - -type Reference interface { - ReadFile(ctx context.Context, req ReadRequest) ([]byte, error) - StatFile(ctx context.Context, req StatRequest) (*fstypes.Stat, error) - ReadDir(ctx context.Context, req ReadDirRequest) ([]*fstypes.Stat, error) -} - -type ReadRequest struct { - Filename string - Range *FileRange -} - -type FileRange struct { - Offset int - Length int -} - -type ReadDirRequest struct { - Path string - IncludePattern string -} - -type StatRequest struct { - Path string -} - -// SolveRequest is same as frontend.SolveRequest but avoiding dependency -type SolveRequest struct { - Definition *pb.Definition - Frontend string - FrontendOpt map[string]string - ImportCacheRefs []string -} - -type WorkerInfo struct { - ID string - Labels map[string]string - Platforms []specs.Platform -} - -type BuildOpts struct { - Opts map[string]string - SessionID string - Workers []WorkerInfo - Product string - LLBCaps apicaps.CapSet - Caps apicaps.CapSet -} - -type ResolveImageConfigOpt struct { - Platform *specs.Platform - ResolveMode string - LogName string -} diff --git 
a/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go b/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go deleted file mode 100644 index bd5422847822..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go +++ /dev/null @@ -1,54 +0,0 @@ -package client - -import ( - "context" - "sync" - - "github.com/pkg/errors" -) - -type BuildFunc func(context.Context, Client) (*Result, error) - -type Result struct { - mu sync.Mutex - Ref Reference - Refs map[string]Reference - Metadata map[string][]byte -} - -func NewResult() *Result { - return &Result{} -} - -func (r *Result) AddMeta(k string, v []byte) { - r.mu.Lock() - if r.Metadata == nil { - r.Metadata = map[string][]byte{} - } - r.Metadata[k] = v - r.mu.Unlock() -} - -func (r *Result) AddRef(k string, ref Reference) { - r.mu.Lock() - if r.Refs == nil { - r.Refs = map[string]Reference{} - } - r.Refs[k] = ref - r.mu.Unlock() -} - -func (r *Result) SetRef(ref Reference) { - r.Ref = ref -} - -func (r *Result) SingleRef() (Reference, error) { - r.mu.Lock() - defer r.mu.Unlock() - - if r.Refs != nil && r.Ref == nil { - return nil, errors.Errorf("invalid map result") - } - - return r.Ref, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go deleted file mode 100644 index 23eb227fb1aa..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go +++ /dev/null @@ -1,179 +0,0 @@ -package forwarder - -import ( - "context" - "sync" - - "github.com/moby/buildkit/cache" - cacheutil "github.com/moby/buildkit/cache/util" - clienttypes "github.com/moby/buildkit/client" - "github.com/moby/buildkit/frontend" - "github.com/moby/buildkit/frontend/gateway/client" - gwpb "github.com/moby/buildkit/frontend/gateway/pb" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/solver" - opspb "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - "github.com/moby/buildkit/worker" - "github.com/pkg/errors" - fstypes "github.com/tonistiigi/fsutil/types" -) - -func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, workerInfos []clienttypes.WorkerInfo) (*bridgeClient, error) { - return &bridgeClient{ - opts: opts, - FrontendLLBBridge: llbBridge, - sid: session.FromContext(ctx), - workerInfos: workerInfos, - final: map[*ref]struct{}{}, - }, nil -} - -type bridgeClient struct { - frontend.FrontendLLBBridge - mu sync.Mutex - opts map[string]string - final map[*ref]struct{} - sid string - exporterAttr map[string][]byte - refs []*ref - workerInfos []clienttypes.WorkerInfo -} - -func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*client.Result, error) { - res, err := c.FrontendLLBBridge.Solve(ctx, frontend.SolveRequest{ - Definition: req.Definition, - Frontend: req.Frontend, - FrontendOpt: req.FrontendOpt, - ImportCacheRefs: req.ImportCacheRefs, - }) - if err != nil { - return nil, err - } - - cRes := &client.Result{} - c.mu.Lock() - for k, r := range res.Refs { - rr := &ref{r} - c.refs = append(c.refs, rr) - cRes.AddRef(k, rr) - } - if r := res.Ref; r != nil { - rr := &ref{r} - c.refs = append(c.refs, rr) - cRes.SetRef(rr) - } - c.mu.Unlock() - cRes.Metadata = res.Metadata - - return cRes, nil -} -func (c *bridgeClient) BuildOpts() client.BuildOpts { - workers := make([]client.WorkerInfo, 0, len(c.workerInfos)) - for _, w := range c.workerInfos { - workers = append(workers, 
client.WorkerInfo{ - ID: w.ID, - Labels: w.Labels, - Platforms: w.Platforms, - }) - } - - return client.BuildOpts{ - Opts: c.opts, - SessionID: c.sid, - Workers: workers, - Product: apicaps.ExportedProduct, - Caps: gwpb.Caps.CapSet(gwpb.Caps.All()), - LLBCaps: opspb.Caps.CapSet(opspb.Caps.All()), - } -} - -func (c *bridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, error) { - if r == nil { - return nil, nil - } - - res := &frontend.Result{} - - if r.Refs != nil { - res.Refs = make(map[string]solver.CachedResult, len(r.Refs)) - for k, r := range r.Refs { - rr, ok := r.(*ref) - if !ok { - return nil, errors.Errorf("invalid reference type for forward %T", r) - } - c.final[rr] = struct{}{} - res.Refs[k] = rr.CachedResult - } - } - if r := r.Ref; r != nil { - rr, ok := r.(*ref) - if !ok { - return nil, errors.Errorf("invalid reference type for forward %T", r) - } - c.final[rr] = struct{}{} - res.Ref = rr.CachedResult - } - res.Metadata = r.Metadata - - return res, nil -} - -func (c *bridgeClient) discard(err error) { - for _, r := range c.refs { - if r != nil { - if _, ok := c.final[r]; !ok || err != nil { - r.Release(context.TODO()) - } - } - } -} - -type ref struct { - solver.CachedResult -} - -func (r *ref) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) { - ref, err := r.getImmutableRef() - if err != nil { - return nil, err - } - newReq := cacheutil.ReadRequest{ - Filename: req.Filename, - } - if r := req.Range; r != nil { - newReq.Range = &cacheutil.FileRange{ - Offset: r.Offset, - Length: r.Length, - } - } - return cacheutil.ReadFile(ctx, ref, newReq) -} - -func (r *ref) ReadDir(ctx context.Context, req client.ReadDirRequest) ([]*fstypes.Stat, error) { - ref, err := r.getImmutableRef() - if err != nil { - return nil, err - } - newReq := cacheutil.ReadDirRequest{ - Path: req.Path, - IncludePattern: req.IncludePattern, - } - return cacheutil.ReadDir(ctx, ref, newReq) -} - -func (r *ref) StatFile(ctx context.Context, req client.StatRequest) (*fstypes.Stat, error) { - ref, err := r.getImmutableRef() - if err != nil { - return nil, err - } - return cacheutil.StatFile(ctx, ref, req.Path) -} - -func (r *ref) getImmutableRef() (cache.ImmutableRef, error) { - ref, ok := r.CachedResult.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid ref: %T", r.CachedResult.Sys()) - } - return ref.ImmutableRef, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go deleted file mode 100644 index 61a187dc3779..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go +++ /dev/null @@ -1,38 +0,0 @@ -package forwarder - -import ( - "context" - - "github.com/moby/buildkit/frontend" - "github.com/moby/buildkit/frontend/gateway/client" -) - -func NewGatewayForwarder(w frontend.WorkerInfos, f client.BuildFunc) frontend.Frontend { - return &GatewayForwarder{ - workers: w, - f: f, - } -} - -type GatewayForwarder struct { - workers frontend.WorkerInfos - f client.BuildFunc -} - -func (gf *GatewayForwarder) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (retRes *frontend.Result, retErr error) { - c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts, gf.workers.WorkerInfos()) - if err != nil { - return nil, err - } - - defer func() { - c.discard(retErr) - }() - - res, err := gf.f(ctx, c) - if err != nil { - return nil, err - } - - return c.toFrontendResult(res) -} diff --git 
a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go deleted file mode 100644 index 18a28b0183cd..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go +++ /dev/null @@ -1,639 +0,0 @@ -package gateway - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net" - "os" - "strings" - "sync" - "time" - - "github.com/docker/distribution/reference" - apitypes "github.com/moby/buildkit/api/types" - "github.com/moby/buildkit/cache" - cacheutil "github.com/moby/buildkit/cache/util" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/executor" - "github.com/moby/buildkit/exporter/containerimage/exptypes" - "github.com/moby/buildkit/frontend" - gw "github.com/moby/buildkit/frontend/gateway/client" - pb "github.com/moby/buildkit/frontend/gateway/pb" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/solver" - opspb "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - "github.com/moby/buildkit/util/tracing" - "github.com/moby/buildkit/worker" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/net/http2" - spb "google.golang.org/genproto/googleapis/rpc/status" - "google.golang.org/grpc" - "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/status" -) - -const ( - keySource = "source" - keyDevel = "gateway-devel" -) - -func NewGatewayFrontend(w frontend.WorkerInfos) frontend.Frontend { - return &gatewayFrontend{ - workers: w, - } -} - -type gatewayFrontend struct { - workers frontend.WorkerInfos -} - -func filterPrefix(opts map[string]string, pfx string) map[string]string { - m := map[string]string{} - for k, v := range opts { - if strings.HasPrefix(k, pfx) { - m[strings.TrimPrefix(k, pfx)] = v - } - } - return m -} - -func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (*frontend.Result, error) { - source, ok := opts[keySource] - if !ok { - return nil, errors.Errorf("no source specified for gateway") - } - - sid := session.FromContext(ctx) - - _, isDevel := opts[keyDevel] - var img specs.Image - var rootFS cache.ImmutableRef - var readonly bool // TODO: try to switch to read-only by default. 
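// The branch below obtains the frontend's rootfs in one of two ways: in
// devel mode ("gateway-devel" set) the frontend image is itself built by
// solving `source` as a nested frontend with the "gateway-"-prefixed
// options; otherwise `source` is resolved and pulled as a regular image
// reference, and its config later seeds the container's args/env/cwd.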
- - if isDevel { - devRes, err := llbBridge.Solve(session.NewContext(ctx, "gateway:"+sid), - frontend.SolveRequest{ - Frontend: source, - FrontendOpt: filterPrefix(opts, "gateway-"), - }) - if err != nil { - return nil, err - } - defer func() { - devRes.EachRef(func(ref solver.CachedResult) error { - return ref.Release(context.TODO()) - }) - }() - if devRes.Ref == nil { - return nil, errors.Errorf("development gateway didn't return default result") - } - workerRef, ok := devRes.Ref.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid ref: %T", devRes.Ref.Sys()) - } - rootFS = workerRef.ImmutableRef - config, ok := devRes.Metadata[exptypes.ExporterImageConfigKey] - if ok { - if err := json.Unmarshal(config, &img); err != nil { - return nil, err - } - } - } else { - sourceRef, err := reference.ParseNormalizedNamed(source) - if err != nil { - return nil, err - } - - dgst, config, err := llbBridge.ResolveImageConfig(ctx, reference.TagNameOnly(sourceRef).String(), gw.ResolveImageConfigOpt{}) - if err != nil { - return nil, err - } - - if err := json.Unmarshal(config, &img); err != nil { - return nil, err - } - - if dgst != "" { - sourceRef, err = reference.WithDigest(sourceRef, dgst) - if err != nil { - return nil, err - } - } - - src := llb.Image(sourceRef.String(), &markTypeFrontend{}) - - def, err := src.Marshal() - if err != nil { - return nil, err - } - - res, err := llbBridge.Solve(ctx, frontend.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return nil, err - } - defer func() { - res.EachRef(func(ref solver.CachedResult) error { - return ref.Release(context.TODO()) - }) - }() - if res.Ref == nil { - return nil, errors.Errorf("gateway source didn't return default result") - - } - workerRef, ok := res.Ref.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid ref: %T", res.Ref.Sys()) - } - rootFS = workerRef.ImmutableRef - } - - lbf, err := newLLBBridgeForwarder(ctx, llbBridge, gf.workers) - defer lbf.conn.Close() - if err != nil { - return nil, err - } - - args := []string{"/run"} - env := []string{} - cwd := "/" - if img.Config.Env != nil { - env = img.Config.Env - } - if img.Config.Entrypoint != nil { - args = img.Config.Entrypoint - } - if img.Config.WorkingDir != "" { - cwd = img.Config.WorkingDir - } - i := 0 - for k, v := range opts { - env = append(env, fmt.Sprintf("BUILDKIT_FRONTEND_OPT_%d", i)+"="+k+"="+v) - i++ - } - - env = append(env, "BUILDKIT_SESSION_ID="+sid) - - dt, err := json.Marshal(gf.workers.WorkerInfos()) - if err != nil { - return nil, errors.Wrap(err, "failed to marshal workers array") - } - env = append(env, "BUILDKIT_WORKERS="+string(dt)) - - defer lbf.Discard() - - env = append(env, "BUILDKIT_EXPORTEDPRODUCT="+apicaps.ExportedProduct) - - err = llbBridge.Exec(ctx, executor.Meta{ - Env: env, - Args: args, - Cwd: cwd, - ReadonlyRootFS: readonly, - }, rootFS, lbf.Stdin, lbf.Stdout, os.Stderr) - - if err != nil { - // An existing error (set via Return rpc) takes - // precedence over this error, which in turn takes - // precedence over a success reported via Return. 
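// In short: an error from Return beats this Exec error, which beats a
// success from Return; the nil check below implements that ordering.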
- lbf.mu.Lock() - if lbf.err == nil { - lbf.result = nil - lbf.err = err - } - lbf.mu.Unlock() - } - - return lbf.Result() -} - -func (lbf *llbBridgeForwarder) Discard() { - lbf.mu.Lock() - defer lbf.mu.Unlock() - for id, r := range lbf.refs { - if lbf.err == nil && lbf.result != nil { - keep := false - lbf.result.EachRef(func(r2 solver.CachedResult) error { - if r == r2 { - keep = true - } - return nil - }) - if keep { - continue - } - } - r.Release(context.TODO()) - delete(lbf.refs, id) - } -} - -func (lbf *llbBridgeForwarder) Done() <-chan struct{} { - return lbf.doneCh -} - -func (lbf *llbBridgeForwarder) setResult(r *frontend.Result, err error) (*pb.ReturnResponse, error) { - lbf.mu.Lock() - defer lbf.mu.Unlock() - - if (r == nil) == (err == nil) { - return nil, errors.New("gateway return must be either result or err") - } - - if lbf.result != nil || lbf.err != nil { - return nil, errors.New("gateway result is already set") - } - - lbf.result = r - lbf.err = err - close(lbf.doneCh) - return &pb.ReturnResponse{}, nil -} - -func (lbf *llbBridgeForwarder) Result() (*frontend.Result, error) { - lbf.mu.Lock() - defer lbf.mu.Unlock() - - if lbf.result == nil && lbf.err == nil { - return nil, errors.New("no result for incomplete build") - } - - if lbf.err != nil { - return nil, lbf.err - } - - return lbf.result, nil -} - -func NewBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos) *llbBridgeForwarder { - lbf := &llbBridgeForwarder{ - callCtx: ctx, - llbBridge: llbBridge, - refs: map[string]solver.CachedResult{}, - doneCh: make(chan struct{}), - pipe: newPipe(), - workers: workers, - } - return lbf -} - -func newLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos) (*llbBridgeForwarder, error) { - lbf := NewBridgeForwarder(ctx, llbBridge, workers) - server := grpc.NewServer() - grpc_health_v1.RegisterHealthServer(server, health.NewServer()) - pb.RegisterLLBBridgeServer(server, lbf) - - go serve(ctx, server, lbf.conn) - - return lbf, nil -} - -type pipe struct { - Stdin io.ReadCloser - Stdout io.WriteCloser - conn net.Conn -} - -func newPipe() *pipe { - pr1, pw1, _ := os.Pipe() - pr2, pw2, _ := os.Pipe() - return &pipe{ - Stdin: pr1, - Stdout: pw2, - conn: &conn{ - Reader: pr2, - Writer: pw1, - Closer: pw2, - }, - } -} - -type conn struct { - io.Reader - io.Writer - io.Closer -} - -func (s *conn) LocalAddr() net.Addr { - return dummyAddr{} -} -func (s *conn) RemoteAddr() net.Addr { - return dummyAddr{} -} -func (s *conn) SetDeadline(t time.Time) error { - return nil -} -func (s *conn) SetReadDeadline(t time.Time) error { - return nil -} -func (s *conn) SetWriteDeadline(t time.Time) error { - return nil -} - -type dummyAddr struct { -} - -func (d dummyAddr) Network() string { - return "pipe" -} - -func (d dummyAddr) String() string { - return "localhost" -} - -type LLBBridgeForwarder interface { - pb.LLBBridgeServer - Done() <-chan struct{} - Result() (*frontend.Result, error) -} - -type llbBridgeForwarder struct { - mu sync.Mutex - callCtx context.Context - llbBridge frontend.FrontendLLBBridge - refs map[string]solver.CachedResult - // lastRef solver.CachedResult - // lastRefs map[string]solver.CachedResult - // err error - doneCh chan struct{} // closed when result or err become valid through a call to a Return - result *frontend.Result - err error - exporterAttr map[string][]byte - workers frontend.WorkerInfos - *pipe -} - -func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx 
context.Context, req *pb.ResolveImageConfigRequest) (*pb.ResolveImageConfigResponse, error) { - ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) - var platform *specs.Platform - if p := req.Platform; p != nil { - platform = &specs.Platform{ - OS: p.OS, - Architecture: p.Architecture, - Variant: p.Variant, - OSVersion: p.OSVersion, - OSFeatures: p.OSFeatures, - } - } - dgst, dt, err := lbf.llbBridge.ResolveImageConfig(ctx, req.Ref, gw.ResolveImageConfigOpt{ - Platform: platform, - ResolveMode: req.ResolveMode, - LogName: req.LogName, - }) - if err != nil { - return nil, err - } - return &pb.ResolveImageConfigResponse{ - Digest: dgst, - Config: dt, - }, nil -} - -func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) (*pb.SolveResponse, error) { - ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) - res, err := lbf.llbBridge.Solve(ctx, frontend.SolveRequest{ - Definition: req.Definition, - Frontend: req.Frontend, - FrontendOpt: req.FrontendOpt, - ImportCacheRefs: req.ImportCacheRefs, - }) - if err != nil { - return nil, err - } - - if len(res.Refs) > 0 && !req.AllowResultReturn { - // this should never happen because old client shouldn't make a map request - return nil, errors.Errorf("solve did not return default result") - } - - pbRes := &pb.Result{} - var defaultID string - - lbf.mu.Lock() - if res.Refs != nil { - ids := make(map[string]string, len(res.Refs)) - for k, ref := range res.Refs { - id := identity.NewID() - if ref == nil { - id = "" - } else { - lbf.refs[id] = ref - } - ids[k] = id - } - pbRes.Result = &pb.Result_Refs{Refs: &pb.RefMap{Refs: ids}} - } else { - id := identity.NewID() - if res.Ref == nil { - id = "" - } else { - lbf.refs[id] = res.Ref - } - defaultID = id - pbRes.Result = &pb.Result_Ref{Ref: id} - } - lbf.mu.Unlock() - - // compatibility mode for older clients - if req.Final { - exp := map[string][]byte{} - if err := json.Unmarshal(req.ExporterAttr, &exp); err != nil { - return nil, err - } - - for k, v := range res.Metadata { - exp[k] = v - } - - lbf.mu.Lock() - lbf.result = &frontend.Result{ - Ref: lbf.refs[defaultID], - Metadata: exp, - } - lbf.mu.Unlock() - } - - resp := &pb.SolveResponse{ - Result: pbRes, - } - - if !req.AllowResultReturn { - resp.Ref = defaultID - } - - return resp, nil -} -func (lbf *llbBridgeForwarder) ReadFile(ctx context.Context, req *pb.ReadFileRequest) (*pb.ReadFileResponse, error) { - ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) - lbf.mu.Lock() - ref, ok := lbf.refs[req.Ref] - lbf.mu.Unlock() - if !ok { - return nil, errors.Errorf("no such ref: %v", req.Ref) - } - if ref == nil { - return nil, errors.Wrapf(os.ErrNotExist, "%s not found", req.FilePath) - } - workerRef, ok := ref.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid ref: %T", ref.Sys()) - } - - newReq := cacheutil.ReadRequest{ - Filename: req.FilePath, - } - if r := req.Range; r != nil { - newReq.Range = &cacheutil.FileRange{ - Offset: int(r.Offset), - Length: int(r.Length), - } - } - - dt, err := cacheutil.ReadFile(ctx, workerRef.ImmutableRef, newReq) - if err != nil { - return nil, err - } - - return &pb.ReadFileResponse{Data: dt}, nil -} - -func (lbf *llbBridgeForwarder) ReadDir(ctx context.Context, req *pb.ReadDirRequest) (*pb.ReadDirResponse, error) { - ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) - lbf.mu.Lock() - ref, ok := lbf.refs[req.Ref] - lbf.mu.Unlock() - if !ok { - return nil, errors.Errorf("no such ref: %v", req.Ref) - } - if ref == nil { - return nil, 
errors.Wrapf(os.ErrNotExist, "%s not found", req.DirPath) - } - workerRef, ok := ref.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid ref: %T", ref.Sys()) - } - - newReq := cacheutil.ReadDirRequest{ - Path: req.DirPath, - IncludePattern: req.IncludePattern, - } - entries, err := cacheutil.ReadDir(ctx, workerRef.ImmutableRef, newReq) - if err != nil { - return nil, err - } - - return &pb.ReadDirResponse{Entries: entries}, nil -} - -func (lbf *llbBridgeForwarder) StatFile(ctx context.Context, req *pb.StatFileRequest) (*pb.StatFileResponse, error) { - ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) - lbf.mu.Lock() - ref, ok := lbf.refs[req.Ref] - lbf.mu.Unlock() - if !ok { - return nil, errors.Errorf("no such ref: %v", req.Ref) - } - if ref == nil { - return nil, errors.Wrapf(os.ErrNotExist, "%s not found", req.Path) - } - workerRef, ok := ref.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid ref: %T", ref.Sys()) - } - - st, err := cacheutil.StatFile(ctx, workerRef.ImmutableRef, req.Path) - if err != nil { - return nil, err - } - - return &pb.StatFileResponse{Stat: st}, nil -} - -func (lbf *llbBridgeForwarder) Ping(context.Context, *pb.PingRequest) (*pb.PongResponse, error) { - - workers := lbf.workers.WorkerInfos() - pbWorkers := make([]*apitypes.WorkerRecord, 0, len(workers)) - for _, w := range workers { - pbWorkers = append(pbWorkers, &apitypes.WorkerRecord{ - ID: w.ID, - Labels: w.Labels, - Platforms: opspb.PlatformsFromSpec(w.Platforms), - }) - } - - return &pb.PongResponse{ - FrontendAPICaps: pb.Caps.All(), - Workers: pbWorkers, - LLBCaps: opspb.Caps.All(), - }, nil -} - -func (lbf *llbBridgeForwarder) Return(ctx context.Context, in *pb.ReturnRequest) (*pb.ReturnResponse, error) { - if in.Error != nil { - return lbf.setResult(nil, status.ErrorProto(&spb.Status{ - Code: in.Error.Code, - Message: in.Error.Message, - // Details: in.Error.Details, - })) - } else { - r := &frontend.Result{ - Metadata: in.Result.Metadata, - } - - switch res := in.Result.Result.(type) { - case *pb.Result_Ref: - ref, err := lbf.convertRef(res.Ref) - if err != nil { - return nil, err - } - r.Ref = ref - case *pb.Result_Refs: - m := map[string]solver.CachedResult{} - for k, v := range res.Refs.Refs { - ref, err := lbf.convertRef(v) - if err != nil { - return nil, err - } - m[k] = ref - } - r.Refs = m - } - return lbf.setResult(r, nil) - } -} - -func (lbf *llbBridgeForwarder) convertRef(id string) (solver.CachedResult, error) { - if id == "" { - return nil, nil - } - lbf.mu.Lock() - defer lbf.mu.Unlock() - r, ok := lbf.refs[id] - if !ok { - return nil, errors.Errorf("return reference %s not found", id) - } - return r, nil -} - -func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) { - go func() { - <-ctx.Done() - conn.Close() - }() - logrus.Debugf("serving grpc connection") - (&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{Handler: grpcServer}) -} - -type markTypeFrontend struct{} - -func (*markTypeFrontend) SetImageOption(ii *llb.ImageInfo) { - ii.RecordType = string(client.UsageRecordTypeFrontend) -} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go deleted file mode 100644 index d4e0525d897d..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go +++ /dev/null @@ -1,479 +0,0 @@ -package grpcclient - -import ( - "context" - "encoding/json" - "io" - "net" - "os" - "strings" - "time" - - 
"github.com/gogo/googleapis/google/rpc" - "github.com/moby/buildkit/frontend/gateway/client" - pb "github.com/moby/buildkit/frontend/gateway/pb" - opspb "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - fstypes "github.com/tonistiigi/fsutil/types" - "google.golang.org/grpc" - "google.golang.org/grpc/status" -) - -const frontendPrefix = "BUILDKIT_FRONTEND_OPT_" - -type GrpcClient interface { - Run(context.Context, client.BuildFunc) error -} - -func New(ctx context.Context, opts map[string]string, session, product string, c pb.LLBBridgeClient, w []client.WorkerInfo) (GrpcClient, error) { - resp, err := c.Ping(ctx, &pb.PingRequest{}) - if err != nil { - return nil, err - } - - if resp.FrontendAPICaps == nil { - resp.FrontendAPICaps = defaultCaps() - } - - if resp.LLBCaps == nil { - resp.LLBCaps = defaultLLBCaps() - } - - return &grpcClient{ - client: c, - opts: opts, - sessionID: session, - workers: w, - product: product, - caps: pb.Caps.CapSet(resp.FrontendAPICaps), - llbCaps: opspb.Caps.CapSet(resp.LLBCaps), - requests: map[string]*pb.SolveRequest{}, - }, nil -} - -func current() (GrpcClient, error) { - if ep := product(); ep != "" { - apicaps.ExportedProduct = ep - } - - ctx, conn, err := grpcClientConn(context.Background()) - if err != nil { - return nil, err - } - - return New(ctx, opts(), sessionID(), product(), pb.NewLLBBridgeClient(conn), workers()) -} - -func convertRef(ref client.Reference) (string, error) { - if ref == nil { - return "", nil - } - r, ok := ref.(*reference) - if !ok { - return "", errors.Errorf("invalid return reference type %T", ref) - } - return r.id, nil -} - -func RunFromEnvironment(ctx context.Context, f client.BuildFunc) error { - client, err := current() - if err != nil { - return errors.Wrapf(err, "failed to initialize client from environment") - } - return client.Run(ctx, f) -} - -func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError error) { - export := c.caps.Supports(pb.CapReturnResult) == nil - - var ( - res *client.Result - err error - ) - if export { - defer func() { - req := &pb.ReturnRequest{} - if retError == nil { - if res == nil { - res = &client.Result{} - } - pbRes := &pb.Result{ - Metadata: res.Metadata, - } - if res.Refs != nil { - m := map[string]string{} - for k, r := range res.Refs { - id, err := convertRef(r) - if err != nil { - retError = err - continue - } - m[k] = id - } - pbRes.Result = &pb.Result_Refs{Refs: &pb.RefMap{Refs: m}} - } else { - id, err := convertRef(res.Ref) - if err != nil { - retError = err - } else { - pbRes.Result = &pb.Result_Ref{Ref: id} - } - } - if retError == nil { - req.Result = pbRes - } - } - if retError != nil { - st, _ := status.FromError(retError) - stp := st.Proto() - req.Error = &rpc.Status{ - Code: stp.Code, - Message: stp.Message, - // Details: stp.Details, - } - } - if _, err := c.client.Return(ctx, req); err != nil && retError == nil { - retError = err - } - }() - } - - if res, err = f(ctx, c); err != nil { - return err - } - - if err := c.caps.Supports(pb.CapReturnMap); len(res.Refs) > 1 && err != nil { - return err - } - - if !export { - exportedAttrBytes, err := json.Marshal(res.Metadata) - if err != nil { - return errors.Wrapf(err, "failed to marshal return metadata") - } - - req, err := c.requestForRef(res.Ref) - if err != nil { - return errors.Wrapf(err, "failed to find return ref") - } - - req.Final = true - req.ExporterAttr = exportedAttrBytes - - if _, err := 
c.client.Solve(ctx, req); err != nil { - return errors.Wrapf(err, "failed to solve") - } - } - - return nil -} - -// defaultCaps returns the capabilities that were implemented when capabilities -// support was added. This list is frozen and should never be changed. -func defaultCaps() []apicaps.PBCap { - return []apicaps.PBCap{ - {ID: string(pb.CapSolveBase), Enabled: true}, - {ID: string(pb.CapSolveInlineReturn), Enabled: true}, - {ID: string(pb.CapResolveImage), Enabled: true}, - {ID: string(pb.CapReadFile), Enabled: true}, - } -} - -// defaultLLBCaps returns the LLB capabilities that were implemented when capabilities -// support was added. This list is frozen and should never be changed. -func defaultLLBCaps() []apicaps.PBCap { - return []apicaps.PBCap{ - {ID: string(opspb.CapSourceImage), Enabled: true}, - {ID: string(opspb.CapSourceLocal), Enabled: true}, - {ID: string(opspb.CapSourceLocalUnique), Enabled: true}, - {ID: string(opspb.CapSourceLocalSessionID), Enabled: true}, - {ID: string(opspb.CapSourceLocalIncludePatterns), Enabled: true}, - {ID: string(opspb.CapSourceLocalFollowPaths), Enabled: true}, - {ID: string(opspb.CapSourceLocalExcludePatterns), Enabled: true}, - {ID: string(opspb.CapSourceLocalSharedKeyHint), Enabled: true}, - {ID: string(opspb.CapSourceGit), Enabled: true}, - {ID: string(opspb.CapSourceGitKeepDir), Enabled: true}, - {ID: string(opspb.CapSourceGitFullURL), Enabled: true}, - {ID: string(opspb.CapSourceHTTP), Enabled: true}, - {ID: string(opspb.CapSourceHTTPChecksum), Enabled: true}, - {ID: string(opspb.CapSourceHTTPPerm), Enabled: true}, - {ID: string(opspb.CapSourceHTTPUIDGID), Enabled: true}, - {ID: string(opspb.CapBuildOpLLBFileName), Enabled: true}, - {ID: string(opspb.CapExecMetaBase), Enabled: true}, - {ID: string(opspb.CapExecMetaProxy), Enabled: true}, - {ID: string(opspb.CapExecMountBind), Enabled: true}, - {ID: string(opspb.CapExecMountCache), Enabled: true}, - {ID: string(opspb.CapExecMountCacheSharing), Enabled: true}, - {ID: string(opspb.CapExecMountSelector), Enabled: true}, - {ID: string(opspb.CapExecMountTmpfs), Enabled: true}, - {ID: string(opspb.CapExecMountSecret), Enabled: true}, - {ID: string(opspb.CapConstraints), Enabled: true}, - {ID: string(opspb.CapPlatform), Enabled: true}, - {ID: string(opspb.CapMetaIgnoreCache), Enabled: true}, - {ID: string(opspb.CapMetaDescription), Enabled: true}, - {ID: string(opspb.CapMetaExportCache), Enabled: true}, - } -} - -type grpcClient struct { - client pb.LLBBridgeClient - opts map[string]string - sessionID string - product string - workers []client.WorkerInfo - caps apicaps.CapSet - llbCaps apicaps.CapSet - requests map[string]*pb.SolveRequest -} - -func (c *grpcClient) requestForRef(ref client.Reference) (*pb.SolveRequest, error) { - emptyReq := &pb.SolveRequest{ - Definition: &opspb.Definition{}, - } - if ref == nil { - return emptyReq, nil - } - r, ok := ref.(*reference) - if !ok { - return nil, errors.Errorf("return reference has invalid type %T", ref) - } - if r.id == "" { - return emptyReq, nil - } - req, ok := c.requests[r.id] - if !ok { - return nil, errors.Errorf("did not find request for return reference %s", r.id) - } - return req, nil -} - -func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (*client.Result, error) { - if creq.Definition != nil { - for _, md := range creq.Definition.Metadata { - for cap := range md.Caps { - if err := c.llbCaps.Supports(cap); err != nil { - return nil, err - } - } - } - } - - req := &pb.SolveRequest{ - Definition: creq.Definition, - 
Frontend: creq.Frontend, - FrontendOpt: creq.FrontendOpt, - ImportCacheRefs: creq.ImportCacheRefs, - AllowResultReturn: true, - } - - // backwards compatibility with inline return - if c.caps.Supports(pb.CapReturnResult) != nil { - req.ExporterAttr = []byte("{}") - } - - resp, err := c.client.Solve(ctx, req) - if err != nil { - return nil, err - } - - res := &client.Result{} - - if resp.Result == nil { - if id := resp.Ref; id != "" { - c.requests[id] = req - } - res.SetRef(&reference{id: resp.Ref, c: c}) - } else { - res.Metadata = resp.Result.Metadata - switch pbRes := resp.Result.Result.(type) { - case *pb.Result_Ref: - if id := pbRes.Ref; id != "" { - res.SetRef(&reference{id: id, c: c}) - } - case *pb.Result_Refs: - for k, v := range pbRes.Refs.Refs { - ref := &reference{id: v, c: c} - if v == "" { - ref = nil - } - res.AddRef(k, ref) - } - } - } - - return res, nil -} - -func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt client.ResolveImageConfigOpt) (digest.Digest, []byte, error) { - var p *opspb.Platform - if platform := opt.Platform; platform != nil { - p = &opspb.Platform{ - OS: platform.OS, - Architecture: platform.Architecture, - Variant: platform.Variant, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - } - } - resp, err := c.client.ResolveImageConfig(ctx, &pb.ResolveImageConfigRequest{Ref: ref, Platform: p, ResolveMode: opt.ResolveMode, LogName: opt.LogName}) - if err != nil { - return "", nil, err - } - return resp.Digest, resp.Config, nil -} - -func (c *grpcClient) BuildOpts() client.BuildOpts { - return client.BuildOpts{ - Opts: c.opts, - SessionID: c.sessionID, - Workers: c.workers, - Product: c.product, - LLBCaps: c.llbCaps, - Caps: c.caps, - } -} - -type reference struct { - id string - c *grpcClient -} - -func (r *reference) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) { - rfr := &pb.ReadFileRequest{FilePath: req.Filename, Ref: r.id} - if r := req.Range; r != nil { - rfr.Range = &pb.FileRange{ - Offset: int64(r.Offset), - Length: int64(r.Length), - } - } - resp, err := r.c.client.ReadFile(ctx, rfr) - if err != nil { - return nil, err - } - return resp.Data, nil -} - -func (r *reference) ReadDir(ctx context.Context, req client.ReadDirRequest) ([]*fstypes.Stat, error) { - if err := r.c.caps.Supports(pb.CapReadDir); err != nil { - return nil, err - } - rdr := &pb.ReadDirRequest{ - DirPath: req.Path, - IncludePattern: req.IncludePattern, - Ref: r.id, - } - resp, err := r.c.client.ReadDir(ctx, rdr) - if err != nil { - return nil, err - } - return resp.Entries, nil -} - -func (r *reference) StatFile(ctx context.Context, req client.StatRequest) (*fstypes.Stat, error) { - if err := r.c.caps.Supports(pb.CapStatFile); err != nil { - return nil, err - } - rdr := &pb.StatFileRequest{ - Path: req.Path, - Ref: r.id, - } - resp, err := r.c.client.StatFile(ctx, rdr) - if err != nil { - return nil, err - } - return resp.Stat, nil -} - -func grpcClientConn(ctx context.Context) (context.Context, *grpc.ClientConn, error) { - dialOpt := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) { - return stdioConn(), nil - }) - - cc, err := grpc.DialContext(ctx, "", dialOpt, grpc.WithInsecure()) - if err != nil { - return nil, nil, errors.Wrap(err, "failed to create grpc client") - } - - ctx, cancel := context.WithCancel(ctx) - _ = cancel - // go monitorHealth(ctx, cc, cancel) - - return ctx, cc, nil -} - -func stdioConn() net.Conn { - return &conn{os.Stdin, os.Stdout, os.Stdout} -} - -type conn struct { - 
io.Reader - io.Writer - io.Closer -} - -func (s *conn) LocalAddr() net.Addr { - return dummyAddr{} -} -func (s *conn) RemoteAddr() net.Addr { - return dummyAddr{} -} -func (s *conn) SetDeadline(t time.Time) error { - return nil -} -func (s *conn) SetReadDeadline(t time.Time) error { - return nil -} -func (s *conn) SetWriteDeadline(t time.Time) error { - return nil -} - -type dummyAddr struct { -} - -func (d dummyAddr) Network() string { - return "pipe" -} - -func (d dummyAddr) String() string { - return "localhost" -} - -func opts() map[string]string { - opts := map[string]string{} - for _, env := range os.Environ() { - parts := strings.SplitN(env, "=", 2) - k := parts[0] - v := "" - if len(parts) == 2 { - v = parts[1] - } - if !strings.HasPrefix(k, frontendPrefix) { - continue - } - parts = strings.SplitN(v, "=", 2) - v = "" - if len(parts) == 2 { - v = parts[1] - } - opts[parts[0]] = v - } - return opts -} - -func sessionID() string { - return os.Getenv("BUILDKIT_SESSION_ID") -} - -func workers() []client.WorkerInfo { - var c []client.WorkerInfo - if err := json.Unmarshal([]byte(os.Getenv("BUILDKIT_WORKERS")), &c); err != nil { - return nil - } - return c -} - -func product() string { - return os.Getenv("BUILDKIT_EXPORTEDPRODUCT") -} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go deleted file mode 100644 index fd05e4c8195e..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go +++ /dev/null @@ -1,87 +0,0 @@ -package moby_buildkit_v1_frontend - -import "github.com/moby/buildkit/util/apicaps" - -var Caps apicaps.CapList - -// Every backwards or forwards non-compatible change needs to add a new capability row. -// By default new capabilities should be experimental. After merge a capability is -// considered immutable. After a capability is marked stable it should not be disabled. 
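// Consumers gate optional RPCs on these IDs. For example, the grpc client
// above calls caps.Supports(CapReadDir) and caps.Supports(CapStatFile)
// before issuing ReadDir/StatFile, so an older bridge yields a descriptive
// capability error up front rather than failing mid-call.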
- -const ( - CapSolveBase apicaps.CapID = "solve.base" - CapSolveInlineReturn apicaps.CapID = "solve.inlinereturn" - CapResolveImage apicaps.CapID = "resolveimage" - CapResolveImageResolveMode apicaps.CapID = "resolveimage.resolvemode" - CapReadFile apicaps.CapID = "readfile" - CapReturnResult apicaps.CapID = "return" - CapReturnMap apicaps.CapID = "returnmap" - CapReadDir apicaps.CapID = "readdir" - CapStatFile apicaps.CapID = "statfile" -) - -func init() { - - Caps.Init(apicaps.Cap{ - ID: CapSolveBase, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSolveInlineReturn, - Name: "inline return from solve", - Enabled: true, - Deprecated: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapResolveImage, - Name: "resolve remote image config", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapResolveImageResolveMode, - Name: "resolve remote image config with custom resolvemode", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapReadFile, - Name: "read static file", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapReturnResult, - Name: "return solve result", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapReturnMap, - Name: "return reference map", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapReadDir, - Name: "read static directory", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapStatFile, - Name: "stat a file", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) -} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go deleted file mode 100644 index cf90c8a26e1b..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go +++ /dev/null @@ -1,4219 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: gateway.proto - -/* - Package moby_buildkit_v1_frontend is a generated protocol buffer package. - - It is generated from these files: - gateway.proto - - It has these top-level messages: - Result - RefMap - ReturnRequest - ReturnResponse - ResolveImageConfigRequest - ResolveImageConfigResponse - SolveRequest - SolveResponse - ReadFileRequest - FileRange - ReadFileResponse - ReadDirRequest - ReadDirResponse - StatFileRequest - StatFileResponse - PingRequest - PongResponse -*/ -package moby_buildkit_v1_frontend - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" -import google_rpc "github.com/gogo/googleapis/google/rpc" -import pb "github.com/moby/buildkit/solver/pb" -import moby_buildkit_v1_types "github.com/moby/buildkit/api/types" -import moby_buildkit_v1_apicaps "github.com/moby/buildkit/util/apicaps/pb" -import fsutil_types "github.com/tonistiigi/fsutil/types" - -import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" - -import context "golang.org/x/net/context" -import grpc "google.golang.org/grpc" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type Result struct { - // Types that are valid to be assigned to Result: - // *Result_Ref - // *Result_Refs - Result isResult_Result `protobuf_oneof:"result"` - Metadata map[string][]byte `protobuf:"bytes,10,rep,name=metadata" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *Result) Reset() { *m = Result{} } -func (m *Result) String() string { return proto.CompactTextString(m) } -func (*Result) ProtoMessage() {} -func (*Result) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{0} } - -type isResult_Result interface { - isResult_Result() - MarshalTo([]byte) (int, error) - Size() int -} - -type Result_Ref struct { - Ref string `protobuf:"bytes,1,opt,name=ref,proto3,oneof"` -} -type Result_Refs struct { - Refs *RefMap `protobuf:"bytes,2,opt,name=refs,oneof"` -} - -func (*Result_Ref) isResult_Result() {} -func (*Result_Refs) isResult_Result() {} - -func (m *Result) GetResult() isResult_Result { - if m != nil { - return m.Result - } - return nil -} - -func (m *Result) GetRef() string { - if x, ok := m.GetResult().(*Result_Ref); ok { - return x.Ref - } - return "" -} - -func (m *Result) GetRefs() *RefMap { - if x, ok := m.GetResult().(*Result_Refs); ok { - return x.Refs - } - return nil -} - -func (m *Result) GetMetadata() map[string][]byte { - if m != nil { - return m.Metadata - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*Result) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _Result_OneofMarshaler, _Result_OneofUnmarshaler, _Result_OneofSizer, []interface{}{
-		(*Result_Ref)(nil),
-		(*Result_Refs)(nil),
-	}
-}
-
-func _Result_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*Result)
-	// result
-	switch x := m.Result.(type) {
-	case *Result_Ref:
-		_ = b.EncodeVarint(1<<3 | proto.WireBytes)
-		_ = b.EncodeStringBytes(x.Ref)
-	case *Result_Refs:
-		_ = b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Refs); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("Result.Result has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _Result_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*Result)
-	switch tag {
-	case 1: // result.ref
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeStringBytes()
-		m.Result = &Result_Ref{x}
-		return true, err
-	case 2: // result.refs
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(RefMap)
-		err := b.DecodeMessage(msg)
-		m.Result = &Result_Refs{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _Result_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*Result)
-	// result
-	switch x := m.Result.(type) {
-	case *Result_Ref:
-		n += proto.SizeVarint(1<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(len(x.Ref)))
-		n += len(x.Ref)
-	case *Result_Refs:
-		s := proto.Size(x.Refs)
-		n += proto.SizeVarint(2<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
-type RefMap struct {
-	Refs map[string]string `protobuf:"bytes,1,rep,name=refs" json:"refs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-}
-
-func (m *RefMap) Reset()                    { *m = RefMap{} }
-func (m *RefMap) String() string            { return proto.CompactTextString(m) }
-func (*RefMap) ProtoMessage()               {}
-func (*RefMap) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{1} }
-
-func (m *RefMap) GetRefs() map[string]string {
-	if m != nil {
-		return m.Refs
-	}
-	return nil
-}
-
-type ReturnRequest struct {
-	Result *Result            `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"`
-	Error  *google_rpc.Status `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
-}
-
-func (m *ReturnRequest) Reset()                    { *m = ReturnRequest{} }
-func (m *ReturnRequest) String() string            { return proto.CompactTextString(m) }
-func (*ReturnRequest) ProtoMessage()               {}
-func (*ReturnRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{2} }
-
-func (m *ReturnRequest) GetResult() *Result {
-	if m != nil {
-		return m.Result
-	}
-	return nil
-}
-
-func (m *ReturnRequest) GetError() *google_rpc.Status {
-	if m != nil {
-		return m.Error
-	}
-	return nil
-}
-
-type ReturnResponse struct {
-}
-
-func (m *ReturnResponse) Reset()                    { *m = ReturnResponse{} }
-func (m *ReturnResponse) String() string            { return proto.CompactTextString(m) }
-func (*ReturnResponse) ProtoMessage()               {}
-func (*ReturnResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{3} }
-
-type ResolveImageConfigRequest struct {
-	Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"`
-	Platform
*pb.Platform `protobuf:"bytes,2,opt,name=Platform" json:"Platform,omitempty"` - ResolveMode string `protobuf:"bytes,3,opt,name=ResolveMode,proto3" json:"ResolveMode,omitempty"` - LogName string `protobuf:"bytes,4,opt,name=LogName,proto3" json:"LogName,omitempty"` -} - -func (m *ResolveImageConfigRequest) Reset() { *m = ResolveImageConfigRequest{} } -func (m *ResolveImageConfigRequest) String() string { return proto.CompactTextString(m) } -func (*ResolveImageConfigRequest) ProtoMessage() {} -func (*ResolveImageConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{4} } - -func (m *ResolveImageConfigRequest) GetRef() string { - if m != nil { - return m.Ref - } - return "" -} - -func (m *ResolveImageConfigRequest) GetPlatform() *pb.Platform { - if m != nil { - return m.Platform - } - return nil -} - -func (m *ResolveImageConfigRequest) GetResolveMode() string { - if m != nil { - return m.ResolveMode - } - return "" -} - -func (m *ResolveImageConfigRequest) GetLogName() string { - if m != nil { - return m.LogName - } - return "" -} - -type ResolveImageConfigResponse struct { - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=Digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"Digest"` - Config []byte `protobuf:"bytes,2,opt,name=Config,proto3" json:"Config,omitempty"` -} - -func (m *ResolveImageConfigResponse) Reset() { *m = ResolveImageConfigResponse{} } -func (m *ResolveImageConfigResponse) String() string { return proto.CompactTextString(m) } -func (*ResolveImageConfigResponse) ProtoMessage() {} -func (*ResolveImageConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptorGateway, []int{5} -} - -func (m *ResolveImageConfigResponse) GetConfig() []byte { - if m != nil { - return m.Config - } - return nil -} - -type SolveRequest struct { - Definition *pb.Definition `protobuf:"bytes,1,opt,name=Definition" json:"Definition,omitempty"` - Frontend string `protobuf:"bytes,2,opt,name=Frontend,proto3" json:"Frontend,omitempty"` - FrontendOpt map[string]string `protobuf:"bytes,3,rep,name=FrontendOpt" json:"FrontendOpt,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - ImportCacheRefs []string `protobuf:"bytes,4,rep,name=ImportCacheRefs" json:"ImportCacheRefs,omitempty"` - AllowResultReturn bool `protobuf:"varint,5,opt,name=allowResultReturn,proto3" json:"allowResultReturn,omitempty"` - // apicaps.CapSolveInlineReturn deprecated - Final bool `protobuf:"varint,10,opt,name=Final,proto3" json:"Final,omitempty"` - ExporterAttr []byte `protobuf:"bytes,11,opt,name=ExporterAttr,proto3" json:"ExporterAttr,omitempty"` -} - -func (m *SolveRequest) Reset() { *m = SolveRequest{} } -func (m *SolveRequest) String() string { return proto.CompactTextString(m) } -func (*SolveRequest) ProtoMessage() {} -func (*SolveRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{6} } - -func (m *SolveRequest) GetDefinition() *pb.Definition { - if m != nil { - return m.Definition - } - return nil -} - -func (m *SolveRequest) GetFrontend() string { - if m != nil { - return m.Frontend - } - return "" -} - -func (m *SolveRequest) GetFrontendOpt() map[string]string { - if m != nil { - return m.FrontendOpt - } - return nil -} - -func (m *SolveRequest) GetImportCacheRefs() []string { - if m != nil { - return m.ImportCacheRefs - } - return nil -} - -func (m *SolveRequest) GetAllowResultReturn() bool { - if m != nil { - return m.AllowResultReturn - } - return false -} - -func (m 
*SolveRequest) GetFinal() bool { - if m != nil { - return m.Final - } - return false -} - -func (m *SolveRequest) GetExporterAttr() []byte { - if m != nil { - return m.ExporterAttr - } - return nil -} - -type SolveResponse struct { - // deprecated - Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` - // these fields are returned when allowMapReturn was set - Result *Result `protobuf:"bytes,3,opt,name=result" json:"result,omitempty"` -} - -func (m *SolveResponse) Reset() { *m = SolveResponse{} } -func (m *SolveResponse) String() string { return proto.CompactTextString(m) } -func (*SolveResponse) ProtoMessage() {} -func (*SolveResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{7} } - -func (m *SolveResponse) GetRef() string { - if m != nil { - return m.Ref - } - return "" -} - -func (m *SolveResponse) GetResult() *Result { - if m != nil { - return m.Result - } - return nil -} - -type ReadFileRequest struct { - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - FilePath string `protobuf:"bytes,2,opt,name=FilePath,proto3" json:"FilePath,omitempty"` - Range *FileRange `protobuf:"bytes,3,opt,name=Range" json:"Range,omitempty"` -} - -func (m *ReadFileRequest) Reset() { *m = ReadFileRequest{} } -func (m *ReadFileRequest) String() string { return proto.CompactTextString(m) } -func (*ReadFileRequest) ProtoMessage() {} -func (*ReadFileRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{8} } - -func (m *ReadFileRequest) GetRef() string { - if m != nil { - return m.Ref - } - return "" -} - -func (m *ReadFileRequest) GetFilePath() string { - if m != nil { - return m.FilePath - } - return "" -} - -func (m *ReadFileRequest) GetRange() *FileRange { - if m != nil { - return m.Range - } - return nil -} - -type FileRange struct { - Offset int64 `protobuf:"varint,1,opt,name=Offset,proto3" json:"Offset,omitempty"` - Length int64 `protobuf:"varint,2,opt,name=Length,proto3" json:"Length,omitempty"` -} - -func (m *FileRange) Reset() { *m = FileRange{} } -func (m *FileRange) String() string { return proto.CompactTextString(m) } -func (*FileRange) ProtoMessage() {} -func (*FileRange) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{9} } - -func (m *FileRange) GetOffset() int64 { - if m != nil { - return m.Offset - } - return 0 -} - -func (m *FileRange) GetLength() int64 { - if m != nil { - return m.Length - } - return 0 -} - -type ReadFileResponse struct { - Data []byte `protobuf:"bytes,1,opt,name=Data,proto3" json:"Data,omitempty"` -} - -func (m *ReadFileResponse) Reset() { *m = ReadFileResponse{} } -func (m *ReadFileResponse) String() string { return proto.CompactTextString(m) } -func (*ReadFileResponse) ProtoMessage() {} -func (*ReadFileResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{10} } - -func (m *ReadFileResponse) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -type ReadDirRequest struct { - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - DirPath string `protobuf:"bytes,2,opt,name=DirPath,proto3" json:"DirPath,omitempty"` - IncludePattern string `protobuf:"bytes,3,opt,name=IncludePattern,proto3" json:"IncludePattern,omitempty"` -} - -func (m *ReadDirRequest) Reset() { *m = ReadDirRequest{} } -func (m *ReadDirRequest) String() string { return proto.CompactTextString(m) } -func (*ReadDirRequest) ProtoMessage() {} -func (*ReadDirRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{11} } - -func 
(m *ReadDirRequest) GetRef() string { - if m != nil { - return m.Ref - } - return "" -} - -func (m *ReadDirRequest) GetDirPath() string { - if m != nil { - return m.DirPath - } - return "" -} - -func (m *ReadDirRequest) GetIncludePattern() string { - if m != nil { - return m.IncludePattern - } - return "" -} - -type ReadDirResponse struct { - Entries []*fsutil_types.Stat `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` -} - -func (m *ReadDirResponse) Reset() { *m = ReadDirResponse{} } -func (m *ReadDirResponse) String() string { return proto.CompactTextString(m) } -func (*ReadDirResponse) ProtoMessage() {} -func (*ReadDirResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{12} } - -func (m *ReadDirResponse) GetEntries() []*fsutil_types.Stat { - if m != nil { - return m.Entries - } - return nil -} - -type StatFileRequest struct { - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - Path string `protobuf:"bytes,2,opt,name=Path,proto3" json:"Path,omitempty"` -} - -func (m *StatFileRequest) Reset() { *m = StatFileRequest{} } -func (m *StatFileRequest) String() string { return proto.CompactTextString(m) } -func (*StatFileRequest) ProtoMessage() {} -func (*StatFileRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{13} } - -func (m *StatFileRequest) GetRef() string { - if m != nil { - return m.Ref - } - return "" -} - -func (m *StatFileRequest) GetPath() string { - if m != nil { - return m.Path - } - return "" -} - -type StatFileResponse struct { - Stat *fsutil_types.Stat `protobuf:"bytes,1,opt,name=stat" json:"stat,omitempty"` -} - -func (m *StatFileResponse) Reset() { *m = StatFileResponse{} } -func (m *StatFileResponse) String() string { return proto.CompactTextString(m) } -func (*StatFileResponse) ProtoMessage() {} -func (*StatFileResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{14} } - -func (m *StatFileResponse) GetStat() *fsutil_types.Stat { - if m != nil { - return m.Stat - } - return nil -} - -type PingRequest struct { -} - -func (m *PingRequest) Reset() { *m = PingRequest{} } -func (m *PingRequest) String() string { return proto.CompactTextString(m) } -func (*PingRequest) ProtoMessage() {} -func (*PingRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{15} } - -type PongResponse struct { - FrontendAPICaps []moby_buildkit_v1_apicaps.APICap `protobuf:"bytes,1,rep,name=FrontendAPICaps" json:"FrontendAPICaps"` - LLBCaps []moby_buildkit_v1_apicaps.APICap `protobuf:"bytes,2,rep,name=LLBCaps" json:"LLBCaps"` - Workers []*moby_buildkit_v1_types.WorkerRecord `protobuf:"bytes,3,rep,name=Workers" json:"Workers,omitempty"` -} - -func (m *PongResponse) Reset() { *m = PongResponse{} } -func (m *PongResponse) String() string { return proto.CompactTextString(m) } -func (*PongResponse) ProtoMessage() {} -func (*PongResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{16} } - -func (m *PongResponse) GetFrontendAPICaps() []moby_buildkit_v1_apicaps.APICap { - if m != nil { - return m.FrontendAPICaps - } - return nil -} - -func (m *PongResponse) GetLLBCaps() []moby_buildkit_v1_apicaps.APICap { - if m != nil { - return m.LLBCaps - } - return nil -} - -func (m *PongResponse) GetWorkers() []*moby_buildkit_v1_types.WorkerRecord { - if m != nil { - return m.Workers - } - return nil -} - -func init() { - proto.RegisterType((*Result)(nil), "moby.buildkit.v1.frontend.Result") - proto.RegisterType((*RefMap)(nil), "moby.buildkit.v1.frontend.RefMap") - 
proto.RegisterType((*ReturnRequest)(nil), "moby.buildkit.v1.frontend.ReturnRequest")
-	proto.RegisterType((*ReturnResponse)(nil), "moby.buildkit.v1.frontend.ReturnResponse")
-	proto.RegisterType((*ResolveImageConfigRequest)(nil), "moby.buildkit.v1.frontend.ResolveImageConfigRequest")
-	proto.RegisterType((*ResolveImageConfigResponse)(nil), "moby.buildkit.v1.frontend.ResolveImageConfigResponse")
-	proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.frontend.SolveRequest")
-	proto.RegisterType((*SolveResponse)(nil), "moby.buildkit.v1.frontend.SolveResponse")
-	proto.RegisterType((*ReadFileRequest)(nil), "moby.buildkit.v1.frontend.ReadFileRequest")
-	proto.RegisterType((*FileRange)(nil), "moby.buildkit.v1.frontend.FileRange")
-	proto.RegisterType((*ReadFileResponse)(nil), "moby.buildkit.v1.frontend.ReadFileResponse")
-	proto.RegisterType((*ReadDirRequest)(nil), "moby.buildkit.v1.frontend.ReadDirRequest")
-	proto.RegisterType((*ReadDirResponse)(nil), "moby.buildkit.v1.frontend.ReadDirResponse")
-	proto.RegisterType((*StatFileRequest)(nil), "moby.buildkit.v1.frontend.StatFileRequest")
-	proto.RegisterType((*StatFileResponse)(nil), "moby.buildkit.v1.frontend.StatFileResponse")
-	proto.RegisterType((*PingRequest)(nil), "moby.buildkit.v1.frontend.PingRequest")
-	proto.RegisterType((*PongResponse)(nil), "moby.buildkit.v1.frontend.PongResponse")
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// Client API for LLBBridge service
-
-type LLBBridgeClient interface {
-	// apicaps:CapResolveImage
-	ResolveImageConfig(ctx context.Context, in *ResolveImageConfigRequest, opts ...grpc.CallOption) (*ResolveImageConfigResponse, error)
-	// apicaps:CapSolveBase
-	Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error)
-	// apicaps:CapReadFile
-	ReadFile(ctx context.Context, in *ReadFileRequest, opts ...grpc.CallOption) (*ReadFileResponse, error)
-	// apicaps:CapReadDir
-	ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error)
-	// apicaps:CapStatFile
-	StatFile(ctx context.Context, in *StatFileRequest, opts ...grpc.CallOption) (*StatFileResponse, error)
-	Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error)
-	Return(ctx context.Context, in *ReturnRequest, opts ...grpc.CallOption) (*ReturnResponse, error)
-}
-
-type lLBBridgeClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewLLBBridgeClient(cc *grpc.ClientConn) LLBBridgeClient {
-	return &lLBBridgeClient{cc}
-}
-
-func (c *lLBBridgeClient) ResolveImageConfig(ctx context.Context, in *ResolveImageConfigRequest, opts ...grpc.CallOption) (*ResolveImageConfigResponse, error) {
-	out := new(ResolveImageConfigResponse)
-	err := grpc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ResolveImageConfig", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *lLBBridgeClient) Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) {
-	out := new(SolveResponse)
-	err := grpc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Solve", in, out, c.cc, opts...)
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *lLBBridgeClient) ReadFile(ctx context.Context, in *ReadFileRequest, opts ...grpc.CallOption) (*ReadFileResponse, error) { - out := new(ReadFileResponse) - err := grpc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ReadFile", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lLBBridgeClient) ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error) { - out := new(ReadDirResponse) - err := grpc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ReadDir", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lLBBridgeClient) StatFile(ctx context.Context, in *StatFileRequest, opts ...grpc.CallOption) (*StatFileResponse, error) { - out := new(StatFileResponse) - err := grpc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/StatFile", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lLBBridgeClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error) { - out := new(PongResponse) - err := grpc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Ping", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lLBBridgeClient) Return(ctx context.Context, in *ReturnRequest, opts ...grpc.CallOption) (*ReturnResponse, error) { - out := new(ReturnResponse) - err := grpc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Return", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for LLBBridge service - -type LLBBridgeServer interface { - // apicaps:CapResolveImage - ResolveImageConfig(context.Context, *ResolveImageConfigRequest) (*ResolveImageConfigResponse, error) - // apicaps:CapSolveBase - Solve(context.Context, *SolveRequest) (*SolveResponse, error) - // apicaps:CapReadFile - ReadFile(context.Context, *ReadFileRequest) (*ReadFileResponse, error) - // apicaps:CapReadDir - ReadDir(context.Context, *ReadDirRequest) (*ReadDirResponse, error) - // apicaps:CapStatFile - StatFile(context.Context, *StatFileRequest) (*StatFileResponse, error) - Ping(context.Context, *PingRequest) (*PongResponse, error) - Return(context.Context, *ReturnRequest) (*ReturnResponse, error) -} - -func RegisterLLBBridgeServer(s *grpc.Server, srv LLBBridgeServer) { - s.RegisterService(&_LLBBridge_serviceDesc, srv) -} - -func _LLBBridge_ResolveImageConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ResolveImageConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).ResolveImageConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ResolveImageConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).ResolveImageConfig(ctx, req.(*ResolveImageConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _LLBBridge_Solve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SolveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).Solve(ctx, in) - } - info := &grpc.UnaryServerInfo{ 
- Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Solve", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).Solve(ctx, req.(*SolveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _LLBBridge_ReadFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReadFileRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).ReadFile(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ReadFile", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).ReadFile(ctx, req.(*ReadFileRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _LLBBridge_ReadDir_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReadDirRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).ReadDir(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ReadDir", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).ReadDir(ctx, req.(*ReadDirRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _LLBBridge_StatFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatFileRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).StatFile(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/StatFile", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).StatFile(ctx, req.(*StatFileRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _LLBBridge_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PingRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).Ping(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Ping", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).Ping(ctx, req.(*PingRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _LLBBridge_Return_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReturnRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).Return(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Return", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).Return(ctx, req.(*ReturnRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _LLBBridge_serviceDesc = grpc.ServiceDesc{ - ServiceName: 
"moby.buildkit.v1.frontend.LLBBridge", - HandlerType: (*LLBBridgeServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ResolveImageConfig", - Handler: _LLBBridge_ResolveImageConfig_Handler, - }, - { - MethodName: "Solve", - Handler: _LLBBridge_Solve_Handler, - }, - { - MethodName: "ReadFile", - Handler: _LLBBridge_ReadFile_Handler, - }, - { - MethodName: "ReadDir", - Handler: _LLBBridge_ReadDir_Handler, - }, - { - MethodName: "StatFile", - Handler: _LLBBridge_StatFile_Handler, - }, - { - MethodName: "Ping", - Handler: _LLBBridge_Ping_Handler, - }, - { - MethodName: "Return", - Handler: _LLBBridge_Return_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "gateway.proto", -} - -func (m *Result) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Result) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Result != nil { - nn1, err := m.Result.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += nn1 - } - if len(m.Metadata) > 0 { - for k, _ := range m.Metadata { - dAtA[i] = 0x52 - i++ - v := m.Metadata[k] - byteSize := 0 - if len(v) > 0 { - byteSize = 1 + len(v) + sovGateway(uint64(len(v))) - } - mapSize := 1 + len(k) + sovGateway(uint64(len(k))) + byteSize - i = encodeVarintGateway(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - if len(v) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - } - return i, nil -} - -func (m *Result_Ref) MarshalTo(dAtA []byte) (int, error) { - i := 0 - dAtA[i] = 0xa - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i += copy(dAtA[i:], m.Ref) - return i, nil -} -func (m *Result_Refs) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.Refs != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGateway(dAtA, i, uint64(m.Refs.Size())) - n2, err := m.Refs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - return i, nil -} -func (m *RefMap) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RefMap) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Refs) > 0 { - for k, _ := range m.Refs { - dAtA[i] = 0xa - i++ - v := m.Refs[k] - mapSize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) - i = encodeVarintGateway(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil -} - -func (m *ReturnRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReturnRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Result != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGateway(dAtA, i, uint64(m.Result.Size())) - n3, err := m.Result.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - if m.Error != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGateway(dAtA, i, uint64(m.Error.Size())) - n4, err := 
m.Error.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - return i, nil -} - -func (m *ReturnResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReturnResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *ResolveImageConfigRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResolveImageConfigRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Ref) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i += copy(dAtA[i:], m.Ref) - } - if m.Platform != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGateway(dAtA, i, uint64(m.Platform.Size())) - n5, err := m.Platform.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - if len(m.ResolveMode) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.ResolveMode))) - i += copy(dAtA[i:], m.ResolveMode) - } - if len(m.LogName) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.LogName))) - i += copy(dAtA[i:], m.LogName) - } - return i, nil -} - -func (m *ResolveImageConfigResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResolveImageConfigResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Digest) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.Digest))) - i += copy(dAtA[i:], m.Digest) - } - if len(m.Config) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.Config))) - i += copy(dAtA[i:], m.Config) - } - return i, nil -} - -func (m *SolveRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Definition != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGateway(dAtA, i, uint64(m.Definition.Size())) - n6, err := m.Definition.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if len(m.Frontend) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.Frontend))) - i += copy(dAtA[i:], m.Frontend) - } - if len(m.FrontendOpt) > 0 { - for k, _ := range m.FrontendOpt { - dAtA[i] = 0x1a - i++ - v := m.FrontendOpt[k] - mapSize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) - i = encodeVarintGateway(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.ImportCacheRefs) > 0 { - for _, s := range m.ImportCacheRefs { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.AllowResultReturn { - dAtA[i] = 0x28 - i++ - if m.AllowResultReturn { - dAtA[i] = 1 - } else { - 
dAtA[i] = 0 - } - i++ - } - if m.Final { - dAtA[i] = 0x50 - i++ - if m.Final { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.ExporterAttr) > 0 { - dAtA[i] = 0x5a - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.ExporterAttr))) - i += copy(dAtA[i:], m.ExporterAttr) - } - return i, nil -} - -func (m *SolveResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Ref) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i += copy(dAtA[i:], m.Ref) - } - if m.Result != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGateway(dAtA, i, uint64(m.Result.Size())) - n7, err := m.Result.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - return i, nil -} - -func (m *ReadFileRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadFileRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Ref) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i += copy(dAtA[i:], m.Ref) - } - if len(m.FilePath) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.FilePath))) - i += copy(dAtA[i:], m.FilePath) - } - if m.Range != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGateway(dAtA, i, uint64(m.Range.Size())) - n8, err := m.Range.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - return i, nil -} - -func (m *FileRange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FileRange) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Offset != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintGateway(dAtA, i, uint64(m.Offset)) - } - if m.Length != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintGateway(dAtA, i, uint64(m.Length)) - } - return i, nil -} - -func (m *ReadFileResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadFileResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Data) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) - } - return i, nil -} - -func (m *ReadDirRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadDirRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Ref) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i += copy(dAtA[i:], m.Ref) - } - if len(m.DirPath) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.DirPath))) - i += copy(dAtA[i:], m.DirPath) - } - if len(m.IncludePattern) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.IncludePattern))) - i += copy(dAtA[i:], m.IncludePattern) - } - return i, nil 
-} - -func (m *ReadDirResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadDirResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Entries) > 0 { - for _, msg := range m.Entries { - dAtA[i] = 0xa - i++ - i = encodeVarintGateway(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *StatFileRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatFileRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Ref) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i += copy(dAtA[i:], m.Ref) - } - if len(m.Path) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintGateway(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - } - return i, nil -} - -func (m *StatFileResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatFileResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Stat != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGateway(dAtA, i, uint64(m.Stat.Size())) - n9, err := m.Stat.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - return i, nil -} - -func (m *PingRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PingRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *PongResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PongResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.FrontendAPICaps) > 0 { - for _, msg := range m.FrontendAPICaps { - dAtA[i] = 0xa - i++ - i = encodeVarintGateway(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.LLBCaps) > 0 { - for _, msg := range m.LLBCaps { - dAtA[i] = 0x12 - i++ - i = encodeVarintGateway(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Workers) > 0 { - for _, msg := range m.Workers { - dAtA[i] = 0x1a - i++ - i = encodeVarintGateway(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeVarintGateway(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Result) Size() (n int) { - var l int - _ = l - if m.Result != nil { - n += m.Result.Size() - } - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - l = 0 - if len(v) > 0 { - l = 1 + len(v) + sovGateway(uint64(len(v))) - } - mapEntrySize := 1 + len(k) + 
sovGateway(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) - } - } - return n -} - -func (m *Result_Ref) Size() (n int) { - var l int - _ = l - l = len(m.Ref) - n += 1 + l + sovGateway(uint64(l)) - return n -} -func (m *Result_Refs) Size() (n int) { - var l int - _ = l - if m.Refs != nil { - l = m.Refs.Size() - n += 1 + l + sovGateway(uint64(l)) - } - return n -} -func (m *RefMap) Size() (n int) { - var l int - _ = l - if len(m.Refs) > 0 { - for k, v := range m.Refs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) - n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) - } - } - return n -} - -func (m *ReturnRequest) Size() (n int) { - var l int - _ = l - if m.Result != nil { - l = m.Result.Size() - n += 1 + l + sovGateway(uint64(l)) - } - if m.Error != nil { - l = m.Error.Size() - n += 1 + l + sovGateway(uint64(l)) - } - return n -} - -func (m *ReturnResponse) Size() (n int) { - var l int - _ = l - return n -} - -func (m *ResolveImageConfigRequest) Size() (n int) { - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if m.Platform != nil { - l = m.Platform.Size() - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.ResolveMode) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.LogName) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - return n -} - -func (m *ResolveImageConfigResponse) Size() (n int) { - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.Config) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - return n -} - -func (m *SolveRequest) Size() (n int) { - var l int - _ = l - if m.Definition != nil { - l = m.Definition.Size() - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.Frontend) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if len(m.FrontendOpt) > 0 { - for k, v := range m.FrontendOpt { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) - n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) - } - } - if len(m.ImportCacheRefs) > 0 { - for _, s := range m.ImportCacheRefs { - l = len(s) - n += 1 + l + sovGateway(uint64(l)) - } - } - if m.AllowResultReturn { - n += 2 - } - if m.Final { - n += 2 - } - l = len(m.ExporterAttr) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - return n -} - -func (m *SolveResponse) Size() (n int) { - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if m.Result != nil { - l = m.Result.Size() - n += 1 + l + sovGateway(uint64(l)) - } - return n -} - -func (m *ReadFileRequest) Size() (n int) { - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.FilePath) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if m.Range != nil { - l = m.Range.Size() - n += 1 + l + sovGateway(uint64(l)) - } - return n -} - -func (m *FileRange) Size() (n int) { - var l int - _ = l - if m.Offset != 0 { - n += 1 + sovGateway(uint64(m.Offset)) - } - if m.Length != 0 { - n += 1 + sovGateway(uint64(m.Length)) - } - return n -} - -func (m *ReadFileResponse) Size() (n int) { - var l int - _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - return n -} - -func (m *ReadDirRequest) Size() (n int) { - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.DirPath) - if l > 0 { - n += 1 + l + 
sovGateway(uint64(l)) - } - l = len(m.IncludePattern) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - return n -} - -func (m *ReadDirResponse) Size() (n int) { - var l int - _ = l - if len(m.Entries) > 0 { - for _, e := range m.Entries { - l = e.Size() - n += 1 + l + sovGateway(uint64(l)) - } - } - return n -} - -func (m *StatFileRequest) Size() (n int) { - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.Path) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - return n -} - -func (m *StatFileResponse) Size() (n int) { - var l int - _ = l - if m.Stat != nil { - l = m.Stat.Size() - n += 1 + l + sovGateway(uint64(l)) - } - return n -} - -func (m *PingRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *PongResponse) Size() (n int) { - var l int - _ = l - if len(m.FrontendAPICaps) > 0 { - for _, e := range m.FrontendAPICaps { - l = e.Size() - n += 1 + l + sovGateway(uint64(l)) - } - } - if len(m.LLBCaps) > 0 { - for _, e := range m.LLBCaps { - l = e.Size() - n += 1 + l + sovGateway(uint64(l)) - } - } - if len(m.Workers) > 0 { - for _, e := range m.Workers { - l = e.Size() - n += 1 + l + sovGateway(uint64(l)) - } - } - return n -} - -func sovGateway(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGateway(x uint64) (n int) { - return sovGateway(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Result) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Result: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Result: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Result = &Result_Ref{string(dAtA[iNdEx:postIndex])} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RefMap{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Result = &Result_Refs{v} - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = make(map[string][]byte) - } - var mapkey string - mapvalue := []byte{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGateway - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGateway - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Metadata[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RefMap) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RefMap: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RefMap: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Refs == nil { - m.Refs = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGateway - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGateway - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Refs[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReturnRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReturnRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReturnRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= 
l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Result == nil { - m.Result = &Result{} - } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Error == nil { - m.Error = &google_rpc.Status{} - } - if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReturnResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReturnResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReturnResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResolveImageConfigRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResolveImageConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Platform == nil { - m.Platform = &pb.Platform{} - } - if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResolveMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResolveMode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LogName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LogName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResolveImageConfigResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResolveImageConfigResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResolveImageConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 
0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Config = append(m.Config[:0], dAtA[iNdEx:postIndex]...) - if m.Config == nil { - m.Config = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SolveRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SolveRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SolveRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Definition", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Definition == nil { - m.Definition = &pb.Definition{} - } - if err := m.Definition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Frontend", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Frontend = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FrontendOpt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FrontendOpt == nil { - m.FrontendOpt = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGateway - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGateway - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.FrontendOpt[mapkey] = mapvalue - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImportCacheRefs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImportCacheRefs = append(m.ImportCacheRefs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowResultReturn", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowResultReturn = bool(v != 0) - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Final", wireType) - } - var v int - for shift := uint(0); ; shift += 7 
{ - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Final = bool(v != 0) - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttr", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExporterAttr = append(m.ExporterAttr[:0], dAtA[iNdEx:postIndex]...) - if m.ExporterAttr == nil { - m.ExporterAttr = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SolveResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SolveResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SolveResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Result == nil { - m.Result = &Result{} - } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadFileRequest) Unmarshal(dAtA 
[]byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadFileRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadFileRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FilePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FilePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Range == nil { - m.Range = &FileRange{} - } - if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FileRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FileRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FileRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch 
fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) - } - m.Length = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Length |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadFileResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadFileResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadFileResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadDirRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadDirRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadDirRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DirPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DirPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludePattern", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IncludePattern = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadDirResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadDirResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadDirResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Entries = append(m.Entries, &fsutil_types.Stat{}) - if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatFileRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatFileRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatFileRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + 
skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatFileResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatFileResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatFileResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stat", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Stat == nil { - m.Stat = &fsutil_types.Stat{} - } - if err := m.Stat.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PingRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PingRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PingRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PongResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PongResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PongResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field FrontendAPICaps", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FrontendAPICaps = append(m.FrontendAPICaps, moby_buildkit_v1_apicaps.APICap{}) - if err := m.FrontendAPICaps[len(m.FrontendAPICaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LLBCaps", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LLBCaps = append(m.LLBCaps, moby_buildkit_v1_apicaps.APICap{}) - if err := m.LLBCaps[len(m.LLBCaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Workers = append(m.Workers, &moby_buildkit_v1_types.WorkerRecord{}) - if err := m.Workers[len(m.Workers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGateway(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGateway - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGateway - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGateway - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGateway - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = 
iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGateway - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGateway(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGateway = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGateway = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("gateway.proto", fileDescriptorGateway) } - -var fileDescriptorGateway = []byte{ - // 1144 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xcf, 0x4f, 0x1b, 0xc7, - 0x17, 0x67, 0xb1, 0x8d, 0xed, 0x67, 0x03, 0xfe, 0x8e, 0xbe, 0xaa, 0x36, 0x7b, 0x20, 0xee, 0xaa, - 0xa2, 0x0e, 0x21, 0xbb, 0x2a, 0x69, 0x45, 0x4a, 0xa4, 0xa4, 0x31, 0x04, 0x85, 0xd6, 0x34, 0xd6, - 0xe4, 0x10, 0x29, 0x6a, 0xa5, 0xae, 0xed, 0xf1, 0x32, 0x62, 0xbd, 0xb3, 0x9d, 0x1d, 0x43, 0x51, - 0x2f, 0x6d, 0x4f, 0xbd, 0xf7, 0x9f, 0xca, 0xad, 0x3d, 0xf7, 0x10, 0x55, 0xdc, 0xfa, 0x5f, 0x54, - 0xf3, 0x63, 0xed, 0xc5, 0x80, 0x81, 0xd3, 0xce, 0x9b, 0x79, 0x9f, 0xf7, 0x3e, 0x6f, 0xde, 0x8f, - 0x59, 0x58, 0x0e, 0x03, 0x41, 0x4e, 0x83, 0x33, 0x2f, 0xe1, 0x4c, 0x30, 0x74, 0x6f, 0xc4, 0x7a, - 0x67, 0x5e, 0x6f, 0x4c, 0xa3, 0xc1, 0x31, 0x15, 0xde, 0xc9, 0x67, 0xde, 0x90, 0xb3, 0x58, 0x90, - 0x78, 0xe0, 0x3c, 0x0a, 0xa9, 0x38, 0x1a, 0xf7, 0xbc, 0x3e, 0x1b, 0xf9, 0x21, 0x0b, 0x99, 0xaf, - 0x10, 0xbd, 0xf1, 0x50, 0x49, 0x4a, 0x50, 0x2b, 0x6d, 0xc9, 0xd9, 0x9a, 0x55, 0x0f, 0x19, 0x0b, - 0x23, 0x12, 0x24, 0x34, 0x35, 0x4b, 0x9f, 0x27, 0x7d, 0x3f, 0x15, 0x81, 0x18, 0xa7, 0x06, 0xb3, - 0x99, 0xc3, 0x48, 0x22, 0x7e, 0x46, 0xc4, 0x4f, 0x59, 0x74, 0x42, 0xb8, 0x9f, 0xf4, 0x7c, 0x96, - 0x64, 0xda, 0xfe, 0xb5, 0xda, 0x41, 0x42, 0x7d, 0x71, 0x96, 0x90, 0xd4, 0x3f, 0x65, 0xfc, 0x98, - 0x70, 0x03, 0x78, 0x7c, 0x2d, 0x60, 0x2c, 0x68, 0x24, 0x51, 0xfd, 0x20, 0x49, 0xa5, 0x13, 0xf9, - 0x35, 0xa0, 0x7c, 0xd8, 0x82, 0xc5, 0x34, 0x15, 0x94, 0x86, 0xd4, 0x1f, 0xa6, 0x0a, 0xa3, 0xbd, - 0xc8, 0x20, 0xb4, 0xba, 0xfb, 0xaf, 0x05, 0x4b, 0x98, 0xa4, 0xe3, 0x48, 0x20, 0x04, 0x05, 0x4e, - 0x86, 0xb6, 0xd5, 0xb4, 0x5a, 0xd5, 0x57, 0x0b, 0x58, 0x0a, 0x68, 0x1b, 0x8a, 0x9c, 0x0c, 0x53, - 0x7b, 0xb1, 0x69, 0xb5, 0x6a, 0x5b, 0x1f, 0x7b, 0xd7, 0x5e, 0xb7, 0x87, 0xc9, 0xf0, 0x30, 0x48, - 0x5e, 0x2d, 0x60, 0x05, 0x40, 0xdf, 0x40, 0x65, 0x44, 0x44, 0x30, 0x08, 0x44, 0x60, 0x43, 0xb3, - 0xd0, 0xaa, 0x6d, 0xf9, 0x73, 0xc1, 0x92, 0x81, 0x77, 0x68, 0x10, 0x2f, 0x63, 0xc1, 0xcf, 0xf0, - 0xc4, 0x80, 0xf3, 0x14, 0x96, 0x2f, 0x1c, 0xa1, 0x06, 0x14, 0x8e, 0xc9, 0x99, 0xa6, 0x8a, 0xe5, - 0x12, 0xfd, 0x1f, 0x4a, 0x27, 0x41, 0x34, 0x26, 0x8a, 0x69, 0x1d, 0x6b, 0x61, 0x67, 0xf1, 0x89, - 0xd5, 0xae, 0xc0, 0x12, 0x57, 0xe6, 0xdd, 0xdf, 0x54, 0xac, 0x92, 0x26, 0x7a, 0x6e, 0xe2, 0xb2, - 0x14, 0xb5, 0x87, 0x37, 0xc6, 0x25, 0x3f, 0xa9, 0xa6, 0xa5, 0x80, 0xce, 0x36, 0x54, 0x27, 0x5b, - 0x37, 0xd1, 0xa9, 0xe6, 0xe8, 0xb8, 0x02, 0x96, 0x31, 0x11, 0x63, 0x1e, 0x63, 0xf2, 0xe3, 0x98, - 0xa4, 0x02, 0x7d, 0x99, 0xf1, 0x53, 0xf8, 0x9b, 0x2e, 0x59, 0x2a, 0x62, 0x03, 0x40, 0x2d, 0x28, - 0x11, 0xce, 0x19, 0x37, 
0xe9, 0x41, 0x9e, 0x2e, 0x54, 0x8f, 0x27, 0x7d, 0xef, 0x8d, 0x2a, 0x54, - 0xac, 0x15, 0xdc, 0x06, 0xac, 0x64, 0x5e, 0xd3, 0x84, 0xc5, 0x29, 0x71, 0xff, 0xb0, 0xe0, 0x1e, - 0x26, 0xaa, 0x4e, 0x0f, 0x46, 0x41, 0x48, 0x76, 0x59, 0x3c, 0xa4, 0x61, 0x46, 0xaa, 0x01, 0x05, - 0x9c, 0xd5, 0x02, 0x96, 0x4b, 0xd4, 0x82, 0x4a, 0x37, 0x0a, 0xc4, 0x90, 0xf1, 0x91, 0x71, 0x57, - 0xf7, 0x92, 0x9e, 0x97, 0xed, 0xe1, 0xc9, 0x29, 0x6a, 0x42, 0xcd, 0x18, 0x3e, 0x64, 0x03, 0x62, - 0x17, 0x94, 0x8d, 0xfc, 0x16, 0xb2, 0xa1, 0xdc, 0x61, 0xe1, 0xb7, 0xc1, 0x88, 0xd8, 0x45, 0x75, - 0x9a, 0x89, 0xee, 0x2f, 0x16, 0x38, 0x57, 0xb1, 0xd2, 0xa4, 0xd1, 0xd7, 0xb0, 0xb4, 0x47, 0x43, - 0x92, 0xea, 0xbb, 0xaa, 0xb6, 0xb7, 0xde, 0x7f, 0xb8, 0xbf, 0xf0, 0xf7, 0x87, 0xfb, 0x1b, 0xb9, - 0xa2, 0x67, 0x09, 0x89, 0xfb, 0x2c, 0x16, 0x01, 0x8d, 0x09, 0x97, 0xbd, 0xfb, 0x68, 0xa0, 0x20, - 0x9e, 0x46, 0x62, 0x63, 0x01, 0x7d, 0x04, 0x4b, 0xda, 0xba, 0x29, 0x19, 0x23, 0xb9, 0xbf, 0x17, - 0xa0, 0xfe, 0x46, 0x12, 0xc8, 0xee, 0xc2, 0x03, 0xd8, 0x23, 0x43, 0x1a, 0x53, 0x41, 0x59, 0x6c, - 0x92, 0xb4, 0x22, 0x63, 0x9f, 0xee, 0xe2, 0x9c, 0x06, 0x72, 0xa0, 0xb2, 0x6f, 0x12, 0x66, 0xd2, - 0x3f, 0x91, 0xd1, 0x3b, 0xa8, 0x65, 0xeb, 0xd7, 0x89, 0xb0, 0x0b, 0xaa, 0xfc, 0x9e, 0xcc, 0xc9, - 0x78, 0x9e, 0x89, 0x97, 0x83, 0xea, 0x5a, 0xcc, 0x1b, 0x43, 0x2d, 0x58, 0x3d, 0x18, 0x25, 0x8c, - 0x8b, 0xdd, 0xa0, 0x7f, 0x44, 0x64, 0x75, 0xda, 0xc5, 0x66, 0xa1, 0x55, 0xc5, 0xb3, 0xdb, 0x68, - 0x13, 0xfe, 0x17, 0x44, 0x11, 0x3b, 0x35, 0xe5, 0xa4, 0x0a, 0xc3, 0x2e, 0x35, 0xad, 0x56, 0x05, - 0x5f, 0x3e, 0x90, 0xb5, 0xbc, 0x4f, 0xe3, 0x20, 0xb2, 0x41, 0x69, 0x68, 0x01, 0xb9, 0x50, 0x7f, - 0xf9, 0x93, 0x34, 0x4b, 0xf8, 0x0b, 0x21, 0xb8, 0x5d, 0x53, 0x97, 0x78, 0x61, 0xcf, 0x79, 0x06, - 0x8d, 0x59, 0xca, 0x77, 0xea, 0x95, 0xef, 0x60, 0xd9, 0xc4, 0x6f, 0xf2, 0xdf, 0xc8, 0x8d, 0x28, - 0x3d, 0xa0, 0xa6, 0xdd, 0x53, 0xb8, 0x63, 0xf7, 0xb8, 0x3f, 0xc3, 0x2a, 0x26, 0xc1, 0x60, 0x9f, - 0x46, 0xe4, 0xfa, 0xb2, 0x97, 0xc9, 0xa4, 0x11, 0xe9, 0x06, 0xe2, 0x68, 0x92, 0x4c, 0x23, 0xa3, - 0x1d, 0x28, 0xe1, 0x20, 0x0e, 0x89, 0x71, 0xfd, 0xc9, 0x1c, 0xd7, 0xca, 0x89, 0xd4, 0xc5, 0x1a, - 0xe2, 0x3e, 0x85, 0xea, 0x64, 0x4f, 0x96, 0xe2, 0xeb, 0xe1, 0x30, 0x25, 0xba, 0xac, 0x0b, 0xd8, - 0x48, 0x72, 0xbf, 0x43, 0xe2, 0xd0, 0xb8, 0x2e, 0x60, 0x23, 0xb9, 0xeb, 0xd0, 0x98, 0x32, 0x37, - 0x57, 0x83, 0xa0, 0xb8, 0x27, 0x87, 0xad, 0xa5, 0xf2, 0xa0, 0xd6, 0xee, 0x40, 0x76, 0x7d, 0x30, - 0xd8, 0xa3, 0xfc, 0xfa, 0x00, 0x6d, 0x28, 0xef, 0x51, 0x9e, 0x8b, 0x2f, 0x13, 0xd1, 0x3a, 0xac, - 0x1c, 0xc4, 0xfd, 0x68, 0x3c, 0x90, 0xd1, 0x0a, 0xc2, 0x63, 0xd3, 0xca, 0x33, 0xbb, 0xee, 0x73, - 0x7d, 0x8f, 0xca, 0x8b, 0x21, 0xb3, 0x09, 0x65, 0x12, 0x0b, 0x4e, 0x49, 0x36, 0x61, 0x91, 0xa7, - 0x1f, 0x20, 0x4f, 0x3d, 0x40, 0x6a, 0x38, 0xe1, 0x4c, 0xc5, 0xdd, 0x86, 0x55, 0xb9, 0x31, 0x3f, - 0x11, 0x08, 0x8a, 0x39, 0x92, 0x6a, 0xed, 0xee, 0x40, 0x63, 0x0a, 0x34, 0xae, 0xd7, 0xa1, 0x28, - 0x9f, 0x37, 0xd3, 0xa7, 0x57, 0xf9, 0x55, 0xe7, 0xee, 0x32, 0xd4, 0xba, 0x34, 0xce, 0x06, 0x9e, - 0x7b, 0x6e, 0x41, 0xbd, 0xcb, 0xe2, 0xe9, 0xa8, 0xe9, 0xc2, 0x6a, 0x56, 0xbb, 0x2f, 0xba, 0x07, - 0xbb, 0x41, 0x92, 0x85, 0xd2, 0xbc, 0x9c, 0x66, 0xf3, 0x12, 0x7b, 0x5a, 0xb1, 0x5d, 0x94, 0x53, - 0x09, 0xcf, 0xc2, 0xd1, 0x57, 0x50, 0xee, 0x74, 0xda, 0xca, 0xd2, 0xe2, 0x9d, 0x2c, 0x65, 0x30, - 0xf4, 0x0c, 0xca, 0x6f, 0xd5, 0x0f, 0x42, 0x6a, 0x26, 0xc7, 0x15, 0x25, 0xa7, 0x03, 0xd5, 0x6a, - 0x98, 0xf4, 0x19, 0x1f, 0xe0, 0x0c, 0xb4, 0xf5, 0x67, 0x09, 0xaa, 0x9d, 0x4e, 0xbb, 0xcd, 0xe9, - 0x20, 0x24, 0xe8, 0x57, 0x0b, 0xd0, 0xe5, 0x59, 
0x8b, 0x3e, 0x9f, 0xdf, 0x41, 0x57, 0x3f, 0x18,
-	0xce, 0x17, 0x77, 0x44, 0x99, 0x5b, 0x7e, 0x07, 0x25, 0xd5, 0xe1, 0xe8, 0xd3, 0x5b, 0xce, 0x40,
-	0xa7, 0x75, 0xb3, 0xa2, 0xb1, 0xdd, 0x87, 0x4a, 0xd6, 0x25, 0x68, 0x63, 0x2e, 0xbd, 0x0b, 0x43,
-	0xc0, 0x79, 0x78, 0x2b, 0x5d, 0xe3, 0xe4, 0x07, 0x28, 0x9b, 0xe2, 0x47, 0x0f, 0x6e, 0xc0, 0x4d,
-	0xdb, 0xd0, 0xd9, 0xb8, 0x8d, 0xea, 0x34, 0x8c, 0xac, 0xc8, 0xe7, 0x86, 0x31, 0xd3, 0x42, 0x73,
-	0xc3, 0xb8, 0xd4, 0x35, 0x6f, 0xa1, 0x28, 0xbb, 0x01, 0xad, 0xcf, 0x01, 0xe5, 0xda, 0xc5, 0x99,
-	0x97, 0xae, 0x0b, 0x6d, 0xf4, 0xbd, 0xfc, 0xe5, 0x52, 0xcf, 0x48, 0x6b, 0x6e, 0xcc, 0xb9, 0x3f,
-	0x22, 0xe7, 0xc1, 0x2d, 0x34, 0xb5, 0xf9, 0x76, 0xfd, 0xfd, 0xf9, 0x9a, 0xf5, 0xd7, 0xf9, 0x9a,
-	0xf5, 0xcf, 0xf9, 0x9a, 0xd5, 0x5b, 0x52, 0xff, 0xb4, 0x8f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff,
-	0x80, 0x7e, 0xd2, 0xb5, 0x25, 0x0c, 0x00, 0x00,
-}
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto
deleted file mode 100644
index 7699959e5374..000000000000
--- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto
+++ /dev/null
@@ -1,125 +0,0 @@
-syntax = "proto3";
-
-package moby.buildkit.v1.frontend;
-
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-import "github.com/gogo/googleapis/google/rpc/status.proto";
-import "github.com/moby/buildkit/solver/pb/ops.proto";
-import "github.com/moby/buildkit/api/types/worker.proto";
-import "github.com/moby/buildkit/util/apicaps/pb/caps.proto";
-import "github.com/tonistiigi/fsutil/types/stat.proto";
-
-option (gogoproto.sizer_all) = true;
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-
-service LLBBridge {
-	// apicaps:CapResolveImage
-	rpc ResolveImageConfig(ResolveImageConfigRequest) returns (ResolveImageConfigResponse);
-	// apicaps:CapSolveBase
-	rpc Solve(SolveRequest) returns (SolveResponse);
-	// apicaps:CapReadFile
-	rpc ReadFile(ReadFileRequest) returns (ReadFileResponse);
-	// apicaps:CapReadDir
-	rpc ReadDir(ReadDirRequest) returns (ReadDirResponse);
-	// apicaps:CapStatFile
-	rpc StatFile(StatFileRequest) returns (StatFileResponse);
-	rpc Ping(PingRequest) returns (PongResponse);
-	rpc Return(ReturnRequest) returns (ReturnResponse);
-}
-
-message Result {
-	oneof result {
-		string ref = 1;
-		RefMap refs = 2;
-	}
-	map<string, bytes> metadata = 10;
-}
-
-message RefMap {
-	map<string, string> refs = 1;
-}
-
-message ReturnRequest {
-	Result result = 1;
-	google.rpc.Status error = 2;
-}
-
-message ReturnResponse {
-}
-
-message ResolveImageConfigRequest {
-	string Ref = 1;
-	pb.Platform Platform = 2;
-	string ResolveMode = 3;
-	string LogName = 4;
-}
-
-message ResolveImageConfigResponse {
-	string Digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-	bytes Config = 2;
-}
-
-message SolveRequest {
-	pb.Definition Definition = 1;
-	string Frontend = 2;
-	map<string, string> FrontendOpt = 3;
-	repeated string ImportCacheRefs = 4;
-	bool allowResultReturn = 5;
-
-	// apicaps.CapSolveInlineReturn deprecated
-	bool Final = 10;
-	bytes ExporterAttr = 11;
-}
-
-message SolveResponse {
-	// deprecated
-	string ref = 1; // can be used by readfile request
-	// deprecated
-/*	bytes ExporterAttr = 2;*/
-
-	// these fields are returned when allowMapReturn was set
-	Result result = 3;
-}
-
-message ReadFileRequest {
-	string Ref = 1;
-	string FilePath = 2;
-	FileRange Range = 3;
-}
-
-message FileRange {
-	int64 Offset = 1;
-	int64 Length = 2;
-}
-
-message ReadFileResponse {
-	bytes Data = 1;
-}
-
-message ReadDirRequest {
-	string Ref = 1;
-	string DirPath = 2;
-	string IncludePattern = 3;
-}
-
-message ReadDirResponse {
-	repeated fsutil.types.Stat entries = 1;
-}
-
-message StatFileRequest {
-	string Ref = 1;
-	string Path = 2;
-}
-
-message StatFileResponse {
-	fsutil.types.Stat stat = 1;
-}
-
-message PingRequest{
-}
-message PongResponse{
-	repeated moby.buildkit.v1.apicaps.APICap FrontendAPICaps = 1 [(gogoproto.nullable) = false];
-	repeated moby.buildkit.v1.apicaps.APICap LLBCaps = 2 [(gogoproto.nullable) = false];
-	repeated moby.buildkit.v1.types.WorkerRecord Workers = 3;
-}
\ No newline at end of file
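The gateway.proto removed above defines LLBBridge, the RPC surface a BuildKit frontend uses to drive the solver (ResolveImageConfig, Solve, ReadFile, Ping, Return). As a rough orientation only, a client of the generated Go bindings looks like the sketch below; the dial target is hypothetical, and real frontends are handed this connection by buildkitd rather than opening it themselves.

package main

import (
	"context"
	"log"

	gatewaypb "github.com/moby/buildkit/frontend/gateway/pb"
	"google.golang.org/grpc"
)

func main() {
	// Hypothetical endpoint for illustration; frontends normally inherit
	// an established gRPC connection from buildkitd.
	conn, err := grpc.Dial("unix:///run/buildkit/buildkitd.sock", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	c := gatewaypb.NewLLBBridgeClient(conn)
	// Ping negotiates capabilities; PongResponse carries the caps lists
	// decoded by the Unmarshal code deleted earlier in this diff.
	pong, err := c.Ping(context.Background(), &gatewaypb.PingRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("frontend caps: %d, LLB caps: %d", len(pong.FrontendAPICaps), len(pong.LLBCaps))
}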
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go
deleted file mode 100644
index 4ab07c6d4a75..000000000000
--- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package moby_buildkit_v1_frontend
-
-//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. gateway.proto
diff --git a/vendor/github.com/moby/buildkit/frontend/result.go b/vendor/github.com/moby/buildkit/frontend/result.go
deleted file mode 100644
index 37715de8a3f9..000000000000
--- a/vendor/github.com/moby/buildkit/frontend/result.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package frontend
-
-import "github.com/moby/buildkit/solver"
-
-type Result struct {
-	Ref      solver.CachedResult
-	Refs     map[string]solver.CachedResult
-	Metadata map[string][]byte
-}
-
-func (r *Result) EachRef(fn func(solver.CachedResult) error) (err error) {
-	if r.Ref != nil {
-		err = fn(r.Ref)
-	}
-	for _, r := range r.Refs {
-		if r != nil {
-			if err1 := fn(r); err1 != nil && err == nil {
-				err = err1
-			}
-		}
-	}
-	return err
-}
diff --git a/vendor/github.com/moby/buildkit/gometalinter.json b/vendor/github.com/moby/buildkit/gometalinter.json
deleted file mode 100644
index e79b3f10c886..000000000000
--- a/vendor/github.com/moby/buildkit/gometalinter.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-  "Vendor": true,
-  "Deadline": "8m",
-  "Exclude": [".*.pb.go"],
-  "DisableAll": true,
-  "Enable": [
-    "gofmt",
-    "goimports",
-    "ineffassign",
-    "vet",
-    "deadcode"
-  ]
-}
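The EachRef helper deleted above in result.go visits the primary Ref and every named ref, remembering only the first callback error while still visiting the rest. A self-contained sketch of that aggregation pattern follows; the string-based types here are stand-ins for illustration, not BuildKit's solver.CachedResult.

package main

import (
	"errors"
	"fmt"
)

// eachRef mirrors the deleted EachRef: visit all refs, keep the first error.
func eachRef(primary string, named map[string]string, fn func(string) error) (err error) {
	if primary != "" {
		err = fn(primary)
	}
	for _, ref := range named {
		if ref == "" {
			continue // skip nil/empty refs, as the original skips nil results
		}
		if err1 := fn(ref); err1 != nil && err == nil {
			err = err1
		}
	}
	return err
}

func main() {
	err := eachRef("sha256:aaa", map[string]string{"stage1": "sha256:bbb"}, func(ref string) error {
		fmt.Println("visiting", ref)
		return errors.New("release failed")
	})
	fmt.Println(err) // first error is kept even though all refs were visited
}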
diff --git a/vendor/github.com/moby/buildkit/hack/binaries b/vendor/github.com/moby/buildkit/hack/binaries
deleted file mode 100755
index a01b549e901b..000000000000
--- a/vendor/github.com/moby/buildkit/hack/binaries
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env bash
-
-. $(dirname $0)/util
-
-: ${TARGETPLATFORM=}
-: ${CONTINUOUS_INTEGRATION=}
-
-set -ex
-
-progressFlag=""
-if [ "$CONTINUOUS_INTEGRATION" == "true" ]; then progressFlag="--progress=plain"; fi
-
-binariesLegacy() {
-  mkdir -p bin
-
-  iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX)
-  copysrc="/usr/bin/."
-
-  case "$(echo "$TARGETPLATFORM" | cut -d"/" -f1)" in
-    "darwin")
-      docker build --iidfile $iidfile --target buildctl-darwin -f ./hack/dockerfiles/test.Dockerfile --force-rm .
-      copysrc="/out/."
-      ;;
-    "windows")
-      docker build --iidfile $iidfile --target buildctl.exe -f ./hack/dockerfiles/test.Dockerfile --force-rm .
-      copysrc="/out/."
-      ;;
-    *)
-      docker build --iidfile $iidfile --target buildkit-binaries -f ./hack/dockerfiles/test.Dockerfile --force-rm .
-      ;;
-  esac
-
-  iid=$(cat $iidfile)
-  containerID=$(docker create $iid copy)
-  docker cp $containerID:$copysrc bin
-  docker rm $containerID
-  chmod +x bin/*
-  rm -f $iidfile
-}
-
-binariesDocker() {
-  mkdir -p bin/tmp
-  export DOCKER_BUILDKIT=1
-  iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX)
-  target=$(echo "$TARGETPLATFORM" | cut -d"/" -f1)
-  if [ -z "$target" ]; then
-    target="linux"
-  fi
-
-  docker build $platformFlag --target binaries-$target --iidfile $iidfile -f ./hack/dockerfiles/test.buildkit.Dockerfile --force-rm .
-  iid=$(cat $iidfile)
-  containerID=$(docker create $iid copy)
-  docker cp $containerID:/ bin/tmp
-  mv bin/tmp/build* bin/
-  rm -rf bin/tmp
-  docker rm $containerID
-  docker rmi -f $iid
-  rm -f $iidfile
-}
-
-binaries() {
-  platformFlag=""
-  if [ ! -z "$TARGETPLATFORM" ]; then
-    platformFlag="--frontend-opt=platform=$TARGETPLATFORM"
-  fi
-  buildctl build $progressFlag --frontend=dockerfile.v0 \
-    --local context=. --local dockerfile=. \
-    --frontend-opt filename=./hack/dockerfiles/test.buildkit.Dockerfile \
-    --frontend-opt target=binaries $platformFlag \
-    --exporter=local --exporter-opt output=./bin/
-}
-
-case $buildmode in
-"buildkit")
-  binaries
-  ;;
-"docker-buildkit")
-  binariesDocker
-  ;;
-*)
-  binariesLegacy
-  ;;
-esac
diff --git a/vendor/github.com/moby/buildkit/hack/cross b/vendor/github.com/moby/buildkit/hack/cross
deleted file mode 100755
index 40112de5e112..000000000000
--- a/vendor/github.com/moby/buildkit/hack/cross
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env bash
-
-. $(dirname $0)/util
-
-: ${PLATFORMS=linux/arm}
-
-set -ex
-
-buildctl build --progress=plain --frontend=dockerfile.v0 --local context=. --local dockerfile=. --frontend-opt filename=./hack/dockerfiles/test.buildkit.Dockerfile --frontend-opt platform=$PLATFORMS
\ No newline at end of file
diff --git a/vendor/github.com/moby/buildkit/hack/dockerfiles/generated-files.Dockerfile b/vendor/github.com/moby/buildkit/hack/dockerfiles/generated-files.Dockerfile
deleted file mode 100644
index 70c8a733f08e..000000000000
--- a/vendor/github.com/moby/buildkit/hack/dockerfiles/generated-files.Dockerfile
+++ /dev/null
@@ -1,35 +0,0 @@
-# protoc is dynamically linked to glibc to can't use golang:1.11-alpine
-FROM golang:1.11 AS gobuild-base
-ARG PROTOC_VERSION=3.1.0
-ARG GOGO_VERSION=master
-RUN apt-get update && apt-get install -y \
-  git \
-  unzip \
-  && true
-RUN wget -q https://github.com/google/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip && unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d /usr/local
-
-RUN go get -d github.com/gogo/protobuf/protoc-gen-gogofaster \
-  && cd /go/src/github.com/gogo/protobuf \
-  && git checkout -q $GOGO_VERSION \
-  && go install ./protoc-gen-gogo ./protoc-gen-gogofaster ./protoc-gen-gogoslick
-
-WORKDIR /go/src/github.com/moby/buildkit
-COPY . .
-RUN go generate ./...
-
-# Generate into a subdirectory because if it is in the root then the
-# extraction with `docker export` ends up putting `.dockerenv`, `dev`,
-# `sys` and `proc` into the source directory. With this we can use
-# `tar --strip-components=1 generated-files` on the output of `docker
-# export`.
-FROM gobuild-base AS generated
-RUN mkdir /generated-files
-RUN find . -name "*.pb.go" ! -path ./vendor/\* | tar -cf - --files-from - | tar -C /generated-files -xf -
-
-FROM scratch AS update
-
-COPY --from=generated generated-files /generated-files
-
-FROM gobuild-base AS validate
-
-RUN ./hack/validate-generated-files check
diff --git a/vendor/github.com/moby/buildkit/hack/dockerfiles/generated-files.buildkit.Dockerfile b/vendor/github.com/moby/buildkit/hack/dockerfiles/generated-files.buildkit.Dockerfile
deleted file mode 100644
index 6f72f93f3ace..000000000000
--- a/vendor/github.com/moby/buildkit/hack/dockerfiles/generated-files.buildkit.Dockerfile
+++ /dev/null
@@ -1,39 +0,0 @@
-# syntax=tonistiigi/dockerfile:runmount20180828
-
-# protoc is dynamically linked to glibc to can't use golang:1.10-alpine
-FROM golang:1.11 AS gobuild-base
-ARG PROTOC_VERSION=3.1.0
-ARG GOGO_VERSION=master
-RUN apt-get update && apt-get install -y \
-  git \
-  unzip \
-  && true
-RUN wget -q https://github.com/google/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip && unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d /usr/local
-
-RUN go get -d github.com/gogo/protobuf/protoc-gen-gogofaster \
-  && cd /go/src/github.com/gogo/protobuf \
-  && git checkout -q $GOGO_VERSION \
-  && go install ./protoc-gen-gogo ./protoc-gen-gogofaster ./protoc-gen-gogoslick
-
-WORKDIR /go/src/github.com/moby/buildkit
-
-# Generate into a subdirectory because if it is in the root then the
-# extraction with `docker export` ends up putting `.dockerenv`, `dev`,
-# `sys` and `proc` into the source directory. With this we can use
-# `tar --strip-components=1 generated-files` on the output of `docker
-# export`.
-FROM gobuild-base AS generated
-RUN mkdir /generated-files
-RUN --mount=target=/tmp/src \
-  cp -r /tmp/src/. . && \
-  git add -A && \
-  go generate ./... && \
-  git ls-files -m --others -- **/*.pb.go | tar -cf - --files-from - | tar -C /generated-files -xf -
-
-FROM scratch AS update
-COPY --from=generated generated-files /
-
-FROM gobuild-base AS validate
-RUN --mount=target=/tmp/src \
-  cp -r /tmp/src/. . && \
-  go generate ./... && git diff && ./hack/validate-generated-files check
diff --git a/vendor/github.com/moby/buildkit/hack/dockerfiles/lint.Dockerfile b/vendor/github.com/moby/buildkit/hack/dockerfiles/lint.Dockerfile
deleted file mode 100644
index 8a195797c20d..000000000000
--- a/vendor/github.com/moby/buildkit/hack/dockerfiles/lint.Dockerfile
+++ /dev/null
@@ -1,8 +0,0 @@
-FROM golang:1.11-alpine
-RUN apk add --no-cache git
-RUN go get -u gopkg.in/alecthomas/gometalinter.v1 \
-  && mv /go/bin/gometalinter.v1 /go/bin/gometalinter \
-  && gometalinter --install
-WORKDIR /go/src/github.com/moby/buildkit
-COPY . .
-RUN gometalinter --config=gometalinter.json ./...
diff --git a/vendor/github.com/moby/buildkit/hack/dockerfiles/lint.buildkit.Dockerfile b/vendor/github.com/moby/buildkit/hack/dockerfiles/lint.buildkit.Dockerfile
deleted file mode 100644
index de622c2165c6..000000000000
--- a/vendor/github.com/moby/buildkit/hack/dockerfiles/lint.buildkit.Dockerfile
+++ /dev/null
@@ -1,10 +0,0 @@
-# syntax=tonistiigi/dockerfile:runmount20180828
-
-FROM golang:1.11-alpine
-RUN apk add --no-cache git
-RUN go get -u gopkg.in/alecthomas/gometalinter.v1 \
-  && mv /go/bin/gometalinter.v1 /go/bin/gometalinter \
-  && gometalinter --install
-WORKDIR /go/src/github.com/moby/buildkit
-RUN --mount=target=/go/src/github.com/moby/buildkit \
-  gometalinter --config=gometalinter.json ./...
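The generated-files Dockerfiles above exist to regenerate, via protoc and the gogo plugins, exactly the kind of .pb.go file this diff deletes (gateway.pb.go earlier). Every Unmarshal method in that generated code decodes tags and lengths with the same hand-unrolled protobuf varint loop; reduced to a standalone sketch (not code from this diff), the pattern is:

package main

import (
	"errors"
	"fmt"
)

// decodeUvarint mirrors the inlined loops in the generated Unmarshal methods:
// seven payload bits per byte, least-significant group first, and a set high
// bit means more bytes follow.
func decodeUvarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits") // the ErrIntOverflowGateway case
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected end of input") // the io.ErrUnexpectedEOF case
		}
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	v, n, err := decodeUvarint([]byte{0xAC, 0x02}) // 0xAC 0x02 encodes 300
	fmt.Println(v, n, err)                         // 300 2 <nil>
}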
diff --git a/vendor/github.com/moby/buildkit/hack/dockerfiles/test.Dockerfile b/vendor/github.com/moby/buildkit/hack/dockerfiles/test.Dockerfile deleted file mode 100644 index dc48392dbf2c..000000000000 --- a/vendor/github.com/moby/buildkit/hack/dockerfiles/test.Dockerfile +++ /dev/null @@ -1,192 +0,0 @@ -ARG RUNC_VERSION=a00bf0190895aa465a5fbed0268888e2c8ddfe85 -ARG CONTAINERD_VERSION=v1.2.0-rc.1 -# containerd v1.0 for integration tests -ARG CONTAINERD10_VERSION=v1.0.3 -# available targets: buildkitd, buildkitd.oci_only, buildkitd.containerd_only -ARG BUILDKIT_TARGET=buildkitd -ARG REGISTRY_VERSION=v2.7.0-rc.0 -ARG ROOTLESSKIT_VERSION=4f7ae4607d626f0a22fb495056d55b17cce8c01b - -# The `buildkitd` stage and the `buildctl` stage are placed here -# so that they can be built quickly with legacy DAG-unaware `docker build --target=...` - -FROM golang:1.11-alpine AS gobuild-base -RUN apk add --no-cache g++ linux-headers -RUN apk add --no-cache git libseccomp-dev make - -FROM gobuild-base AS buildkit-base -WORKDIR /go/src/github.com/moby/buildkit -COPY . . -RUN mkdir .tmp; \ - PKG=github.com/moby/buildkit VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always) REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \ - echo "-X ${PKG}/version.Version=${VERSION} -X ${PKG}/version.Revision=${REVISION} -X ${PKG}/version.Package=${PKG}" | tee .tmp/ldflags - -FROM buildkit-base AS buildctl -ENV CGO_ENABLED=0 -RUN go build -ldflags "$(cat .tmp/ldflags) -d" -o /usr/bin/buildctl ./cmd/buildctl - -FROM buildkit-base AS buildctl-darwin -ENV CGO_ENABLED=0 -ENV GOOS=darwin -RUN go build -ldflags "$(cat .tmp/ldflags)" -o /out/buildctl-darwin ./cmd/buildctl -# reset GOOS for legacy builder -ENV GOOS=linux - -FROM buildkit-base AS buildkitd -ENV CGO_ENABLED=1 -RUN go build -installsuffix netgo -ldflags "$(cat .tmp/ldflags) -w -extldflags -static" -tags 'seccomp netgo cgo static_build' -o /usr/bin/buildkitd ./cmd/buildkitd - -# test dependencies begin here -FROM gobuild-base AS runc -ARG RUNC_VERSION -ENV CGO_ENABLED=1 -RUN git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ - && cd "$GOPATH/src/github.com/opencontainers/runc" \ - && git checkout -q "$RUNC_VERSION" \ - && go build -installsuffix netgo -ldflags '-w -extldflags -static' -tags 'seccomp netgo cgo static_build' -o /usr/bin/runc ./ - -FROM gobuild-base AS containerd-base -RUN apk add --no-cache btrfs-progs-dev -RUN git clone https://github.com/containerd/containerd.git /go/src/github.com/containerd/containerd -WORKDIR /go/src/github.com/containerd/containerd - -FROM containerd-base as containerd -ARG CONTAINERD_VERSION -RUN git checkout -q "$CONTAINERD_VERSION" \ - && make bin/containerd \ - && make bin/containerd-shim \ - && make bin/ctr - -# containerd v1.0 for integration tests -FROM containerd-base as containerd10 -ARG CONTAINERD10_VERSION -RUN git checkout -q "$CONTAINERD10_VERSION" \ - && make bin/containerd \ - && make bin/containerd-shim - -FROM buildkit-base AS buildkitd.oci_only -ENV CGO_ENABLED=1 -# mitigate https://github.com/moby/moby/pull/35456 -WORKDIR /go/src/github.com/moby/buildkit -RUN go build -installsuffix netgo -ldflags "$(cat .tmp/ldflags) -w -extldflags -static" -tags 'no_containerd_worker seccomp netgo cgo static_build' -o /usr/bin/buildkitd.oci_only ./cmd/buildkitd - -FROM buildkit-base AS buildkitd.containerd_only -ENV CGO_ENABLED=0 -RUN go build -ldflags "$(cat .tmp/ldflags) -d" -o /usr/bin/buildkitd.containerd_only -tags 
no_oci_worker ./cmd/buildkitd - -FROM tonistiigi/registry:$REGISTRY_VERSION AS registry - -FROM gobuild-base AS rootlesskit-base -RUN git clone https://github.com/rootless-containers/rootlesskit.git /go/src/github.com/rootless-containers/rootlesskit -WORKDIR /go/src/github.com/rootless-containers/rootlesskit - -FROM rootlesskit-base as rootlesskit -ARG ROOTLESSKIT_VERSION -# mitigate https://github.com/moby/moby/pull/35456 -ENV GOOS=linux -RUN git checkout -q "$ROOTLESSKIT_VERSION" \ -&& go build -o /rootlesskit ./cmd/rootlesskit - -FROM scratch AS buildkit-binaries -COPY --from=runc /usr/bin/runc /usr/bin/buildkit-runc -COPY --from=buildctl /usr/bin/buildctl /usr/bin/ -COPY --from=buildkitd /usr/bin/buildkitd /usr/bin - -FROM buildkit-base AS integration-tests -ENV BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR="1000:1000" -RUN apk add --no-cache shadow shadow-uidmap sudo \ - && useradd --create-home --home-dir /home/user --uid 1000 -s /bin/sh user \ - && echo "XDG_RUNTIME_DIR=/run/user/1000; export XDG_RUNTIME_DIR" >> /home/user/.profile \ - && mkdir -m 0700 -p /run/user/1000 \ - && chown -R user /run/user/1000 /home/user -ENV BUILDKIT_INTEGRATION_CONTAINERD_EXTRA="containerd-1.0=/opt/containerd-1.0/bin" -COPY --from=runc /usr/bin/runc /usr/bin/buildkit-runc -COPY --from=containerd /go/src/github.com/containerd/containerd/bin/containerd* /usr/bin/ -COPY --from=containerd10 /go/src/github.com/containerd/containerd/bin/containerd* /opt/containerd-1.0/bin/ -COPY --from=buildctl /usr/bin/buildctl /usr/bin/ -COPY --from=buildkitd /usr/bin/buildkitd /usr/bin -COPY --from=registry /bin/registry /usr/bin -COPY --from=rootlesskit /rootlesskit /usr/bin/ - -FROM buildkit-base AS cross-windows -ENV GOOS=windows - -FROM cross-windows AS buildctl.exe -RUN go build -ldflags "$(cat .tmp/ldflags)" -o /out/buildctl.exe ./cmd/buildctl - -FROM cross-windows AS buildkitd.exe -ENV CGO_ENABLED=0 -RUN go build -ldflags "$(cat .tmp/ldflags)" -o /out/buildkitd.exe ./cmd/buildkitd - -FROM alpine AS buildkit-export -RUN apk add --no-cache git -VOLUME /var/lib/buildkit - -# Copy together all binaries for oci+containerd mode -FROM buildkit-export AS buildkit-buildkitd -COPY --from=runc /usr/bin/runc /usr/bin/ -COPY --from=buildkitd /usr/bin/buildkitd /usr/bin/ -COPY --from=buildctl /usr/bin/buildctl /usr/bin/ -ENTRYPOINT ["buildkitd"] - -# Copy together all binaries needed for oci worker mode -FROM buildkit-export AS buildkit-buildkitd.oci_only -COPY --from=buildkitd.oci_only /usr/bin/buildkitd.oci_only /usr/bin/ -COPY --from=buildctl /usr/bin/buildctl /usr/bin/ -ENTRYPOINT ["buildkitd.oci_only"] - -# Copy together all binaries for containerd worker mode -FROM buildkit-export AS buildkit-buildkitd.containerd_only -COPY --from=buildkitd.containerd_only /usr/bin/buildkitd.containerd_only /usr/bin/ -COPY --from=buildctl /usr/bin/buildctl /usr/bin/ -ENTRYPOINT ["buildkitd.containerd_only"] - -FROM alpine AS containerd-runtime -COPY --from=runc /usr/bin/runc /usr/bin/ -COPY --from=containerd /go/src/github.com/containerd/containerd/bin/containerd* /usr/bin/ -COPY --from=containerd /go/src/github.com/containerd/containerd/bin/ctr /usr/bin/ -VOLUME /var/lib/containerd -VOLUME /run/containerd -ENTRYPOINT ["containerd"] - -# To allow running buildkit in a container without CAP_SYS_ADMIN, we need to do either -# a) install newuidmap/newgidmap with file capabilities rather than SETUID (requires kernel >= 4.14) -# b) install newuidmap/newgidmap >= 20181028 -# We choose b) until kernel >= 4.14 gets widely adopted. 
-# See https://github.com/shadow-maint/shadow/pull/132 https://github.com/shadow-maint/shadow/pull/138 -# (Note: we don't use the patched idmap for the testsuite image) -FROM alpine:3.8 AS idmap -RUN apk add --no-cache autoconf automake build-base byacc gettext gettext-dev gcc git libcap-dev libtool libxslt -RUN git clone https://github.com/shadow-maint/shadow.git /shadow -WORKDIR /shadow -RUN git checkout 42324e501768675993235e03f7e4569135802d18 -RUN ./autogen.sh --disable-nls --disable-man --without-audit --without-selinux --without-acl --without-attr --without-tcb --without-nscd \ - && make \ - && cp src/newuidmap src/newgidmap /usr/bin - -# Rootless mode. -# Still requires `--privileged`. -FROM buildkit-buildkitd AS rootless -COPY --from=idmap /usr/bin/newuidmap /usr/bin/newuidmap -COPY --from=idmap /usr/bin/newgidmap /usr/bin/newgidmap -RUN chmod u+s /usr/bin/newuidmap /usr/bin/newgidmap \ - && adduser -D -u 1000 user \ - && mkdir -p /run/user/1000 /home/user/.local/tmp /home/user/.local/share/buildkit \ - && chown -R user /run/user/1000 /home/user \ - && echo user:100000:65536 | tee /etc/subuid | tee /etc/subgid \ - && passwd -l root -# As of v3.8.1, Alpine does not set SUID bit on the busybox version of /bin/su. -# However, future version may set SUID bit on /bin/su. -# We lock the root account by `passwd -l root`, so as to disable su completely. -COPY --from=rootlesskit /rootlesskit /usr/bin/ -USER user -ENV HOME /home/user -ENV USER user -ENV XDG_RUNTIME_DIR=/run/user/1000 -ENV TMPDIR=/home/user/.local/tmp -VOLUME /home/user/.local/share/buildkit -ENTRYPOINT ["rootlesskit", "buildkitd"] - -FROM buildkit-${BUILDKIT_TARGET} - - diff --git a/vendor/github.com/moby/buildkit/hack/dockerfiles/test.buildkit.Dockerfile b/vendor/github.com/moby/buildkit/hack/dockerfiles/test.buildkit.Dockerfile deleted file mode 100644 index 939648b84bb5..000000000000 --- a/vendor/github.com/moby/buildkit/hack/dockerfiles/test.buildkit.Dockerfile +++ /dev/null @@ -1,255 +0,0 @@ -# syntax = tonistiigi/dockerfile:runmount20180925 - -ARG RUNC_VERSION=a00bf0190895aa465a5fbed0268888e2c8ddfe85 -ARG CONTAINERD_VERSION=v1.2.0-rc.1 -# containerd v1.0 for integration tests -ARG CONTAINERD10_VERSION=v1.0.3 -# available targets: buildkitd, buildkitd.oci_only, buildkitd.containerd_only -ARG BUILDKIT_TARGET=buildkitd -ARG REGISTRY_VERSION=v2.7.0-rc.0 -ARG ROOTLESSKIT_VERSION=4f7ae4607d626f0a22fb495056d55b17cce8c01b -ARG ROOTLESS_BASE_MODE=external - -# git stage is used for checking out remote repository sources -FROM --platform=$BUILDPLATFORM alpine AS git -RUN apk add --no-cache git - -# xgo is a helper for golang cross-compilation -FROM --platform=$BUILDPLATFORM tonistiigi/xx:golang@sha256:6f7d999551dd471b58f70716754290495690efa8421e0a1fcf18eb11d0c0a537 AS xgo - -# gobuild is base stage for compiling go/cgo -FROM --platform=$BUILDPLATFORM golang:1.11 AS gobuild-minimal -COPY --from=xgo / / -RUN apt-get update && apt-get install --no-install-recommends -y libseccomp-dev file - -# on amd64 you can also cross-compile to other platforms -FROM gobuild-minimal AS gobuild-cross-amd64 -RUN dpkg --add-architecture s390x && \ - dpkg --add-architecture ppc64el && \ - dpkg --add-architecture armel && \ - dpkg --add-architecture armhf && \ - dpkg --add-architecture arm64 && \ - apt-get update && \ - apt-get install -y \ - gcc-s390x-linux-gnu libc6-dev-s390x-cross libseccomp-dev:s390x \ - crossbuild-essential-ppc64el libseccomp-dev:ppc64el \ - crossbuild-essential-armel libseccomp-dev:armel \ - crossbuild-essential-armhf 
libseccomp-dev:armhf \ - crossbuild-essential-arm64 libseccomp-dev:arm64 \ - --no-install-recommends - -# define all valid target configurations for compilation -FROM gobuild-minimal AS gobuild-amd64-amd64 -FROM gobuild-minimal AS gobuild-arm-arm -FROM gobuild-minimal AS gobuild-s390x-s390x -FROM gobuild-minimal AS gobuild-ppc64le-ppc64le -FROM gobuild-minimal AS gobuild-arm64-arm64 -FROM gobuild-cross-amd64 AS gobuild-amd64-arm -FROM gobuild-cross-amd64 AS gobuild-amd64-s390x -FROM gobuild-cross-amd64 AS gobuild-amd64-ppc64le -FROM gobuild-cross-amd64 AS gobuild-amd64-arm64 -FROM gobuild-$BUILDARCH-$TARGETARCH AS gobuild-base - -# runc source -FROM git AS runc-src -ARG RUNC_VERSION -WORKDIR /usr/src -RUN git clone git://github.com/opencontainers/runc.git runc \ - && cd runc && git checkout -q "$RUNC_VERSION" - -# build runc binary -FROM gobuild-base AS runc -WORKDIR $GOPATH/src/github.com/opencontainers/runc -ARG TARGETPLATFORM -RUN --mount=from=runc-src,src=/usr/src/runc,target=. --mount=target=/root/.cache,type=cache \ - CGO_ENABLED=1 go build -ldflags '-w -extldflags -static' -tags 'seccomp netgo cgo static_build osusergo' -o /usr/bin/runc ./ && \ - file /usr/bin/runc | grep "statically linked" - -FROM gobuild-base AS buildkit-base -WORKDIR /go/src/github.com/moby/buildkit - -# scan the version/revision info -FROM buildkit-base AS buildkit-version -RUN --mount=target=. \ - PKG=github.com/moby/buildkit VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always --tags) REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \ - echo "-X ${PKG}/version.Version=${VERSION} -X ${PKG}/version.Revision=${REVISION} -X ${PKG}/version.Package=${PKG}" | tee /tmp/.ldflags; \ - echo -n "${VERSION}" | tee /tmp/.version; - -# build buildctl binary -FROM buildkit-base AS buildctl -ENV CGO_ENABLED=0 -ARG TARGETPLATFORM -RUN --mount=target=. --mount=target=/root/.cache,type=cache \ - --mount=source=/tmp/.ldflags,target=/tmp/.ldflags,from=buildkit-version \ - set -x; go build -ldflags "$(cat /tmp/.ldflags)" -o /usr/bin/buildctl ./cmd/buildctl && \ - file /usr/bin/buildctl && file /usr/bin/buildctl | egrep "statically linked|Mach-O|Windows" - -# build buildkitd binary -FROM buildkit-base AS buildkitd -ENV CGO_ENABLED=1 -ARG TARGETPLATFORM -RUN --mount=target=. --mount=target=/root/.cache,type=cache \ - --mount=source=/tmp/.ldflags,target=/tmp/.ldflags,from=buildkit-version \ - go build -ldflags "$(cat /tmp/.ldflags) -w -extldflags -static" -tags 'osusergo seccomp netgo cgo static_build ' -o /usr/bin/buildkitd ./cmd/buildkitd && \ - file /usr/bin/buildkitd | grep "statically linked" - -FROM scratch AS binaries-linux -COPY --from=runc /usr/bin/runc /buildkit-runc -COPY --from=buildctl /usr/bin/buildctl / -COPY --from=buildkitd /usr/bin/buildkitd / - -FROM scratch AS binaries-darwin -COPY --from=buildctl /usr/bin/buildctl / - -FROM scratch AS binaries-windows -COPY --from=buildctl /usr/bin/buildctl /buildctl.exe - -FROM binaries-$TARGETOS AS binaries - -FROM --platform=$BUILDPLATFORM alpine AS releaser -RUN apk add --no-cache tar gzip -WORKDIR /work -ARG TARGETPLATFORM -RUN --mount=from=binaries \ - --mount=source=/tmp/.version,target=/tmp/.version,from=buildkit-version \ - mkdir -p /out && tar czvf "/out/buildkit-$(cat /tmp/.version).$(echo $TARGETPLATFORM | sed 's/\//-/g').tar.gz" --mtime='2015-10-21 00:00Z' --sort=name --transform 's/^./bin/' . 
- -FROM scratch AS release -COPY --from=releaser /out/ / - -FROM tonistiigi/git@sha256:704fcc24a17b40833625ee37c4a4acf0e4aa90d0aa276926d63847097134defd AS buildkit-export -VOLUME /var/lib/buildkit - -FROM git AS containerd-src -ARG CONTAINERD_VERSION -WORKDIR /usr/src -RUN git clone https://github.com/containerd/containerd.git containerd - -FROM gobuild-base AS containerd-base -RUN apt-get install -y --no-install-recommends btrfs-tools btrfs-progs -WORKDIR /go/src/github.com/containerd/containerd - -FROM containerd-base AS containerd -ARG CONTAINERD_VERSION -RUN --mount=from=containerd-src,src=/usr/src/containerd,readwrite --mount=target=/root/.cache,type=cache \ - git fetch origin \ - && git checkout -q "$CONTAINERD_VERSION" \ - && make bin/containerd \ - && make bin/containerd-shim \ - && make bin/ctr \ - && mv bin /out - -# containerd v1.0 for integration tests -FROM containerd-base as containerd10 -ARG CONTAINERD10_VERSION -RUN --mount=from=containerd-src,src=/usr/src/containerd,readwrite --mount=target=/root/.cache,type=cache \ - git fetch origin \ - && git checkout -q "$CONTAINERD10_VERSION" \ - && make bin/containerd \ - && make bin/containerd-shim \ - && mv bin /out - -FROM tonistiigi/registry:$REGISTRY_VERSION AS registry - -FROM gobuild-base AS rootlesskit -ARG ROOTLESSKIT_VERSION -RUN git clone https://github.com/rootless-containers/rootlesskit.git /go/src/github.com/rootless-containers/rootlesskit -WORKDIR /go/src/github.com/rootless-containers/rootlesskit -ARG TARGETPLATFORM -RUN --mount=target=/root/.cache,type=cache \ - git checkout -q "$ROOTLESSKIT_VERSION" && \ - CGO_ENABLED=0 go build -o /rootlesskit ./cmd/rootlesskit && \ - file /rootlesskit | grep "statically linked" - -# Copy together all binaries needed for oci worker mode -FROM buildkit-export AS buildkit-buildkitd.oci_only -COPY --from=buildkitd.oci_only /usr/bin/buildkitd.oci_only /usr/bin/ -COPY --from=buildctl /usr/bin/buildctl /usr/bin/ -ENTRYPOINT ["buildkitd.oci_only"] - -# Copy together all binaries for containerd worker mode -FROM buildkit-export AS buildkit-buildkitd.containerd_only -COPY --from=buildkitd.containerd_only /usr/bin/buildkitd.containerd_only /usr/bin/ -COPY --from=buildctl /usr/bin/buildctl /usr/bin/ -ENTRYPOINT ["buildkitd.containerd_only"] - -# Copy together all binaries for oci+containerd mode -FROM buildkit-export AS buildkit-buildkitd -COPY --from=binaries / /usr/bin/ -ENTRYPOINT ["buildkitd"] - -FROM alpine AS containerd-runtime -COPY --from=runc /usr/bin/runc /usr/bin/ -COPY --from=containerd /out/containerd* /usr/bin/ -COPY --from=containerd /out/ctr /usr/bin/ -VOLUME /var/lib/containerd -VOLUME /run/containerd -ENTRYPOINT ["containerd"] - -FROM buildkit-base AS integration-tests -ENV BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR="1000:1000" -RUN apt-get install -y --no-install-recommends uidmap sudo \ - && useradd --create-home --home-dir /home/user --uid 1000 -s /bin/sh user \ - && echo "XDG_RUNTIME_DIR=/run/user/1000; export XDG_RUNTIME_DIR" >> /home/user/.profile \ - && mkdir -m 0700 -p /run/user/1000 \ - && chown -R user /run/user/1000 /home/user - # musl is needed to directly use the registry binary that is built on alpine -ENV BUILDKIT_INTEGRATION_CONTAINERD_EXTRA="containerd-1.0=/opt/containerd-1.0/bin" -COPY --from=rootlesskit /rootlesskit /usr/bin/ -COPY --from=containerd10 /out/containerd* /opt/containerd-1.0/bin/ -COPY --from=registry /bin/registry /usr/bin -COPY --from=runc /usr/bin/runc /usr/bin -COPY --from=containerd /out/containerd* /usr/bin/ -COPY --from=binaries / /usr/bin/ 
-COPY . . - -# To allow running buildkit in a container without CAP_SYS_ADMIN, we need to do either -# a) install newuidmap/newgidmap with file capabilities rather than SETUID (requires kernel >= 4.14) -# b) install newuidmap/newgidmap >= 20181028 -# We choose b) until kernel >= 4.14 gets widely adopted. -# See https://github.com/shadow-maint/shadow/pull/132 https://github.com/shadow-maint/shadow/pull/138 -# (Note: we don't use the patched idmap for the testsuite image) -FROM alpine:3.8 AS idmap -RUN apk add --no-cache autoconf automake build-base byacc gettext gettext-dev gcc git libcap-dev libtool libxslt -RUN git clone https://github.com/shadow-maint/shadow.git /shadow -WORKDIR /shadow -RUN git checkout 42324e501768675993235e03f7e4569135802d18 -RUN ./autogen.sh --disable-nls --disable-man --without-audit --without-selinux --without-acl --without-attr --without-tcb --without-nscd \ - && make \ - && cp src/newuidmap src/newgidmap /usr/bin - -FROM alpine AS rootless-base-internal -RUN apk add --no-cache git -COPY --from=idmap /usr/bin/newuidmap /usr/bin/newuidmap -COPY --from=idmap /usr/bin/newgidmap /usr/bin/newgidmap -RUN chmod u+s /usr/bin/newuidmap /usr/bin/newgidmap \ - && adduser -D -u 1000 user \ - && mkdir -p /run/user/1000 /home/user/.local/tmp /home/user/.local/share/buildkit \ - && chown -R user /run/user/1000 /home/user \ - && echo user:100000:65536 | tee /etc/subuid | tee /etc/subgid \ - && passwd -l root -# As of v3.8.1, Alpine does not set SUID bit on the busybox version of /bin/su. -# However, future version may set SUID bit on /bin/su. -# We lock the root account by `passwd -l root`, so as to disable su completely. - -# tonistiigi/buildkit:rootless-base is a pre-built multi-arch version of rootless-base-internal https://github.com/moby/buildkit/pull/666#pullrequestreview-161872350 -FROM tonistiigi/buildkit:rootless-base@sha256:51a8017db80e9757fc05071996947abb5d3e91508c3d641b01cfcaeff77e676e AS rootless-base-external -FROM rootless-base-$ROOTLESS_BASE_MODE AS rootless-base - -# Rootless mode. -# Still requires `--privileged`. -FROM rootless-base AS rootless -COPY --from=rootlesskit /rootlesskit /usr/bin/ -COPY --from=binaries / /usr/bin/ -USER user -ENV HOME /home/user -ENV USER user -ENV XDG_RUNTIME_DIR=/run/user/1000 -ENV TMPDIR=/home/user/.local/tmp -VOLUME /home/user/.local/share/buildkit -ENTRYPOINT ["rootlesskit", "buildkitd"] - - -FROM buildkit-${BUILDKIT_TARGET} - - diff --git a/vendor/github.com/moby/buildkit/hack/dockerfiles/vendor.Dockerfile b/vendor/github.com/moby/buildkit/hack/dockerfiles/vendor.Dockerfile deleted file mode 100644 index 2746799d3c08..000000000000 --- a/vendor/github.com/moby/buildkit/hack/dockerfiles/vendor.Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -FROM golang:1.11-alpine AS vndr -RUN apk add --no-cache git -# NOTE: hack scripts override VNDR_VERSION to a specific revision -ARG VNDR_VERSION=master -RUN go get -d github.com/LK4D4/vndr \ - && cd /go/src/github.com/LK4D4/vndr \ - && git checkout $VNDR_VERSION \ - && go install ./ -WORKDIR /go/src/github.com/moby/buildkit -COPY . . -# Remove vendor first to workaround https://github.com/LK4D4/vndr/issues/63. -RUN rm -rf vendor -RUN vndr --verbose --strict diff --git a/vendor/github.com/moby/buildkit/hack/lint b/vendor/github.com/moby/buildkit/hack/lint deleted file mode 100755 index 9e87c106dd9d..000000000000 --- a/vendor/github.com/moby/buildkit/hack/lint +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash - -. 
$(dirname $0)/util -set -eu -o pipefail -x - -: ${CONTINUOUS_INTEGRATION=} - -progressFlag="" -if [ "$CONTINUOUS_INTEGRATION" == "true" ]; then progressFlag="--progress=plain"; fi - -lintLegacy() { - docker build -f ./hack/dockerfiles/lint.Dockerfile --force-rm . -} - -lintDocker() { - export DOCKER_BUILDKIT=1 - iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX) - docker build --iidfile $iidfile -f ./hack/dockerfiles/lint.buildkit.Dockerfile --force-rm . - iid=$(cat $iidfile) - docker rmi $iid - rm -f $iidfile -} - -lint() { - buildctl build $progressFlag --frontend=dockerfile.v0 \ - --local context=. --local dockerfile=. \ - --frontend-opt filename=./hack/dockerfiles/lint.buildkit.Dockerfile -} - -case $buildmode in -"buildkit") - lint - ;; -"docker-buildkit") - lintDocker - ;; -*) - lintLegacy - ;; -esac diff --git a/vendor/github.com/moby/buildkit/hack/release b/vendor/github.com/moby/buildkit/hack/release deleted file mode 100755 index 726b81b4e7ba..000000000000 --- a/vendor/github.com/moby/buildkit/hack/release +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env bash - -TAG=$1 -REPO=$2 -PUSH=$3 - -set -eu -o pipefail - -: ${PLATFORMS=linux/amd64} -: ${CONTINUOUS_INTEGRATION=} - -progressFlag="" -if [ "$CONTINUOUS_INTEGRATION" == "true" ]; then progressFlag="--progress=plain"; fi - - -usage() { - echo "usage: ./hack/release <tag> <repo> [push]" - exit 1 -} - -if [ -z "$TAG" ] || [ -z "$REPO" ]; then - usage -fi - -pushFlag="" -if [ "$PUSH" = "push" ]; then - pushFlag="--exporter-opt push=true" -fi - -tagLatest="" -tagLatestRootless="" -if [[ "$(git describe --tags --match "v[0-9]*")" == "$TAG" ]]; then - tagLatest=",$REPO:latest" - tagLatestRootless=",$REPO:rootless" -fi - -set -x - -buildctl build $progressFlag --frontend=dockerfile.v0 \ - --local context=. --local dockerfile=. \ - --frontend-opt filename=./hack/dockerfiles/test.buildkit.Dockerfile \ - --frontend-opt platform=$PLATFORMS \ - --exporter image \ - --exporter-opt name=$REPO:$TAG$tagLatest $pushFlag - -buildctl build $progressFlag --frontend=dockerfile.v0 \ - --local context=. --local dockerfile=. \ - --frontend-opt target=rootless \ - --frontend-opt filename=./hack/dockerfiles/test.buildkit.Dockerfile \ - --frontend-opt platform=$PLATFORMS \ - --exporter image \ - --exporter-opt name=$REPO:$TAG-rootless$tagLatestRootless $pushFlag \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/hack/release-tar b/vendor/github.com/moby/buildkit/hack/release-tar deleted file mode 100755 index e93ea8032e5b..000000000000 --- a/vendor/github.com/moby/buildkit/hack/release-tar +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash - -TAG=$1 -OUT=$2 - -set -eu -o pipefail - -: ${PLATFORMS=linux/amd64} -: ${CONTINUOUS_INTEGRATION=} - -progressFlag="" -if [ "$CONTINUOUS_INTEGRATION" == "true" ]; then progressFlag="--progress=plain"; fi - - -usage() { - echo "usage: ./hack/release-tar <tag> <out>" - exit 1 -} - -if [ -z "$TAG" ] || [ -z "$OUT" ]; then - usage -fi - - -set -x - -buildctl build $progressFlag --frontend=dockerfile.v0 \ - --local context=. --local dockerfile=. \ - --frontend-opt filename=./hack/dockerfiles/test.buildkit.Dockerfile \ - --frontend-opt target=release \ - --frontend-opt platform=$PLATFORMS \ - --exporter local \ - --exporter-opt output=$OUT diff --git a/vendor/github.com/moby/buildkit/hack/test b/vendor/github.com/moby/buildkit/hack/test deleted file mode 100755 index f1a1251f0592..000000000000 --- a/vendor/github.com/moby/buildkit/hack/test +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env bash - -. 
$(dirname $0)/util -set -eu -o pipefail - -: ${TEST_INTEGRATION=} -: ${TEST_GATEWAY=} -: ${TEST_DOCKERFILE=} -: ${DOCKERFILE_RELEASES=} -: ${CONTINUOUS_INTEGRATION=} -: ${BUILDKIT_REGISTRY_MIRROR_DIR=} - -progressFlag="" -if [ "$CONTINUOUS_INTEGRATION" == "true" ]; then progressFlag="--progress=plain"; fi - -if [ "$#" == 0 ]; then TEST_INTEGRATION=1; fi - -while test $# -gt 0 - do - case "$1" in - gateway) - TEST_GATEWAY=1 - ;; - dockerfile) - TEST_DOCKERFILE=1 - ;; - integration) - TEST_INTEGRATION=1 - ;; - *) - echo "unknown arg $1" - ;; - esac - shift -done - -iid="buildkit-tests" -iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX) -set -x - -case $buildmode in -"buildkit") - tmpfile=$(mktemp -t docker-iidfile.XXXXXXXXXX) - buildctl build $progressFlag --frontend=dockerfile.v0 --local context=. --local dockerfile=. \ - --frontend-opt target=integration-tests \ - --frontend-opt filename=./hack/dockerfiles/test.buildkit.Dockerfile \ - --exporter=docker --exporter-opt name=$iid --exporter-opt output=$tmpfile - docker load -i $tmpfile - rm $tmpfile - ;; -*) - case $buildmode in - "docker-buildkit") - export DOCKER_BUILDKIT=1 - docker build --iidfile $iidfile -f ./hack/dockerfiles/test.buildkit.Dockerfile --target integration-tests --force-rm . - ;; - *) - docker build --iidfile $iidfile -f ./hack/dockerfiles/test.Dockerfile --target integration-tests --force-rm . - ;; - esac - iid=$(cat $iidfile) - ;; -esac - -cacheVolume=$(docker create -v /root/.cache -v /root/.cache/registry alpine) - -if [ "$TEST_INTEGRATION" == 1 ]; then - docker run --rm -v /tmp --volumes-from=$cacheVolume -e BUILDKIT_REGISTRY_MIRROR_DIR=/root/.cache/registry --privileged $iid go test ${TESTFLAGS:--v} ${TESTPKGS:-./...} -fi - - -if [ "$TEST_GATEWAY" == 1 ]; then - docker run --rm $iid go build ./frontend/gateway/client -fi - - -if [ "$TEST_DOCKERFILE" == 1 ]; then - if [ -z $DOCKERFILE_RELEASES ]; then - DOCKERFILE_RELEASES="mainline experimental mounts secrets ssh" - fi - - - for release in $DOCKERFILE_RELEASES; do - buildtags=$(cat ./frontend/dockerfile/release/$release/tags) - tarout=$(mktemp -t dockerfile-frontend.XXXXXXXXXX) - case $buildmode in - "buildkit") - buildctl build $progressFlag --frontend=dockerfile.v0 --local context=. --local dockerfile=. \ - --frontend-opt filename=./frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile \ - --frontend-opt build-arg:BUILDTAGS="$buildtags" \ - --exporter=oci --exporter-opt output=$tarout - ;; - "docker-buildkit") - dfiidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX) - docker build --iidfile=$dfiidfile -f ./frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile --build-arg BUILDTAGS="$buildtags" . 
- dfiid=$(cat $dfiidfile) - docker save -o $tarout $dfiid - docker rmi $dfiid - rm $dfiidfile - ;; - esac - - if [ -s $tarout ]; then - cid=$(docker create -v /tmp --rm --privileged --volumes-from=$cacheVolume -e BUILDKIT_REGISTRY_MIRROR_DIR=/root/.cache/registry -e BUILDKIT_WORKER_RANDOM=1 -e FRONTEND_GATEWAY_ONLY=local:/$release.tar -e EXTERNAL_DF_FRONTEND=/dockerfile-frontend $iid go test --count=1 -tags "$buildtags" ${TESTFLAGS:--v} ./frontend/dockerfile) - docker cp $tarout $cid:/$release.tar - docker start -a $cid - fi - rm $tarout - done -fi - -docker rm -v $cacheVolume - -case $buildmode in -"docker-buildkit") - rm "$iidfile" - docker rmi $iid - ;; -"legacy") - rm "$iidfile" - ;; -esac diff --git a/vendor/github.com/moby/buildkit/hack/update-generated-files b/vendor/github.com/moby/buildkit/hack/update-generated-files deleted file mode 100755 index df25e793b5ce..000000000000 --- a/vendor/github.com/moby/buildkit/hack/update-generated-files +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -. $(dirname $0)/util -set -eu -o pipefail -x - -: ${CONTINUOUS_INTEGRATION=} - -progressFlag="" -if [ "$CONTINUOUS_INTEGRATION" == "true" ]; then progressFlag="--progress=plain"; fi - -gogo_version=$(awk '$1 == "github.com/gogo/protobuf" { print $2 }' vendor.conf) -case $buildmode in -"buildkit") - buildctl build $progressFlag --frontend=dockerfile.v0 --local context=. --local dockerfile=. \ - --frontend-opt build-arg:GOGO_VERSION=$gogo_version \ - --frontend-opt target=update \ - --frontend-opt filename=./hack/dockerfiles/generated-files.buildkit.Dockerfile \ - --exporter=local --exporter-opt output=. - ;; -*) - iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX) - case $buildmode in - "docker-buildkit") - export DOCKER_BUILDKIT=1 - docker build --build-arg GOGO_VERSION=$gogo_version --iidfile $iidfile -f ./hack/dockerfiles/generated-files.buildkit.Dockerfile --target update --force-rm . - ;; - *) - docker build --build-arg GOGO_VERSION=$gogo_version --iidfile $iidfile -f ./hack/dockerfiles/generated-files.Dockerfile --target update --force-rm . - ;; - esac - iid=$(cat $iidfile) - cid=$(docker create $iid noop) - - case $buildmode in - "docker-buildkit") - docker export $cid | tar -xf - - ;; - *) - docker export $cid | tar -xf - --strip-components=1 generated-files - ;; - esac - - docker rm $cid - - rm -f $iidfile - ;; -esac diff --git a/vendor/github.com/moby/buildkit/hack/update-vendor b/vendor/github.com/moby/buildkit/hack/update-vendor deleted file mode 100755 index 590951517d03..000000000000 --- a/vendor/github.com/moby/buildkit/hack/update-vendor +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -eu -o pipefail -x - -iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX) -docker build --build-arg VNDR_VERSION=1fc68ee0c852556a9ed53cbde16247033f104111 --iidfile $iidfile -f ./hack/dockerfiles/vendor.Dockerfile --force-rm . -iid=$(cat $iidfile) -cid=$(docker create $iid noop) -rm -rf ./vendor -docker cp $cid:/go/src/github.com/moby/buildkit/vendor . 
-docker rm $cid -rm -f $iidfile diff --git a/vendor/github.com/moby/buildkit/hack/util b/vendor/github.com/moby/buildkit/hack/util deleted file mode 100755 index 9948e92e85d7..000000000000 --- a/vendor/github.com/moby/buildkit/hack/util +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash - -: ${PREFER_BUILDCTL=} -: ${PREFER_LEGACY=} - -newerEqualThan() { # $1=minimum wanted version $2=actual-version - [ "$1" = "$(echo -e "$1\n$2" | sort -V | head -n 1)" ] -} - -buildmode="legacy" -if [ "$PREFER_BUILDCTL" == "1" ]; then - buildmode="buildkit"; -else - serverVersion=$(docker info --format '{{.ServerVersion}}') - experimental=$(docker info --format '{{.ExperimentalBuild}}') - if [ "$PREFER_LEGACY" != "1" ] && ( newerEqualThan "18.09" $serverVersion || \ - ( newerEqualThan "18.06" $serverVersion && [ "true" = "$experimental" ] ) || \ - [ "$DOCKER_BUILDKIT" = "1" ]); then - buildmode="docker-buildkit"; - fi -fi \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/hack/validate-generated-files b/vendor/github.com/moby/buildkit/hack/validate-generated-files deleted file mode 100755 index 731a500e0d3d..000000000000 --- a/vendor/github.com/moby/buildkit/hack/validate-generated-files +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash - -: ${CONTINUOUS_INTEGRATION=} - -progressFlag="" -if [ "$CONTINUOUS_INTEGRATION" == "true" ]; then progressFlag="--progress=plain"; fi - -case ${1:-} in -'') - . $(dirname $0)/util - gogo_version=$(awk '$1 == "github.com/gogo/protobuf" { print $2 }' vendor.conf) - case $buildmode in - "buildkit") - buildctl build $progressFlag --frontend=dockerfile.v0 --local context=. --local dockerfile=. --frontend-opt build-arg:GOGO_VERSION=$gogo_version --frontend-opt filename=./hack/dockerfiles/generated-files.buildkit.Dockerfile - ;; - "docker-buildkit") - export DOCKER_BUILDKIT=1 - iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX) - docker build --iidfile $iidfile --build-arg GOGO_VERSION=$gogo_version -f ./hack/dockerfiles/generated-files.buildkit.Dockerfile --target validate --force-rm . || exit 1 - iid=$(cat $iidfile) - docker rmi $iid - rm -f $iidfile - ;; - *) - docker build --build-arg GOGO_VERSION=$gogo_version -f ./hack/dockerfiles/generated-files.Dockerfile --target validate --force-rm . - ;; - esac - ;; -check) - diffs="$(git status --porcelain -- **/*.pb.go 2>/dev/null)" - set +x - if [ "$diffs" ] ; then - { - echo 'The result of "go generate" differs' - echo - echo "$diffs" - echo - echo 'Please update with "make generated-files"' - echo - } >&2 - exit 1 - fi - echo 'Congratulations! All auto generated files are correct.' - ;; -esac diff --git a/vendor/github.com/moby/buildkit/hack/validate-vendor b/vendor/github.com/moby/buildkit/hack/validate-vendor deleted file mode 100755 index ecb9298cf611..000000000000 --- a/vendor/github.com/moby/buildkit/hack/validate-vendor +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -set -eu -o pipefail -x - -iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX) -docker build --build-arg VNDR_VERSION=1fc68ee0c852556a9ed53cbde16247033f104111 --iidfile $iidfile -f ./hack/dockerfiles/vendor.Dockerfile --force-rm . -iid=$(cat $iidfile) -diffs="$(docker run --rm $iid git status --porcelain -- vendor 2>/dev/null)" -if [ "$diffs" ]; then - { - set +x - echo 'The result of vndr differs' - echo - echo "$diffs" - echo - echo 'Please vendor your package with github.com/LK4D4/vndr.' - echo - } >&2 - false -fi -echo 'Congratulations! All vendoring changes are done the right way.' 
-rm -f $iidfile diff --git a/vendor/github.com/moby/buildkit/identity/randomid.go b/vendor/github.com/moby/buildkit/identity/randomid.go deleted file mode 100644 index 0eb13527aac5..000000000000 --- a/vendor/github.com/moby/buildkit/identity/randomid.go +++ /dev/null @@ -1,53 +0,0 @@ -package identity - -import ( - cryptorand "crypto/rand" - "fmt" - "io" - "math/big" -) - -var ( - // idReader is used for random id generation. This declaration allows us to - // replace it for testing. - idReader = cryptorand.Reader -) - -// parameters for random identifier generation. We can tweak this when there is -// time for further analysis. -const ( - randomIDEntropyBytes = 17 - randomIDBase = 36 - - // To ensure that all identifiers are fixed length, we make sure they - // get padded out or truncated to 25 characters. - // - // For academics, f5lxx1zz5pnorynqglhzmsp33 == 2^128 - 1. This value - // was calculated from floor(log(2^128-1, 36)) + 1. - // - // While 128 bits is the largest whole-byte size that fits into 25 - // base-36 characters, we generate an extra byte of entropy to fill - // in the high bits, which would otherwise be 0. This gives us a more - // even distribution of the first character. - // - // See http://mathworld.wolfram.com/NumberLength.html for more information. - maxRandomIDLength = 25 -) - -// NewID generates a new identifier for use where random identifiers with low -// collision probability are required. -// -// With the parameters in this package, the generated identifier will provide -// ~129 bits of entropy encoded with base36. Leading padding is added if the -// string is less 25 bytes. We do not intend to maintain this interface, so -// identifiers should be treated opaquely. -func NewID() string { - var p [randomIDEntropyBytes]byte - - if _, err := io.ReadFull(idReader, p[:]); err != nil { - panic(fmt.Errorf("failed to read random bytes: %v", err)) - } - - p[0] |= 0x80 // set high bit to avoid the need for padding - return (&big.Int{}).SetBytes(p[:]).Text(randomIDBase)[1 : maxRandomIDLength+1] -} diff --git a/vendor/github.com/moby/buildkit/session/auth/auth.go b/vendor/github.com/moby/buildkit/session/auth/auth.go deleted file mode 100644 index 2b96a7cef1a3..000000000000 --- a/vendor/github.com/moby/buildkit/session/auth/auth.go +++ /dev/null @@ -1,26 +0,0 @@ -package auth - -import ( - "context" - - "github.com/moby/buildkit/session" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func CredentialsFunc(ctx context.Context, c session.Caller) func(string) (string, string, error) { - return func(host string) (string, string, error) { - client := NewAuthClient(c.Conn()) - - resp, err := client.Credentials(ctx, &CredentialsRequest{ - Host: host, - }) - if err != nil { - if st, ok := status.FromError(err); ok && st.Code() == codes.Unimplemented { - return "", "", nil - } - return "", "", err - } - return resp.Username, resp.Secret, nil - } -} diff --git a/vendor/github.com/moby/buildkit/session/auth/auth.pb.go b/vendor/github.com/moby/buildkit/session/auth/auth.pb.go deleted file mode 100644 index 8993b85b96ed..000000000000 --- a/vendor/github.com/moby/buildkit/session/auth/auth.pb.go +++ /dev/null @@ -1,673 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: auth.proto - -/* - Package auth is a generated protocol buffer package. 
- - It is generated from these files: - auth.proto - - It has these top-level messages: - CredentialsRequest - CredentialsResponse -*/ -package auth - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" - -import context "golang.org/x/net/context" -import grpc "google.golang.org/grpc" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type CredentialsRequest struct { - Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` -} - -func (m *CredentialsRequest) Reset() { *m = CredentialsRequest{} } -func (*CredentialsRequest) ProtoMessage() {} -func (*CredentialsRequest) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} } - -func (m *CredentialsRequest) GetHost() string { - if m != nil { - return m.Host - } - return "" -} - -type CredentialsResponse struct { - Username string `protobuf:"bytes,1,opt,name=Username,proto3" json:"Username,omitempty"` - Secret string `protobuf:"bytes,2,opt,name=Secret,proto3" json:"Secret,omitempty"` -} - -func (m *CredentialsResponse) Reset() { *m = CredentialsResponse{} } -func (*CredentialsResponse) ProtoMessage() {} -func (*CredentialsResponse) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} } - -func (m *CredentialsResponse) GetUsername() string { - if m != nil { - return m.Username - } - return "" -} - -func (m *CredentialsResponse) GetSecret() string { - if m != nil { - return m.Secret - } - return "" -} - -func init() { - proto.RegisterType((*CredentialsRequest)(nil), "moby.filesync.v1.CredentialsRequest") - proto.RegisterType((*CredentialsResponse)(nil), "moby.filesync.v1.CredentialsResponse") -} -func (this *CredentialsRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CredentialsRequest) - if !ok { - that2, ok := that.(CredentialsRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Host != that1.Host { - return false - } - return true -} -func (this *CredentialsResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CredentialsResponse) - if !ok { - that2, ok := that.(CredentialsResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Username != that1.Username { - return false - } - if this.Secret != that1.Secret { - return false - } - return true -} -func (this *CredentialsRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&auth.CredentialsRequest{") - s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *CredentialsResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&auth.CredentialsResponse{") - s = append(s, "Username: "+fmt.Sprintf("%#v", this.Username)+",\n") - 
s = append(s, "Secret: "+fmt.Sprintf("%#v", this.Secret)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringAuth(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for Auth service - -type AuthClient interface { - Credentials(ctx context.Context, in *CredentialsRequest, opts ...grpc.CallOption) (*CredentialsResponse, error) -} - -type authClient struct { - cc *grpc.ClientConn -} - -func NewAuthClient(cc *grpc.ClientConn) AuthClient { - return &authClient{cc} -} - -func (c *authClient) Credentials(ctx context.Context, in *CredentialsRequest, opts ...grpc.CallOption) (*CredentialsResponse, error) { - out := new(CredentialsResponse) - err := grpc.Invoke(ctx, "/moby.filesync.v1.Auth/Credentials", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Auth service - -type AuthServer interface { - Credentials(context.Context, *CredentialsRequest) (*CredentialsResponse, error) -} - -func RegisterAuthServer(s *grpc.Server, srv AuthServer) { - s.RegisterService(&_Auth_serviceDesc, srv) -} - -func _Auth_Credentials_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CredentialsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).Credentials(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.filesync.v1.Auth/Credentials", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).Credentials(ctx, req.(*CredentialsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Auth_serviceDesc = grpc.ServiceDesc{ - ServiceName: "moby.filesync.v1.Auth", - HandlerType: (*AuthServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Credentials", - Handler: _Auth_Credentials_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "auth.proto", -} - -func (m *CredentialsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CredentialsRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Host) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - } - return i, nil -} - -func (m *CredentialsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CredentialsResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Username) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - } - if len(m.Secret) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Secret))) - i += 
copy(dAtA[i:], m.Secret) - } - return i, nil -} - -func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *CredentialsRequest) Size() (n int) { - var l int - _ = l - l = len(m.Host) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - return n -} - -func (m *CredentialsResponse) Size() (n int) { - var l int - _ = l - l = len(m.Username) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.Secret) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - return n -} - -func sovAuth(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozAuth(x uint64) (n int) { - return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CredentialsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CredentialsRequest{`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `}`, - }, "") - return s -} -func (this *CredentialsResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CredentialsResponse{`, - `Username:` + fmt.Sprintf("%v", this.Username) + `,`, - `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, - `}`, - }, "") - return s -} -func valueToStringAuth(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CredentialsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CredentialsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CredentialsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CredentialsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := 
int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CredentialsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CredentialsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Username = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Secret = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAuth(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthAuth - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipAuth(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal 
wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) } - -var fileDescriptorAuth = []byte{ - // 224 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x2c, 0x2d, 0xc9, - 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, 0x4b, 0xcb, 0xcc, - 0x49, 0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x33, 0x54, 0xd2, 0xe0, 0x12, 0x72, 0x2e, 0x4a, 0x4d, - 0x49, 0xcd, 0x2b, 0xc9, 0x4c, 0xcc, 0x29, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x12, - 0xe2, 0x62, 0xf1, 0xc8, 0x2f, 0x2e, 0x91, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x95, - 0x3c, 0xb9, 0x84, 0x51, 0x54, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x0a, 0x49, 0x71, 0x71, 0x84, - 0x16, 0xa7, 0x16, 0xe5, 0x25, 0xe6, 0xa6, 0x42, 0x95, 0xc3, 0xf9, 0x42, 0x62, 0x5c, 0x6c, 0xc1, - 0xa9, 0xc9, 0x45, 0xa9, 0x25, 0x12, 0x4c, 0x60, 0x19, 0x28, 0xcf, 0x28, 0x89, 0x8b, 0xc5, 0xb1, - 0xb4, 0x24, 0x43, 0x28, 0x8a, 0x8b, 0x1b, 0xc9, 0x48, 0x21, 0x15, 0x3d, 0x74, 0xe7, 0xe9, 0x61, - 0xba, 0x4d, 0x4a, 0x95, 0x80, 0x2a, 0x88, 0xbb, 0x9c, 0x8c, 0x2e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, - 0x50, 0x8e, 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, - 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, - 0x7c, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0x43, 0x14, 0x0b, 0x28, 0x90, 0x92, 0xd8, 0xc0, - 0xa1, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x73, 0xf3, 0xd5, 0x33, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/moby/buildkit/session/auth/auth.proto b/vendor/github.com/moby/buildkit/session/auth/auth.proto deleted file mode 100644 index 593312747950..000000000000 --- a/vendor/github.com/moby/buildkit/session/auth/auth.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package moby.filesync.v1; - -option go_package = "auth"; - -service Auth{ - rpc Credentials(CredentialsRequest) returns (CredentialsResponse); -} - - -message CredentialsRequest { - string Host = 1; -} - -message CredentialsResponse { - string Username = 1; - string Secret = 2; -} diff --git a/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go b/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go deleted file mode 100644 index a286567e44d4..000000000000 --- a/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go +++ /dev/null @@ -1,44 +0,0 @@ -package authprovider - -import ( - "context" - "io/ioutil" - - "github.com/docker/cli/cli/config" - "github.com/docker/cli/cli/config/configfile" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/auth" - "google.golang.org/grpc" -) - -func NewDockerAuthProvider() session.Attachable { - return &authProvider{ - config: config.LoadDefaultConfigFile(ioutil.Discard), - } -} - -type authProvider struct { - config *configfile.ConfigFile -} - -func (ap *authProvider) Register(server *grpc.Server) { - auth.RegisterAuthServer(server, ap) -} - -func (ap *authProvider) Credentials(ctx context.Context, req *auth.CredentialsRequest) (*auth.CredentialsResponse, error) { - if req.Host == "registry-1.docker.io" { - req.Host = "https://index.docker.io/v1/" - } - ac, err := ap.config.GetAuthConfig(req.Host) - if err != nil { - return nil, err - } - res := 
&auth.CredentialsResponse{} - if ac.IdentityToken != "" { - res.Secret = ac.IdentityToken - } else { - res.Username = ac.Username - res.Secret = ac.Password - } - return res, nil -} diff --git a/vendor/github.com/moby/buildkit/session/auth/generate.go b/vendor/github.com/moby/buildkit/session/auth/generate.go deleted file mode 100644 index 687aa7cc0b5b..000000000000 --- a/vendor/github.com/moby/buildkit/session/auth/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package auth - -//go:generate protoc --gogoslick_out=plugins=grpc:. auth.proto diff --git a/vendor/github.com/moby/buildkit/session/context.go b/vendor/github.com/moby/buildkit/session/context.go deleted file mode 100644 index 31a29f0868a3..000000000000 --- a/vendor/github.com/moby/buildkit/session/context.go +++ /dev/null @@ -1,22 +0,0 @@ -package session - -import "context" - -type contextKeyT string - -var contextKey = contextKeyT("buildkit/session-id") - -func NewContext(ctx context.Context, id string) context.Context { - if id != "" { - return context.WithValue(ctx, contextKey, id) - } - return ctx -} - -func FromContext(ctx context.Context) string { - v := ctx.Value(contextKey) - if v == nil { - return "" - } - return v.(string) -} diff --git a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go deleted file mode 100644 index 8334ab60db28..000000000000 --- a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go +++ /dev/null @@ -1,110 +0,0 @@ -package filesync - -import ( - "bufio" - io "io" - "os" - "time" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/tonistiigi/fsutil" - fstypes "github.com/tonistiigi/fsutil/types" - "google.golang.org/grpc" -) - -func sendDiffCopy(stream grpc.Stream, fs fsutil.FS, progress progressCb) error { - return fsutil.Send(stream.Context(), stream, fs, progress) -} - -func newStreamWriter(stream grpc.ClientStream) io.WriteCloser { - wc := &streamWriterCloser{ClientStream: stream} - return &bufferedWriteCloser{Writer: bufio.NewWriter(wc), Closer: wc} -} - -type bufferedWriteCloser struct { - *bufio.Writer - io.Closer -} - -func (bwc *bufferedWriteCloser) Close() error { - if err := bwc.Writer.Flush(); err != nil { - return err - } - return bwc.Closer.Close() -} - -type streamWriterCloser struct { - grpc.ClientStream -} - -func (wc *streamWriterCloser) Write(dt []byte) (int, error) { - if err := wc.ClientStream.SendMsg(&BytesMessage{Data: dt}); err != nil { - return 0, err - } - return len(dt), nil -} - -func (wc *streamWriterCloser) Close() error { - if err := wc.ClientStream.CloseSend(); err != nil { - return err - } - // block until receiver is done - var bm BytesMessage - if err := wc.ClientStream.RecvMsg(&bm); err != io.EOF { - return err - } - return nil -} - -func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progressCb) error { - st := time.Now() - defer func() { - logrus.Debugf("diffcopy took: %v", time.Since(st)) - }() - var cf fsutil.ChangeFunc - var ch fsutil.ContentHasher - if cu != nil { - cu.MarkSupported(true) - cf = cu.HandleChange - ch = cu.ContentHasher() - } - return fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{ - NotifyHashed: cf, - ContentHasher: ch, - ProgressCb: progress, - }) -} - -func syncTargetDiffCopy(ds grpc.Stream, dest string) error { - if err := os.MkdirAll(dest, 0700); err != nil { - return err - } - return fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{ - Merge: true, - Filter: func() func(*fstypes.Stat) bool { - uid := 
os.Getuid() - gid := os.Getgid() - return func(st *fstypes.Stat) bool { - st.Uid = uint32(uid) - st.Gid = uint32(gid) - return true - } - }(), - }) -} - -func writeTargetFile(ds grpc.Stream, wc io.WriteCloser) error { - for { - bm := BytesMessage{} - if err := ds.RecvMsg(&bm); err != nil { - if errors.Cause(err) == io.EOF { - return nil - } - return err - } - if _, err := wc.Write(bm.Data); err != nil { - return err - } - } -} diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.go deleted file mode 100644 index ae6775f70b92..000000000000 --- a/vendor/github.com/moby/buildkit/session/filesync/filesync.go +++ /dev/null @@ -1,297 +0,0 @@ -package filesync - -import ( - "context" - "fmt" - io "io" - "os" - "strings" - - "github.com/moby/buildkit/session" - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil" - fstypes "github.com/tonistiigi/fsutil/types" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -const ( - keyOverrideExcludes = "override-excludes" - keyIncludePatterns = "include-patterns" - keyExcludePatterns = "exclude-patterns" - keyFollowPaths = "followpaths" - keyDirName = "dir-name" -) - -type fsSyncProvider struct { - dirs map[string]SyncedDir - p progressCb - doneCh chan error -} - -type SyncedDir struct { - Name string - Dir string - Excludes []string - Map func(*fstypes.Stat) bool -} - -// NewFSSyncProvider creates a new provider for sending files from client -func NewFSSyncProvider(dirs []SyncedDir) session.Attachable { - p := &fsSyncProvider{ - dirs: map[string]SyncedDir{}, - } - for _, d := range dirs { - p.dirs[d.Name] = d - } - return p -} - -func (sp *fsSyncProvider) Register(server *grpc.Server) { - RegisterFileSyncServer(server, sp) -} - -func (sp *fsSyncProvider) DiffCopy(stream FileSync_DiffCopyServer) error { - return sp.handle("diffcopy", stream) -} -func (sp *fsSyncProvider) TarStream(stream FileSync_TarStreamServer) error { - return sp.handle("tarstream", stream) -} - -func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retErr error) { - var pr *protocol - for _, p := range supportedProtocols { - if method == p.name && isProtoSupported(p.name) { - pr = &p - break - } - } - if pr == nil { - return errors.New("failed to negotiate protocol") - } - - opts, _ := metadata.FromIncomingContext(stream.Context()) // if no metadata continue with empty object - - dirName := "" - name, ok := opts[keyDirName] - if ok && len(name) > 0 { - dirName = name[0] - } - - dir, ok := sp.dirs[dirName] - if !ok { - return status.Errorf(codes.NotFound, "no access allowed to dir %q", dirName) - } - - excludes := opts[keyExcludePatterns] - if len(dir.Excludes) != 0 && (len(opts[keyOverrideExcludes]) == 0 || opts[keyOverrideExcludes][0] != "true") { - excludes = dir.Excludes - } - includes := opts[keyIncludePatterns] - - followPaths := opts[keyFollowPaths] - - var progress progressCb - if sp.p != nil { - progress = sp.p - sp.p = nil - } - - var doneCh chan error - if sp.doneCh != nil { - doneCh = sp.doneCh - sp.doneCh = nil - } - err := pr.sendFn(stream, fsutil.NewFS(dir.Dir, &fsutil.WalkOpt{ - ExcludePatterns: excludes, - IncludePatterns: includes, - FollowPaths: followPaths, - Map: dir.Map, - }), progress) - if doneCh != nil { - if err != nil { - doneCh <- err - } - close(doneCh) - } - return err -} - -func (sp *fsSyncProvider) SetNextProgressCallback(f func(int, bool), doneCh chan error) { - sp.p = 
f - sp.doneCh = doneCh -} - -type progressCb func(int, bool) - -type protocol struct { - name string - sendFn func(stream grpc.Stream, fs fsutil.FS, progress progressCb) error - recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb) error -} - -func isProtoSupported(p string) bool { - // TODO: this should be removed after testing if stability is confirmed - if override := os.Getenv("BUILD_STREAM_PROTOCOL"); override != "" { - return strings.EqualFold(p, override) - } - return true -} - -var supportedProtocols = []protocol{ - { - name: "diffcopy", - sendFn: sendDiffCopy, - recvFn: recvDiffCopy, - }, -} - -// FSSendRequestOpt defines options for FSSend request -type FSSendRequestOpt struct { - Name string - IncludePatterns []string - ExcludePatterns []string - FollowPaths []string - OverrideExcludes bool // deprecated: this is used by docker/cli for automatically loading .dockerignore from the directory - DestDir string - CacheUpdater CacheUpdater - ProgressCb func(int, bool) -} - -// CacheUpdater is an object capable of sending notifications for the cache hash changes -type CacheUpdater interface { - MarkSupported(bool) - HandleChange(fsutil.ChangeKind, string, os.FileInfo, error) error - ContentHasher() fsutil.ContentHasher -} - -// FSSync initializes a transfer of files -func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error { - var pr *protocol - for _, p := range supportedProtocols { - if isProtoSupported(p.name) && c.Supports(session.MethodURL(_FileSync_serviceDesc.ServiceName, p.name)) { - pr = &p - break - } - } - if pr == nil { - return errors.New("no local sources enabled") - } - - opts := make(map[string][]string) - if opt.OverrideExcludes { - opts[keyOverrideExcludes] = []string{"true"} - } - - if opt.IncludePatterns != nil { - opts[keyIncludePatterns] = opt.IncludePatterns - } - - if opt.ExcludePatterns != nil { - opts[keyExcludePatterns] = opt.ExcludePatterns - } - - if opt.FollowPaths != nil { - opts[keyFollowPaths] = opt.FollowPaths - } - - opts[keyDirName] = []string{opt.Name} - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - client := NewFileSyncClient(c.Conn()) - - var stream grpc.ClientStream - - ctx = metadata.NewOutgoingContext(ctx, opts) - - switch pr.name { - case "tarstream": - cc, err := client.TarStream(ctx) - if err != nil { - return err - } - stream = cc - case "diffcopy": - cc, err := client.DiffCopy(ctx) - if err != nil { - return err - } - stream = cc - default: - panic(fmt.Sprintf("invalid protocol: %q", pr.name)) - } - - return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb) -} - -// NewFSSyncTargetDir allows writing into a directory -func NewFSSyncTargetDir(outdir string) session.Attachable { - p := &fsSyncTarget{ - outdir: outdir, - } - return p -} - -// NewFSSyncTarget allows writing into an io.WriteCloser -func NewFSSyncTarget(w io.WriteCloser) session.Attachable { - p := &fsSyncTarget{ - outfile: w, - } - return p -} - -type fsSyncTarget struct { - outdir string - outfile io.WriteCloser -} - -func (sp *fsSyncTarget) Register(server *grpc.Server) { - RegisterFileSendServer(server, sp) -} - -func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) error { - if sp.outdir != "" { - return syncTargetDiffCopy(stream, sp.outdir) - } - if sp.outfile == nil { - return errors.New("empty outfile and outdir") - } - defer sp.outfile.Close() - return writeTargetFile(stream, sp.outfile) -} - -func CopyToCaller(ctx context.Context, fs fsutil.FS, c session.Caller, progress 
func(int, bool)) error { - method := session.MethodURL(_FileSend_serviceDesc.ServiceName, "diffcopy") - if !c.Supports(method) { - return errors.Errorf("method %s not supported by the client", method) - } - - client := NewFileSendClient(c.Conn()) - - cc, err := client.DiffCopy(ctx) - if err != nil { - return err - } - - return sendDiffCopy(cc, fs, progress) -} - -func CopyFileWriter(ctx context.Context, c session.Caller) (io.WriteCloser, error) { - method := session.MethodURL(_FileSend_serviceDesc.ServiceName, "diffcopy") - if !c.Supports(method) { - return nil, errors.Errorf("method %s not supported by the client", method) - } - - client := NewFileSendClient(c.Conn()) - - cc, err := client.DiffCopy(ctx) - if err != nil { - return nil, err - } - - return newStreamWriter(cc), nil -} diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go deleted file mode 100644 index 4a9697e34232..000000000000 --- a/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go +++ /dev/null @@ -1,644 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: filesync.proto - -/* -Package filesync is a generated protocol buffer package. - -It is generated from these files: - filesync.proto - -It has these top-level messages: - BytesMessage -*/ -package filesync - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import bytes "bytes" - -import strings "strings" -import reflect "reflect" - -import context "golang.org/x/net/context" -import grpc "google.golang.org/grpc" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
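[Reviewer sketch — not part of the diff.] The filesync attachables deleted above plug into the generic session plumbing removed later in this same change. A minimal sketch of the client-side wiring, reconstructed only from APIs visible in this diff (session.NewSession, Session.Allow, NewFSSyncProvider, Session.Run); the dialer is caller-supplied transport plumbing and all names are illustrative:

package example

import (
	"context"

	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/filesync"
)

// shareDir exposes srcDir to the daemon under the name "context".
// Sketch only: dialer is whatever session.Dialer the caller already has.
func shareDir(ctx context.Context, srcDir string, dialer session.Dialer) error {
	s, err := session.NewSession(ctx, "example", "")
	if err != nil {
		return err
	}
	// Attach the file-sync provider so the daemon side can run FSSync
	// against the "context" directory.
	s.Allow(filesync.NewFSSyncProvider([]filesync.SyncedDir{
		{Name: "context", Dir: srcDir},
	}))
	// Run serves the attached services over the dialed connection
	// until ctx is cancelled.
	return s.Run(ctx, dialer)
}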
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -// BytesMessage contains a chunk of byte data -type BytesMessage struct { - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *BytesMessage) Reset() { *m = BytesMessage{} } -func (*BytesMessage) ProtoMessage() {} -func (*BytesMessage) Descriptor() ([]byte, []int) { return fileDescriptorFilesync, []int{0} } - -func (m *BytesMessage) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func init() { - proto.RegisterType((*BytesMessage)(nil), "moby.filesync.v1.BytesMessage") -} -func (this *BytesMessage) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*BytesMessage) - if !ok { - that2, ok := that.(BytesMessage) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - return true -} -func (this *BytesMessage) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&filesync.BytesMessage{") - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringFilesync(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for FileSync service - -type FileSyncClient interface { - DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error) - TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error) -} - -type fileSyncClient struct { - cc *grpc.ClientConn -} - -func NewFileSyncClient(cc *grpc.ClientConn) FileSyncClient { - return &fileSyncClient{cc} -} - -func (c *fileSyncClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error) { - stream, err := grpc.NewClientStream(ctx, &_FileSync_serviceDesc.Streams[0], c.cc, "/moby.filesync.v1.FileSync/DiffCopy", opts...) - if err != nil { - return nil, err - } - x := &fileSyncDiffCopyClient{stream} - return x, nil -} - -type FileSync_DiffCopyClient interface { - Send(*BytesMessage) error - Recv() (*BytesMessage, error) - grpc.ClientStream -} - -type fileSyncDiffCopyClient struct { - grpc.ClientStream -} - -func (x *fileSyncDiffCopyClient) Send(m *BytesMessage) error { - return x.ClientStream.SendMsg(m) -} - -func (x *fileSyncDiffCopyClient) Recv() (*BytesMessage, error) { - m := new(BytesMessage) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *fileSyncClient) TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error) { - stream, err := grpc.NewClientStream(ctx, &_FileSync_serviceDesc.Streams[1], c.cc, "/moby.filesync.v1.FileSync/TarStream", opts...) 
- if err != nil { - return nil, err - } - x := &fileSyncTarStreamClient{stream} - return x, nil -} - -type FileSync_TarStreamClient interface { - Send(*BytesMessage) error - Recv() (*BytesMessage, error) - grpc.ClientStream -} - -type fileSyncTarStreamClient struct { - grpc.ClientStream -} - -func (x *fileSyncTarStreamClient) Send(m *BytesMessage) error { - return x.ClientStream.SendMsg(m) -} - -func (x *fileSyncTarStreamClient) Recv() (*BytesMessage, error) { - m := new(BytesMessage) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for FileSync service - -type FileSyncServer interface { - DiffCopy(FileSync_DiffCopyServer) error - TarStream(FileSync_TarStreamServer) error -} - -func RegisterFileSyncServer(s *grpc.Server, srv FileSyncServer) { - s.RegisterService(&_FileSync_serviceDesc, srv) -} - -func _FileSync_DiffCopy_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(FileSyncServer).DiffCopy(&fileSyncDiffCopyServer{stream}) -} - -type FileSync_DiffCopyServer interface { - Send(*BytesMessage) error - Recv() (*BytesMessage, error) - grpc.ServerStream -} - -type fileSyncDiffCopyServer struct { - grpc.ServerStream -} - -func (x *fileSyncDiffCopyServer) Send(m *BytesMessage) error { - return x.ServerStream.SendMsg(m) -} - -func (x *fileSyncDiffCopyServer) Recv() (*BytesMessage, error) { - m := new(BytesMessage) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _FileSync_TarStream_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(FileSyncServer).TarStream(&fileSyncTarStreamServer{stream}) -} - -type FileSync_TarStreamServer interface { - Send(*BytesMessage) error - Recv() (*BytesMessage, error) - grpc.ServerStream -} - -type fileSyncTarStreamServer struct { - grpc.ServerStream -} - -func (x *fileSyncTarStreamServer) Send(m *BytesMessage) error { - return x.ServerStream.SendMsg(m) -} - -func (x *fileSyncTarStreamServer) Recv() (*BytesMessage, error) { - m := new(BytesMessage) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _FileSync_serviceDesc = grpc.ServiceDesc{ - ServiceName: "moby.filesync.v1.FileSync", - HandlerType: (*FileSyncServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "DiffCopy", - Handler: _FileSync_DiffCopy_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "TarStream", - Handler: _FileSync_TarStream_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "filesync.proto", -} - -// Client API for FileSend service - -type FileSendClient interface { - DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSend_DiffCopyClient, error) -} - -type fileSendClient struct { - cc *grpc.ClientConn -} - -func NewFileSendClient(cc *grpc.ClientConn) FileSendClient { - return &fileSendClient{cc} -} - -func (c *fileSendClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSend_DiffCopyClient, error) { - stream, err := grpc.NewClientStream(ctx, &_FileSend_serviceDesc.Streams[0], c.cc, "/moby.filesync.v1.FileSend/DiffCopy", opts...) 
- if err != nil { - return nil, err - } - x := &fileSendDiffCopyClient{stream} - return x, nil -} - -type FileSend_DiffCopyClient interface { - Send(*BytesMessage) error - Recv() (*BytesMessage, error) - grpc.ClientStream -} - -type fileSendDiffCopyClient struct { - grpc.ClientStream -} - -func (x *fileSendDiffCopyClient) Send(m *BytesMessage) error { - return x.ClientStream.SendMsg(m) -} - -func (x *fileSendDiffCopyClient) Recv() (*BytesMessage, error) { - m := new(BytesMessage) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for FileSend service - -type FileSendServer interface { - DiffCopy(FileSend_DiffCopyServer) error -} - -func RegisterFileSendServer(s *grpc.Server, srv FileSendServer) { - s.RegisterService(&_FileSend_serviceDesc, srv) -} - -func _FileSend_DiffCopy_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(FileSendServer).DiffCopy(&fileSendDiffCopyServer{stream}) -} - -type FileSend_DiffCopyServer interface { - Send(*BytesMessage) error - Recv() (*BytesMessage, error) - grpc.ServerStream -} - -type fileSendDiffCopyServer struct { - grpc.ServerStream -} - -func (x *fileSendDiffCopyServer) Send(m *BytesMessage) error { - return x.ServerStream.SendMsg(m) -} - -func (x *fileSendDiffCopyServer) Recv() (*BytesMessage, error) { - m := new(BytesMessage) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _FileSend_serviceDesc = grpc.ServiceDesc{ - ServiceName: "moby.filesync.v1.FileSend", - HandlerType: (*FileSendServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "DiffCopy", - Handler: _FileSend_DiffCopy_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "filesync.proto", -} - -func (m *BytesMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Data) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintFilesync(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) - } - return i, nil -} - -func encodeVarintFilesync(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *BytesMessage) Size() (n int) { - var l int - _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovFilesync(uint64(l)) - } - return n -} - -func sovFilesync(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozFilesync(x uint64) (n int) { - return sovFilesync(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *BytesMessage) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&BytesMessage{`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s -} -func valueToStringFilesync(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *BytesMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFilesync - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFilesync - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthFilesync - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipFilesync(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthFilesync - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipFilesync(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowFilesync - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowFilesync - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowFilesync - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthFilesync - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowFilesync - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipFilesync(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthFilesync = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowFilesync = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("filesync.proto", fileDescriptorFilesync) } - -var fileDescriptorFilesync = []byte{ - // 208 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcb, 0xcc, 0x49, - 0x2d, 0xae, 0xcc, 
0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa, - 0xd4, 0x83, 0x0b, 0x96, 0x19, 0x2a, 0x29, 0x71, 0xf1, 0x38, 0x55, 0x96, 0xa4, 0x16, 0xfb, 0xa6, - 0x16, 0x17, 0x27, 0xa6, 0xa7, 0x0a, 0x09, 0x71, 0xb1, 0xa4, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x2a, - 0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x46, 0xab, 0x19, 0xb9, 0x38, 0xdc, 0x32, 0x73, 0x52, 0x83, - 0x2b, 0xf3, 0x92, 0x85, 0xfc, 0xb8, 0x38, 0x5c, 0x32, 0xd3, 0xd2, 0x9c, 0xf3, 0x0b, 0x2a, 0x85, - 0xe4, 0xf4, 0xd0, 0xcd, 0xd3, 0x43, 0x36, 0x4c, 0x8a, 0x80, 0xbc, 0x06, 0xa3, 0x01, 0xa3, 0x90, - 0x3f, 0x17, 0x67, 0x48, 0x62, 0x51, 0x70, 0x49, 0x51, 0x6a, 0x62, 0x2e, 0x35, 0x0c, 0x34, 0x8a, - 0x82, 0x3a, 0x36, 0x35, 0x2f, 0x85, 0xda, 0x8e, 0x75, 0x32, 0xbb, 0xf0, 0x50, 0x8e, 0xe1, 0xc6, - 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, - 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, - 0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x51, 0x1c, 0x30, 0xb3, 0x92, 0xd8, 0xc0, - 0xc1, 0x6f, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x72, 0x81, 0x1a, 0x91, 0x90, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.proto b/vendor/github.com/moby/buildkit/session/filesync/filesync.proto deleted file mode 100644 index 0ae293736809..000000000000 --- a/vendor/github.com/moby/buildkit/session/filesync/filesync.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package moby.filesync.v1; - -option go_package = "filesync"; - -service FileSync{ - rpc DiffCopy(stream BytesMessage) returns (stream BytesMessage); - rpc TarStream(stream BytesMessage) returns (stream BytesMessage); -} - -service FileSend{ - rpc DiffCopy(stream BytesMessage) returns (stream BytesMessage); -} - - -// BytesMessage contains a chunk of byte data -message BytesMessage{ - bytes data = 1; -} \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync_test.go b/vendor/github.com/moby/buildkit/session/filesync/filesync_test.go deleted file mode 100644 index 39a0125eb6e5..000000000000 --- a/vendor/github.com/moby/buildkit/session/filesync/filesync_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package filesync - -import ( - "context" - "io/ioutil" - "path/filepath" - "testing" - - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" -) - -func TestFileSyncIncludePatterns(t *testing.T) { - ctx := context.TODO() - t.Parallel() - tmpDir, err := ioutil.TempDir("", "fsynctest") - require.NoError(t, err) - - destDir, err := ioutil.TempDir("", "fsynctest") - require.NoError(t, err) - - err = ioutil.WriteFile(filepath.Join(tmpDir, "foo"), []byte("content1"), 0600) - require.NoError(t, err) - - err = ioutil.WriteFile(filepath.Join(tmpDir, "bar"), []byte("content2"), 0600) - require.NoError(t, err) - - s, err := session.NewSession(ctx, "foo", "bar") - require.NoError(t, err) - - m, err := session.NewManager() - require.NoError(t, err) - - fs := NewFSSyncProvider([]SyncedDir{{Name: "test0", Dir: tmpDir}}) - s.Allow(fs) - - dialer := session.Dialer(testutil.TestStream(testutil.Handler(m.HandleConn))) - - g, ctx := errgroup.WithContext(context.Background()) - - g.Go(func() error { - return s.Run(ctx, dialer) - }) - - g.Go(func() (reterr error) { - c, err := m.Get(ctx, s.ID()) - if err != nil { - return err - } - if err := FSSync(ctx, c, FSSendRequestOpt{ - Name: "test0", - 
DestDir: destDir, - IncludePatterns: []string{"ba*"}, - }); err != nil { - return err - } - - _, err = ioutil.ReadFile(filepath.Join(destDir, "foo")) - assert.Error(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar")) - if err != nil { - return err - } - assert.Equal(t, "content2", string(dt)) - return s.Close() - }) - - err = g.Wait() - require.NoError(t, err) -} diff --git a/vendor/github.com/moby/buildkit/session/filesync/generate.go b/vendor/github.com/moby/buildkit/session/filesync/generate.go deleted file mode 100644 index 261e87627238..000000000000 --- a/vendor/github.com/moby/buildkit/session/filesync/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package filesync - -//go:generate protoc --gogoslick_out=plugins=grpc:. filesync.proto diff --git a/vendor/github.com/moby/buildkit/session/grpc.go b/vendor/github.com/moby/buildkit/session/grpc.go deleted file mode 100644 index 2798b6abba86..000000000000 --- a/vendor/github.com/moby/buildkit/session/grpc.go +++ /dev/null @@ -1,81 +0,0 @@ -package session - -import ( - "context" - "net" - "sync/atomic" - "time" - - "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - opentracing "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/net/http2" - "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" -) - -func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) { - go func() { - <-ctx.Done() - conn.Close() - }() - logrus.Debugf("serving grpc connection") - (&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{Handler: grpcServer}) -} - -func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc.ClientConn, error) { - var dialCount int64 - dialer := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) { - if c := atomic.AddInt64(&dialCount, 1); c > 1 { - return nil, errors.Errorf("only one connection allowed") - } - return conn, nil - }) - - dialOpts := []grpc.DialOption{ - dialer, - grpc.WithInsecure(), - } - - if span := opentracing.SpanFromContext(ctx); span != nil { - tracer := span.Tracer() - dialOpts = append(dialOpts, - grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer, traceFilter())), - grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer, traceFilter())), - ) - } - - cc, err := grpc.DialContext(ctx, "", dialOpts...) 
- if err != nil { - return nil, nil, errors.Wrap(err, "failed to create grpc client") - } - - ctx, cancel := context.WithCancel(ctx) - go monitorHealth(ctx, cc, cancel) - - return ctx, cc, nil -} - -func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func()) { - defer cancelConn() - defer cc.Close() - - ticker := time.NewTicker(1 * time.Second) - defer ticker.Stop() - healthClient := grpc_health_v1.NewHealthClient(cc) - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - _, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{}) - cancel() - if err != nil { - return - } - } - } -} diff --git a/vendor/github.com/moby/buildkit/session/grpchijack/dial.go b/vendor/github.com/moby/buildkit/session/grpchijack/dial.go deleted file mode 100644 index 151ab5498f3a..000000000000 --- a/vendor/github.com/moby/buildkit/session/grpchijack/dial.go +++ /dev/null @@ -1,156 +0,0 @@ -package grpchijack - -import ( - "context" - "io" - "net" - "strings" - "sync" - "time" - - controlapi "github.com/moby/buildkit/api/services/control" - "github.com/moby/buildkit/session" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -func Dialer(api controlapi.ControlClient) session.Dialer { - return func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { - - meta = lowerHeaders(meta) - - md := metadata.MD(meta) - - ctx = metadata.NewOutgoingContext(ctx, md) - - stream, err := api.Session(ctx) - if err != nil { - return nil, err - } - - c, _ := streamToConn(stream) - return c, nil - } -} - -func streamToConn(stream grpc.Stream) (net.Conn, <-chan struct{}) { - closeCh := make(chan struct{}) - c := &conn{stream: stream, buf: make([]byte, 32*1<<10), closeCh: closeCh} - return c, closeCh -} - -type conn struct { - stream grpc.Stream - buf []byte - lastBuf []byte - - closedOnce sync.Once - readMu sync.Mutex - err error - closeCh chan struct{} -} - -func (c *conn) Read(b []byte) (n int, err error) { - c.readMu.Lock() - defer c.readMu.Unlock() - - if c.lastBuf != nil { - n := copy(b, c.lastBuf) - c.lastBuf = c.lastBuf[n:] - if len(c.lastBuf) == 0 { - c.lastBuf = nil - } - return n, nil - } - m := new(controlapi.BytesMessage) - m.Data = c.buf - - if err := c.stream.RecvMsg(m); err != nil { - return 0, err - } - c.buf = m.Data[:cap(m.Data)] - - n = copy(b, m.Data) - if n < len(m.Data) { - c.lastBuf = m.Data[n:] - } - - return n, nil -} - -func (c *conn) Write(b []byte) (int, error) { - m := &controlapi.BytesMessage{Data: b} - if err := c.stream.SendMsg(m); err != nil { - return 0, err - } - return len(b), nil -} - -func (c *conn) Close() (err error) { - c.closedOnce.Do(func() { - defer func() { - close(c.closeCh) - }() - - if cs, ok := c.stream.(grpc.ClientStream); ok { - err = cs.CloseSend() - if err != nil { - return - } - } - - c.readMu.Lock() - for { - m := new(controlapi.BytesMessage) - m.Data = c.buf - err = c.stream.RecvMsg(m) - if err != nil { - if err != io.EOF { - return - } - err = nil - break - } - c.buf = m.Data[:cap(m.Data)] - c.lastBuf = append(c.lastBuf, c.buf...) 
- } - c.readMu.Unlock() - - }) - return nil -} - -func (c *conn) LocalAddr() net.Addr { - return dummyAddr{} -} -func (c *conn) RemoteAddr() net.Addr { - return dummyAddr{} -} -func (c *conn) SetDeadline(t time.Time) error { - return nil -} -func (c *conn) SetReadDeadline(t time.Time) error { - return nil -} -func (c *conn) SetWriteDeadline(t time.Time) error { - return nil -} - -type dummyAddr struct { -} - -func (d dummyAddr) Network() string { - return "tcp" -} - -func (d dummyAddr) String() string { - return "localhost" -} - -func lowerHeaders(in map[string][]string) map[string][]string { - out := map[string][]string{} - for k := range in { - out[strings.ToLower(k)] = in[k] - } - return out -} diff --git a/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go b/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go deleted file mode 100644 index 096a9e806f2f..000000000000 --- a/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go +++ /dev/null @@ -1,15 +0,0 @@ -package grpchijack - -import ( - "net" - - controlapi "github.com/moby/buildkit/api/services/control" - "google.golang.org/grpc/metadata" -) - -// Hijack hijacks session to a connection. -func Hijack(stream controlapi.Control_SessionServer) (net.Conn, <-chan struct{}, map[string][]string) { - md, _ := metadata.FromIncomingContext(stream.Context()) - c, closeCh := streamToConn(stream) - return c, closeCh, md -} diff --git a/vendor/github.com/moby/buildkit/session/manager.go b/vendor/github.com/moby/buildkit/session/manager.go deleted file mode 100644 index f401c7fb33dd..000000000000 --- a/vendor/github.com/moby/buildkit/session/manager.go +++ /dev/null @@ -1,218 +0,0 @@ -package session - -import ( - "context" - "net" - "net/http" - "strings" - "sync" - - "github.com/pkg/errors" - "google.golang.org/grpc" -) - -// Caller can invoke requests on the session -type Caller interface { - Context() context.Context - Supports(method string) bool - Conn() *grpc.ClientConn - Name() string - SharedKey() string -} - -type client struct { - Session - cc *grpc.ClientConn - supported map[string]struct{} -} - -// Manager is a controller for accessing currently active sessions -type Manager struct { - sessions map[string]*client - mu sync.Mutex - updateCondition *sync.Cond -} - -// NewManager returns a new Manager -func NewManager() (*Manager, error) { - sm := &Manager{ - sessions: make(map[string]*client), - } - sm.updateCondition = sync.NewCond(&sm.mu) - return sm, nil -} - -// HandleHTTPRequest handles an incoming HTTP request -func (sm *Manager) HandleHTTPRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error { - hijacker, ok := w.(http.Hijacker) - if !ok { - return errors.New("handler does not support hijack") - } - - id := r.Header.Get(headerSessionID) - - proto := r.Header.Get("Upgrade") - - sm.mu.Lock() - if _, ok := sm.sessions[id]; ok { - sm.mu.Unlock() - return errors.Errorf("session %s already exists", id) - } - - if proto == "" { - sm.mu.Unlock() - return errors.New("no upgrade proto in request") - } - - if proto != "h2c" { - sm.mu.Unlock() - return errors.Errorf("protocol %s not supported", proto) - } - - conn, _, err := hijacker.Hijack() - if err != nil { - sm.mu.Unlock() - return errors.Wrap(err, "failed to hijack connection") - } - - resp := &http.Response{ - StatusCode: http.StatusSwitchingProtocols, - ProtoMajor: 1, - ProtoMinor: 1, - Header: http.Header{}, - } - resp.Header.Set("Connection", "Upgrade") - resp.Header.Set("Upgrade", proto) - - // set raw mode - conn.Write([]byte{}) - 
resp.Write(conn) - - return sm.handleConn(ctx, conn, r.Header) -} - -// HandleConn handles an incoming raw connection -func (sm *Manager) HandleConn(ctx context.Context, conn net.Conn, opts map[string][]string) error { - sm.mu.Lock() - return sm.handleConn(ctx, conn, opts) -} - -// caller needs to take lock, this function will release it -func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[string][]string) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - opts = canonicalHeaders(opts) - - h := http.Header(opts) - id := h.Get(headerSessionID) - name := h.Get(headerSessionName) - sharedKey := h.Get(headerSessionSharedKey) - - ctx, cc, err := grpcClientConn(ctx, conn) - if err != nil { - sm.mu.Unlock() - return err - } - - c := &client{ - Session: Session{ - id: id, - name: name, - sharedKey: sharedKey, - ctx: ctx, - cancelCtx: cancel, - done: make(chan struct{}), - }, - cc: cc, - supported: make(map[string]struct{}), - } - - for _, m := range opts[headerSessionMethod] { - c.supported[strings.ToLower(m)] = struct{}{} - } - sm.sessions[id] = c - sm.updateCondition.Broadcast() - sm.mu.Unlock() - - defer func() { - sm.mu.Lock() - delete(sm.sessions, id) - sm.mu.Unlock() - }() - - <-c.ctx.Done() - conn.Close() - close(c.done) - - return nil -} - -// Get returns a session by ID -func (sm *Manager) Get(ctx context.Context, id string) (Caller, error) { - // session prefix is used to identify vertexes with different contexts so - // they would not collide, but for lookup we don't need the prefix - if p := strings.SplitN(id, ":", 2); len(p) == 2 && len(p[1]) > 0 { - id = p[1] - } - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - go func() { - select { - case <-ctx.Done(): - sm.updateCondition.Broadcast() - } - }() - - var c *client - - sm.mu.Lock() - for { - select { - case <-ctx.Done(): - sm.mu.Unlock() - return nil, errors.Wrapf(ctx.Err(), "no active session for %s", id) - default: - } - var ok bool - c, ok = sm.sessions[id] - if !ok || c.closed() { - sm.updateCondition.Wait() - continue - } - sm.mu.Unlock() - break - } - - return c, nil -} - -func (c *client) Context() context.Context { - return c.context() -} - -func (c *client) Name() string { - return c.name -} - -func (c *client) SharedKey() string { - return c.sharedKey -} - -func (c *client) Supports(url string) bool { - _, ok := c.supported[strings.ToLower(url)] - return ok -} -func (c *client) Conn() *grpc.ClientConn { - return c.cc -} - -func canonicalHeaders(in map[string][]string) map[string][]string { - out := map[string][]string{} - for k := range in { - out[http.CanonicalHeaderKey(k)] = in[k] - } - return out -} diff --git a/vendor/github.com/moby/buildkit/session/secrets/generate.go b/vendor/github.com/moby/buildkit/session/secrets/generate.go deleted file mode 100644 index 68716a95c665..000000000000 --- a/vendor/github.com/moby/buildkit/session/secrets/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package secrets - -//go:generate protoc --gogoslick_out=plugins=grpc:. 
secrets.proto diff --git a/vendor/github.com/moby/buildkit/session/secrets/secrets.go b/vendor/github.com/moby/buildkit/session/secrets/secrets.go deleted file mode 100644 index 6cfda18bb98c..000000000000 --- a/vendor/github.com/moby/buildkit/session/secrets/secrets.go +++ /dev/null @@ -1,30 +0,0 @@ -package secrets - -import ( - "context" - - "github.com/moby/buildkit/session" - "github.com/pkg/errors" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type SecretStore interface { - GetSecret(context.Context, string) ([]byte, error) -} - -var ErrNotFound = errors.Errorf("not found") - -func GetSecret(ctx context.Context, c session.Caller, id string) ([]byte, error) { - client := NewSecretsClient(c.Conn()) - resp, err := client.GetSecret(ctx, &GetSecretRequest{ - ID: id, - }) - if err != nil { - if st, ok := status.FromError(err); ok && (st.Code() == codes.Unimplemented || st.Code() == codes.NotFound) { - return nil, errors.Wrapf(ErrNotFound, "secret %s not found", id) - } - return nil, err - } - return resp.Data, nil -} diff --git a/vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go b/vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go deleted file mode 100644 index 6f524b76d9a0..000000000000 --- a/vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go +++ /dev/null @@ -1,813 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: secrets.proto - -/* - Package secrets is a generated protocol buffer package. - - It is generated from these files: - secrets.proto - - It has these top-level messages: - GetSecretRequest - GetSecretResponse -*/ -package secrets - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import bytes "bytes" - -import strings "strings" -import reflect "reflect" -import sortkeys "github.com/gogo/protobuf/sortkeys" - -import context "golang.org/x/net/context" -import grpc "google.golang.org/grpc" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
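[Reviewer sketch — not part of the diff.] One thing worth noting about the generated codecs in this diff (auth.pb.go and filesync.pb.go above, secrets.pb.go here): they hand-roll protobuf's base-128 varint wire format rather than delegating to a proto runtime. A self-contained sketch of that encoding, mirroring the encodeVarint*/sov* helper pairs, with a worked example:

package example

import "fmt"

// putVarint mirrors the generated encodeVarint* helpers: base-128,
// least-significant group first, high bit set on continuation bytes.
func putVarint(dst []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return offset + 1
}

// varintLen mirrors the generated sov* helpers: the encoded size in bytes.
func varintLen(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func Example() {
	buf := make([]byte, 10)
	n := putVarint(buf, 0, 300)
	// 300 = 0b1_0010_1100 encodes as 0xac 0x02. The 0xa tag byte written
	// by the generated MarshalTo is (field 1 << 3) | wire type 2
	// (length-delimited), followed by a varint length like this one.
	fmt.Printf("% x, len=%d\n", buf[:n], varintLen(300)) // ac 02, len=2
}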
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type GetSecretRequest struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Annotations map[string]string `protobuf:"bytes,2,rep,name=annotations" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *GetSecretRequest) Reset() { *m = GetSecretRequest{} } -func (*GetSecretRequest) ProtoMessage() {} -func (*GetSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorSecrets, []int{0} } - -func (m *GetSecretRequest) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *GetSecretRequest) GetAnnotations() map[string]string { - if m != nil { - return m.Annotations - } - return nil -} - -type GetSecretResponse struct { - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *GetSecretResponse) Reset() { *m = GetSecretResponse{} } -func (*GetSecretResponse) ProtoMessage() {} -func (*GetSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorSecrets, []int{1} } - -func (m *GetSecretResponse) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func init() { - proto.RegisterType((*GetSecretRequest)(nil), "moby.buildkit.secrets.v1.GetSecretRequest") - proto.RegisterType((*GetSecretResponse)(nil), "moby.buildkit.secrets.v1.GetSecretResponse") -} -func (this *GetSecretRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*GetSecretRequest) - if !ok { - that2, ok := that.(GetSecretRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ID != that1.ID { - return false - } - if len(this.Annotations) != len(that1.Annotations) { - return false - } - for i := range this.Annotations { - if this.Annotations[i] != that1.Annotations[i] { - return false - } - } - return true -} -func (this *GetSecretResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*GetSecretResponse) - if !ok { - that2, ok := that.(GetSecretResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - return true -} -func (this *GetSecretRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&secrets.GetSecretRequest{") - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k, _ := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) - } - sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%#v: %#v,", k, this.Annotations[k]) - } - mapStringForAnnotations += "}" - if this.Annotations != nil { - s = append(s, "Annotations: "+mapStringForAnnotations+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GetSecretResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&secrets.GetSecretResponse{") - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringSecrets(v 
interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for Secrets service - -type SecretsClient interface { - GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) -} - -type secretsClient struct { - cc *grpc.ClientConn -} - -func NewSecretsClient(cc *grpc.ClientConn) SecretsClient { - return &secretsClient{cc} -} - -func (c *secretsClient) GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) { - out := new(GetSecretResponse) - err := grpc.Invoke(ctx, "/moby.buildkit.secrets.v1.Secrets/GetSecret", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Secrets service - -type SecretsServer interface { - GetSecret(context.Context, *GetSecretRequest) (*GetSecretResponse, error) -} - -func RegisterSecretsServer(s *grpc.Server, srv SecretsServer) { - s.RegisterService(&_Secrets_serviceDesc, srv) -} - -func _Secrets_GetSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetSecretRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SecretsServer).GetSecret(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.secrets.v1.Secrets/GetSecret", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SecretsServer).GetSecret(ctx, req.(*GetSecretRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Secrets_serviceDesc = grpc.ServiceDesc{ - ServiceName: "moby.buildkit.secrets.v1.Secrets", - HandlerType: (*SecretsServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetSecret", - Handler: _Secrets_GetSecret_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "secrets.proto", -} - -func (m *GetSecretRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetSecretRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ID) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintSecrets(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - } - if len(m.Annotations) > 0 { - for k, _ := range m.Annotations { - dAtA[i] = 0x12 - i++ - v := m.Annotations[k] - mapSize := 1 + len(k) + sovSecrets(uint64(len(k))) + 1 + len(v) + sovSecrets(uint64(len(v))) - i = encodeVarintSecrets(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintSecrets(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintSecrets(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil -} - -func (m *GetSecretResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*GetSecretResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Data) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintSecrets(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) - } - return i, nil -} - -func encodeVarintSecrets(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *GetSecretRequest) Size() (n int) { - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovSecrets(uint64(l)) - } - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovSecrets(uint64(len(k))) + 1 + len(v) + sovSecrets(uint64(len(v))) - n += mapEntrySize + 1 + sovSecrets(uint64(mapEntrySize)) - } - } - return n -} - -func (m *GetSecretResponse) Size() (n int) { - var l int - _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovSecrets(uint64(l)) - } - return n -} - -func sovSecrets(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozSecrets(x uint64) (n int) { - return sovSecrets(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *GetSecretRequest) String() string { - if this == nil { - return "nil" - } - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k, _ := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) - } - sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) - } - mapStringForAnnotations += "}" - s := strings.Join([]string{`&GetSecretRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Annotations:` + mapStringForAnnotations + `,`, - `}`, - }, "") - return s -} -func (this *GetSecretResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetSecretResponse{`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s -} -func valueToStringSecrets(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *GetSecretRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSecrets - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetSecretRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSecrets - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSecrets - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID 
= string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSecrets - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSecrets - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSecrets - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSecrets - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSecrets - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSecrets - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthSecrets - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipSecrets(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSecrets - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSecrets(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSecrets - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetSecretResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSecrets - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetSecretResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
GetSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSecrets - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSecrets - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSecrets(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSecrets - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSecrets(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSecrets - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSecrets - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSecrets - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthSecrets - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSecrets - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipSecrets(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthSecrets = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSecrets = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("secrets.proto", fileDescriptorSecrets) } - -var fileDescriptorSecrets = []byte{ - // 279 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2d, 0x4e, 0x4d, 0x2e, - 0x4a, 0x2d, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, - 0x4b, 0x2a, 0xcd, 0xcc, 0x49, 0xc9, 0xce, 0x2c, 0xd1, 0x83, 0x49, 0x96, 0x19, 0x2a, 0x1d, 0x64, - 0xe4, 0x12, 0x70, 0x4f, 0x2d, 0x09, 0x06, 0x8b, 0x04, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, - 0xf1, 0x71, 0x31, 
0x79, 0xba, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x31, 0x79, 0xba, 0x08, - 0xc5, 0x72, 0x71, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7, 0x15, 0x4b, 0x30, - 0x29, 0x30, 0x6b, 0x70, 0x1b, 0x59, 0xeb, 0xe1, 0x32, 0x54, 0x0f, 0xdd, 0x40, 0x3d, 0x47, 0x84, - 0x6e, 0xd7, 0xbc, 0x92, 0xa2, 0xca, 0x20, 0x64, 0xf3, 0xa4, 0xec, 0xb8, 0x04, 0xd0, 0x15, 0x08, - 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0xdd, 0x00, 0x62, 0x0a, 0x89, 0x70, 0xb1, 0x96, 0x25, - 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b, 0x46, 0x25, 0x75, 0x2e, - 0x41, 0x24, 0x1b, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0x84, 0xb8, 0x58, 0x52, 0x12, 0x4b, - 0x12, 0xc1, 0x26, 0xf0, 0x04, 0x81, 0xd9, 0x46, 0xf9, 0x5c, 0xec, 0x10, 0x55, 0xc5, 0x42, 0x29, - 0x5c, 0x9c, 0x70, 0x3d, 0x42, 0x5a, 0xc4, 0x7b, 0x45, 0x4a, 0x9b, 0x28, 0xb5, 0x10, 0x47, 0x38, - 0x99, 0x5e, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f, - 0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, - 0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, - 0x28, 0x76, 0xa8, 0x59, 0x49, 0x6c, 0xe0, 0x58, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x05, - 0x4e, 0x56, 0xde, 0xc6, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/moby/buildkit/session/secrets/secrets.proto b/vendor/github.com/moby/buildkit/session/secrets/secrets.proto deleted file mode 100644 index 17d862450d9c..000000000000 --- a/vendor/github.com/moby/buildkit/session/secrets/secrets.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package moby.buildkit.secrets.v1; - -option go_package = "secrets"; - -service Secrets{ - rpc GetSecret(GetSecretRequest) returns (GetSecretResponse); -} - - -message GetSecretRequest { - string ID = 1; - map annotations = 2; -} - -message GetSecretResponse { - bytes data = 1; -} diff --git a/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/file.go b/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/file.go deleted file mode 100644 index 58b3a928af7b..000000000000 --- a/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/file.go +++ /dev/null @@ -1,54 +0,0 @@ -package secretsprovider - -import ( - "context" - "io/ioutil" - "os" - - "github.com/moby/buildkit/session/secrets" - "github.com/pkg/errors" -) - -type FileSource struct { - ID string - FilePath string -} - -func NewFileStore(files []FileSource) (secrets.SecretStore, error) { - m := map[string]FileSource{} - for _, f := range files { - if f.ID == "" { - return nil, errors.Errorf("secret missing ID") - } - if f.FilePath == "" { - f.FilePath = f.ID - } - fi, err := os.Stat(f.FilePath) - if err != nil { - return nil, errors.Wrapf(err, "failed to stat %s", f.FilePath) - } - if fi.Size() > MaxSecretSize { - return nil, errors.Errorf("secret %s too big. 
max size 500KB", f.ID) - } - m[f.ID] = f - } - return &fileStore{ - m: m, - }, nil -} - -type fileStore struct { - m map[string]FileSource -} - -func (fs *fileStore) GetSecret(ctx context.Context, id string) ([]byte, error) { - v, ok := fs.m[id] - if !ok { - return nil, errors.WithStack(secrets.ErrNotFound) - } - dt, err := ioutil.ReadFile(v.FilePath) - if err != nil { - return nil, err - } - return dt, nil -} diff --git a/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/secretsprovider.go b/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/secretsprovider.go deleted file mode 100644 index 7a0d6f3db859..000000000000 --- a/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/secretsprovider.go +++ /dev/null @@ -1,60 +0,0 @@ -package secretsprovider - -import ( - "context" - - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/secrets" - "github.com/pkg/errors" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// MaxSecretSize is the maximum byte length allowed for a secret -const MaxSecretSize = 500 * 1024 // 500KB - -func NewSecretProvider(store secrets.SecretStore) session.Attachable { - return &secretProvider{ - store: store, - } -} - -type secretProvider struct { - store secrets.SecretStore -} - -func (sp *secretProvider) Register(server *grpc.Server) { - secrets.RegisterSecretsServer(server, sp) -} - -func (sp *secretProvider) GetSecret(ctx context.Context, req *secrets.GetSecretRequest) (*secrets.GetSecretResponse, error) { - dt, err := sp.store.GetSecret(ctx, req.ID) - if err != nil { - if errors.Cause(err) == secrets.ErrNotFound { - return nil, status.Errorf(codes.NotFound, err.Error()) - } - return nil, err - } - if l := len(dt); l > MaxSecretSize { - return nil, errors.Errorf("invalid secret size %d", l) - } - - return &secrets.GetSecretResponse{ - Data: dt, - }, nil -} - -func FromMap(m map[string][]byte) session.Attachable { - return NewSecretProvider(mapStore(m)) -} - -type mapStore map[string][]byte - -func (m mapStore) GetSecret(ctx context.Context, id string) ([]byte, error) { - v, ok := m[id] - if !ok { - return nil, errors.WithStack(secrets.ErrNotFound) - } - return v, nil -} diff --git a/vendor/github.com/moby/buildkit/session/session.go b/vendor/github.com/moby/buildkit/session/session.go deleted file mode 100644 index 47c9579633a7..000000000000 --- a/vendor/github.com/moby/buildkit/session/session.go +++ /dev/null @@ -1,143 +0,0 @@ -package session - -import ( - "context" - "net" - "strings" - - "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - "github.com/moby/buildkit/identity" - opentracing "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "google.golang.org/grpc" - "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" -) - -const ( - headerSessionID = "X-Docker-Expose-Session-Uuid" - headerSessionName = "X-Docker-Expose-Session-Name" - headerSessionSharedKey = "X-Docker-Expose-Session-Sharedkey" - headerSessionMethod = "X-Docker-Expose-Session-Grpc-Method" -) - -// Dialer returns a connection that can be used by the session -type Dialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) - -// Attachable defines a feature that can be expsed on a session -type Attachable interface { - Register(*grpc.Server) -} - -// Session is a long running connection between client and a daemon -type Session struct { - id string - name string - sharedKey string - ctx context.Context - 
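Note on the secrets session API removed above: GetSecretRequest carries a string ID plus a string-to-string annotations map (declared upstream as map<string, string> annotations = 2), and both the file-backed and in-memory stores enforce a hard 500KB cap (MaxSecretSize). A stdlib-only sketch of the size check a replacement consumer would need; readSecret is a hypothetical helper, not part of any retained package:

    import (
        "fmt"
        "io/ioutil"
        "os"
    )

    // readSecret stats the file first so an oversized secret is rejected
    // before any bytes are read, mirroring the removed NewFileStore check.
    func readSecret(path string) ([]byte, error) {
        const maxSecretSize = 500 * 1024 // mirrors the removed MaxSecretSize
        fi, err := os.Stat(path)
        if err != nil {
            return nil, fmt.Errorf("failed to stat %s: %v", path, err)
        }
        if fi.Size() > maxSecretSize {
            return nil, fmt.Errorf("secret %s too big, max size 500KB", path)
        }
        return ioutil.ReadFile(path)
    }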
cancelCtx func() - done chan struct{} - grpcServer *grpc.Server - conn net.Conn -} - -// NewSession returns a new long running session -func NewSession(ctx context.Context, name, sharedKey string) (*Session, error) { - id := identity.NewID() - - serverOpts := []grpc.ServerOption{} - if span := opentracing.SpanFromContext(ctx); span != nil { - tracer := span.Tracer() - serverOpts = []grpc.ServerOption{ - grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(span.Tracer(), traceFilter())), - grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer, traceFilter())), - } - } - - s := &Session{ - id: id, - name: name, - sharedKey: sharedKey, - grpcServer: grpc.NewServer(serverOpts...), - } - - grpc_health_v1.RegisterHealthServer(s.grpcServer, health.NewServer()) - - return s, nil -} - -// Allow enable a given service to be reachable through the grpc session -func (s *Session) Allow(a Attachable) { - a.Register(s.grpcServer) -} - -// ID returns unique identifier for the session -func (s *Session) ID() string { - return s.id -} - -// Run activates the session -func (s *Session) Run(ctx context.Context, dialer Dialer) error { - ctx, cancel := context.WithCancel(ctx) - s.cancelCtx = cancel - s.done = make(chan struct{}) - - defer cancel() - defer close(s.done) - - meta := make(map[string][]string) - meta[headerSessionID] = []string{s.id} - meta[headerSessionName] = []string{s.name} - meta[headerSessionSharedKey] = []string{s.sharedKey} - - for name, svc := range s.grpcServer.GetServiceInfo() { - for _, method := range svc.Methods { - meta[headerSessionMethod] = append(meta[headerSessionMethod], MethodURL(name, method.Name)) - } - } - conn, err := dialer(ctx, "h2c", meta) - if err != nil { - return errors.Wrap(err, "failed to dial gRPC") - } - s.conn = conn - serve(ctx, s.grpcServer, conn) - return nil -} - -// Close closes the session -func (s *Session) Close() error { - if s.cancelCtx != nil && s.done != nil { - if s.conn != nil { - s.conn.Close() - } - s.grpcServer.Stop() - <-s.done - } - return nil -} - -func (s *Session) context() context.Context { - return s.ctx -} - -func (s *Session) closed() bool { - select { - case <-s.context().Done(): - return true - default: - return false - } -} - -// MethodURL returns a gRPC method URL for service and method name -func MethodURL(s, m string) string { - return "/" + s + "/" + m -} - -func traceFilter() otgrpc.Option { - return otgrpc.IncludingSpans(func(parentSpanCtx opentracing.SpanContext, - method string, - req, resp interface{}) bool { - return !strings.HasSuffix(method, "Health/Check") - }) -} diff --git a/vendor/github.com/moby/buildkit/session/sshforward/copy.go b/vendor/github.com/moby/buildkit/session/sshforward/copy.go deleted file mode 100644 index c101f3b4558f..000000000000 --- a/vendor/github.com/moby/buildkit/session/sshforward/copy.go +++ /dev/null @@ -1,61 +0,0 @@ -package sshforward - -import ( - io "io" - - context "golang.org/x/net/context" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc" -) - -func Copy(ctx context.Context, conn io.ReadWriteCloser, stream grpc.Stream) error { - g, ctx := errgroup.WithContext(ctx) - - g.Go(func() (retErr error) { - p := &BytesMessage{} - for { - if err := stream.RecvMsg(p); err != nil { - if err == io.EOF { - return nil - } - conn.Close() - return err - } - select { - case <-ctx.Done(): - conn.Close() - return ctx.Err() - default: - } - if _, err := conn.Write(p.Data); err != nil { - conn.Close() - return err - } - p.Data = p.Data[:0] - } - }) - - g.Go(func() (retErr 
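Note on the removed session.go above: Session.Run dials a single "h2c" connection, advertises every registered gRPC method to the peer through the X-Docker-Expose-Session-Grpc-Method header, then serves its own gRPC server over that connection. The method list comes from grpc.Server.GetServiceInfo, a public grpc-go API; a sketch of that enumeration step (methodURLs is an illustrative name):

    import "google.golang.org/grpc"

    // methodURLs reproduces the loop in the removed Session.Run: one
    // "/service/method" URL per registered method, ready to be sent as
    // header values so the peer knows which services it may call back.
    func methodURLs(s *grpc.Server) []string {
        var urls []string
        for name, svc := range s.GetServiceInfo() {
            for _, m := range svc.Methods {
                urls = append(urls, "/"+name+"/"+m.Name)
            }
        }
        return urls
    }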
error) { - for { - buf := make([]byte, 32*1024) - n, err := conn.Read(buf) - switch { - case err == io.EOF: - return nil - case err != nil: - return err - } - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - p := &BytesMessage{Data: buf[:n]} - if err := stream.SendMsg(p); err != nil { - return err - } - } - }) - - return g.Wait() -} diff --git a/vendor/github.com/moby/buildkit/session/sshforward/generate.go b/vendor/github.com/moby/buildkit/session/sshforward/generate.go deleted file mode 100644 index feecc7743c25..000000000000 --- a/vendor/github.com/moby/buildkit/session/sshforward/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package sshforward - -//go:generate protoc --gogoslick_out=plugins=grpc:. ssh.proto diff --git a/vendor/github.com/moby/buildkit/session/sshforward/ssh.go b/vendor/github.com/moby/buildkit/session/sshforward/ssh.go deleted file mode 100644 index a4effef604a2..000000000000 --- a/vendor/github.com/moby/buildkit/session/sshforward/ssh.go +++ /dev/null @@ -1,113 +0,0 @@ -package sshforward - -import ( - "io/ioutil" - "net" - "os" - "path/filepath" - - "github.com/moby/buildkit/session" - context "golang.org/x/net/context" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc/metadata" -) - -// DefaultID is the default ssh ID -const DefaultID = "default" - -const KeySSHID = "buildkit.ssh.id" - -type server struct { - caller session.Caller -} - -func (s *server) run(ctx context.Context, l net.Listener, id string) error { - eg, ctx := errgroup.WithContext(ctx) - - eg.Go(func() error { - <-ctx.Done() - return ctx.Err() - }) - - eg.Go(func() error { - for { - conn, err := l.Accept() - if err != nil { - return err - } - - client := NewSSHClient(s.caller.Conn()) - - opts := make(map[string][]string) - opts[KeySSHID] = []string{id} - ctx = metadata.NewOutgoingContext(ctx, opts) - - stream, err := client.ForwardAgent(ctx) - if err != nil { - conn.Close() - return err - } - - go Copy(ctx, conn, stream) - } - }) - - return eg.Wait() -} - -type SocketOpt struct { - ID string - UID int - GID int - Mode int -} - -func MountSSHSocket(ctx context.Context, c session.Caller, opt SocketOpt) (sockPath string, closer func() error, err error) { - dir, err := ioutil.TempDir("", ".buildkit-ssh-sock") - if err != nil { - return "", nil, err - } - - defer func() { - if err != nil { - os.RemoveAll(dir) - } - }() - - sockPath = filepath.Join(dir, "ssh_auth_sock") - - l, err := net.Listen("unix", sockPath) - if err != nil { - return "", nil, err - } - - if err := os.Chown(sockPath, opt.UID, opt.GID); err != nil { - l.Close() - return "", nil, err - } - if err := os.Chmod(sockPath, os.FileMode(opt.Mode)); err != nil { - l.Close() - return "", nil, err - } - - s := &server{caller: c} - - id := opt.ID - if id == "" { - id = DefaultID - } - - go s.run(ctx, l, id) // erroring per connection allowed - - return sockPath, func() error { - err := l.Close() - os.RemoveAll(sockPath) - return err - }, nil -} - -func CheckSSHID(ctx context.Context, c session.Caller, id string) error { - client := NewSSHClient(c.Conn()) - _, err := client.CheckAgent(ctx, &CheckAgentRequest{ID: id}) - return err -} diff --git a/vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go b/vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go deleted file mode 100644 index 3fb36c9d34ac..000000000000 --- a/vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go +++ /dev/null @@ -1,816 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
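Note on the removed sshforward/copy.go just above: it pumps a net.Conn and a gRPC stream in both directions under an errgroup, treats io.EOF as a clean shutdown, and closes the connection so the opposite goroutine unblocks. The same shape reduced to plain io.ReadWriteClosers, as a self-contained sketch (pump is a hypothetical name):

    import (
        "context"
        "io"

        "golang.org/x/sync/errgroup"
    )

    // pump copies a<->b until one direction finishes or fails. Closing
    // the peer wakes the goroutine still blocked in a Read, the same
    // trick the removed Copy applied to its net.Conn.
    func pump(ctx context.Context, a, b io.ReadWriteCloser) error {
        g, _ := errgroup.WithContext(ctx) // the original also polled ctx.Done() per chunk
        g.Go(func() error {
            _, err := io.Copy(a, b) // io.Copy returns nil on EOF, i.e. a clean close
            a.Close()
            return err
        })
        g.Go(func() error {
            _, err := io.Copy(b, a)
            b.Close()
            return err
        })
        return g.Wait()
    }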
-// source: ssh.proto - -/* -Package sshforward is a generated protocol buffer package. - -It is generated from these files: - ssh.proto - -It has these top-level messages: - BytesMessage - CheckAgentRequest - CheckAgentResponse -*/ -package sshforward - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import bytes "bytes" - -import strings "strings" -import reflect "reflect" - -import context "golang.org/x/net/context" -import grpc "google.golang.org/grpc" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -// BytesMessage contains a chunk of byte data -type BytesMessage struct { - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *BytesMessage) Reset() { *m = BytesMessage{} } -func (*BytesMessage) ProtoMessage() {} -func (*BytesMessage) Descriptor() ([]byte, []int) { return fileDescriptorSsh, []int{0} } - -func (m *BytesMessage) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -type CheckAgentRequest struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (m *CheckAgentRequest) Reset() { *m = CheckAgentRequest{} } -func (*CheckAgentRequest) ProtoMessage() {} -func (*CheckAgentRequest) Descriptor() ([]byte, []int) { return fileDescriptorSsh, []int{1} } - -func (m *CheckAgentRequest) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -type CheckAgentResponse struct { -} - -func (m *CheckAgentResponse) Reset() { *m = CheckAgentResponse{} } -func (*CheckAgentResponse) ProtoMessage() {} -func (*CheckAgentResponse) Descriptor() ([]byte, []int) { return fileDescriptorSsh, []int{2} } - -func init() { - proto.RegisterType((*BytesMessage)(nil), "moby.sshforward.v1.BytesMessage") - proto.RegisterType((*CheckAgentRequest)(nil), "moby.sshforward.v1.CheckAgentRequest") - proto.RegisterType((*CheckAgentResponse)(nil), "moby.sshforward.v1.CheckAgentResponse") -} -func (this *BytesMessage) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*BytesMessage) - if !ok { - that2, ok := that.(BytesMessage) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - return true -} -func (this *CheckAgentRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CheckAgentRequest) - if !ok { - that2, ok := that.(CheckAgentRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ID != that1.ID { - return false - } - return true -} -func (this *CheckAgentResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CheckAgentResponse) - if !ok { - that2, ok := that.(CheckAgentResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return 
true -} -func (this *BytesMessage) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&sshforward.BytesMessage{") - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *CheckAgentRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&sshforward.CheckAgentRequest{") - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *CheckAgentResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&sshforward.CheckAgentResponse{") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringSsh(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for SSH service - -type SSHClient interface { - CheckAgent(ctx context.Context, in *CheckAgentRequest, opts ...grpc.CallOption) (*CheckAgentResponse, error) - ForwardAgent(ctx context.Context, opts ...grpc.CallOption) (SSH_ForwardAgentClient, error) -} - -type sSHClient struct { - cc *grpc.ClientConn -} - -func NewSSHClient(cc *grpc.ClientConn) SSHClient { - return &sSHClient{cc} -} - -func (c *sSHClient) CheckAgent(ctx context.Context, in *CheckAgentRequest, opts ...grpc.CallOption) (*CheckAgentResponse, error) { - out := new(CheckAgentResponse) - err := grpc.Invoke(ctx, "/moby.sshforward.v1.SSH/CheckAgent", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *sSHClient) ForwardAgent(ctx context.Context, opts ...grpc.CallOption) (SSH_ForwardAgentClient, error) { - stream, err := grpc.NewClientStream(ctx, &_SSH_serviceDesc.Streams[0], c.cc, "/moby.sshforward.v1.SSH/ForwardAgent", opts...) 
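For reference, the client surface above is consumed in two steps: NewSSHClient wraps an existing *grpc.ClientConn, and CheckAgent is an ordinary unary call. A usage sketch against the upstream buildkit package these files were vendored from (checkDefaultAgent is an illustrative name):

    import (
        "context"

        "github.com/moby/buildkit/session/sshforward"
        "google.golang.org/grpc"
    )

    // checkDefaultAgent asks the peer whether it exposes the default
    // ssh agent ID before any forwarding is attempted.
    func checkDefaultAgent(ctx context.Context, conn *grpc.ClientConn) error {
        client := sshforward.NewSSHClient(conn)
        _, err := client.CheckAgent(ctx, &sshforward.CheckAgentRequest{ID: sshforward.DefaultID})
        return err
    }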
- if err != nil { - return nil, err - } - x := &sSHForwardAgentClient{stream} - return x, nil -} - -type SSH_ForwardAgentClient interface { - Send(*BytesMessage) error - Recv() (*BytesMessage, error) - grpc.ClientStream -} - -type sSHForwardAgentClient struct { - grpc.ClientStream -} - -func (x *sSHForwardAgentClient) Send(m *BytesMessage) error { - return x.ClientStream.SendMsg(m) -} - -func (x *sSHForwardAgentClient) Recv() (*BytesMessage, error) { - m := new(BytesMessage) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for SSH service - -type SSHServer interface { - CheckAgent(context.Context, *CheckAgentRequest) (*CheckAgentResponse, error) - ForwardAgent(SSH_ForwardAgentServer) error -} - -func RegisterSSHServer(s *grpc.Server, srv SSHServer) { - s.RegisterService(&_SSH_serviceDesc, srv) -} - -func _SSH_CheckAgent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CheckAgentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SSHServer).CheckAgent(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.sshforward.v1.SSH/CheckAgent", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SSHServer).CheckAgent(ctx, req.(*CheckAgentRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SSH_ForwardAgent_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SSHServer).ForwardAgent(&sSHForwardAgentServer{stream}) -} - -type SSH_ForwardAgentServer interface { - Send(*BytesMessage) error - Recv() (*BytesMessage, error) - grpc.ServerStream -} - -type sSHForwardAgentServer struct { - grpc.ServerStream -} - -func (x *sSHForwardAgentServer) Send(m *BytesMessage) error { - return x.ServerStream.SendMsg(m) -} - -func (x *sSHForwardAgentServer) Recv() (*BytesMessage, error) { - m := new(BytesMessage) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _SSH_serviceDesc = grpc.ServiceDesc{ - ServiceName: "moby.sshforward.v1.SSH", - HandlerType: (*SSHServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CheckAgent", - Handler: _SSH_CheckAgent_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ForwardAgent", - Handler: _SSH_ForwardAgent_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "ssh.proto", -} - -func (m *BytesMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Data) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintSsh(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) - } - return i, nil -} - -func (m *CheckAgentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CheckAgentRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ID) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintSsh(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - } - return i, nil -} - -func (m *CheckAgentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() 
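The ForwardAgent pair above is a plain bidirectional stream of BytesMessage chunks: the service descriptor sets both ServerStreams and ClientStreams, so either side may Send and Recv freely. A minimal server-side shape against the upstream buildkit package (an echo loop for illustration only; the real provider bridges to an ssh agent, and a complete sshforward.SSHServer would also implement CheckAgent):

    import (
        "io"

        "github.com/moby/buildkit/session/sshforward"
    )

    type echoForwarder struct{}

    // ForwardAgent implements the streaming half of sshforward.SSHServer
    // by returning every received chunk to the sender.
    func (echoForwarder) ForwardAgent(stream sshforward.SSH_ForwardAgentServer) error {
        for {
            msg, err := stream.Recv()
            if err == io.EOF {
                return nil // client closed its send side
            }
            if err != nil {
                return err
            }
            if err := stream.Send(msg); err != nil {
                return err
            }
        }
    }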
- dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CheckAgentResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func encodeVarintSsh(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *BytesMessage) Size() (n int) { - var l int - _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovSsh(uint64(l)) - } - return n -} - -func (m *CheckAgentRequest) Size() (n int) { - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovSsh(uint64(l)) - } - return n -} - -func (m *CheckAgentResponse) Size() (n int) { - var l int - _ = l - return n -} - -func sovSsh(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozSsh(x uint64) (n int) { - return sovSsh(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *BytesMessage) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&BytesMessage{`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s -} -func (this *CheckAgentRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CheckAgentRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `}`, - }, "") - return s -} -func (this *CheckAgentResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CheckAgentResponse{`, - `}`, - }, "") - return s -} -func valueToStringSsh(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *BytesMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSsh - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSsh - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSsh - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
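encodeVarintSsh, sovSsh, and the Unmarshal loops above all speak the standard protobuf base-128 varint: seven payload bits per byte, high bit set on every byte except the last. A dependency-free round trip of that encoding (helper names are illustrative; the stdlib's encoding/binary.PutUvarint and binary.Uvarint implement the same format):

    package main

    import "fmt"

    // putUvarint writes v into buf in varint form and returns the byte
    // count, mirroring the generated encodeVarintSsh.
    func putUvarint(buf []byte, v uint64) int {
        i := 0
        for v >= 0x80 {
            buf[i] = byte(v) | 0x80 // low 7 bits plus continuation bit
            v >>= 7
            i++
        }
        buf[i] = byte(v)
        return i + 1
    }

    // uvarint decodes one varint, mirroring the shift-accumulate loops in
    // the generated Unmarshal and skipSsh (overflow checks omitted here).
    func uvarint(buf []byte) (v uint64, n int) {
        for i, b := range buf {
            v |= uint64(b&0x7F) << (7 * uint(i))
            if b < 0x80 {
                return v, i + 1
            }
        }
        return 0, 0 // truncated input
    }

    func main() {
        buf := make([]byte, 10)
        n := putUvarint(buf, 300)
        v, _ := uvarint(buf[:n])
        fmt.Printf("% x -> %d\n", buf[:n], v) // prints: ac 02 -> 300
    }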
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSsh(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSsh - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CheckAgentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSsh - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CheckAgentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CheckAgentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSsh - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSsh - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSsh(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSsh - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CheckAgentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSsh - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CheckAgentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CheckAgentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipSsh(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSsh - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSsh(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSsh - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSsh - } - if iNdEx >= l { - return 0, 
io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSsh - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthSsh - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSsh - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipSsh(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthSsh = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSsh = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("ssh.proto", fileDescriptorSsh) } - -var fileDescriptorSsh = []byte{ - // 243 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2c, 0x2e, 0xce, 0xd0, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xca, 0xcd, 0x4f, 0xaa, 0xd4, 0x2b, 0x2e, 0xce, 0x48, - 0xcb, 0x2f, 0x2a, 0x4f, 0x2c, 0x4a, 0xd1, 0x2b, 0x33, 0x54, 0x52, 0xe2, 0xe2, 0x71, 0xaa, 0x2c, - 0x49, 0x2d, 0xf6, 0x4d, 0x2d, 0x2e, 0x4e, 0x4c, 0x4f, 0x15, 0x12, 0xe2, 0x62, 0x49, 0x49, 0x2c, - 0x49, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0xb3, 0x95, 0x94, 0xb9, 0x04, 0x9d, 0x33, - 0x52, 0x93, 0xb3, 0x1d, 0xd3, 0x53, 0xf3, 0x4a, 0x82, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, - 0xf8, 0xb8, 0x98, 0x3c, 0x5d, 0xc0, 0xca, 0x38, 0x83, 0x98, 0x3c, 0x5d, 0x94, 0x44, 0xb8, 0x84, - 0x90, 0x15, 0x15, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x1a, 0xed, 0x62, 0xe4, 0x62, 0x0e, 0x0e, 0xf6, - 0x10, 0x8a, 0xe6, 0xe2, 0x42, 0xc8, 0x0a, 0xa9, 0xea, 0x61, 0xba, 0x44, 0x0f, 0xc3, 0x0a, 0x29, - 0x35, 0x42, 0xca, 0x20, 0x96, 0x08, 0x85, 0x71, 0xf1, 0xb8, 0x41, 0x14, 0x40, 0x8c, 0x57, 0xc0, - 0xa6, 0x0f, 0xd9, 0x97, 0x52, 0x04, 0x55, 0x68, 0x30, 0x1a, 0x30, 0x3a, 0x59, 0x5c, 0x78, 0x28, - 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, 0x3c, - 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x5f, - 0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x28, 0x2e, 0x84, 0x69, - 0x49, 0x6c, 0xe0, 0x00, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x31, 0x3e, 0x40, 0xab, 0x7d, - 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/moby/buildkit/session/sshforward/ssh.proto b/vendor/github.com/moby/buildkit/session/sshforward/ssh.proto deleted file mode 100644 index 99f63436a610..000000000000 --- a/vendor/github.com/moby/buildkit/session/sshforward/ssh.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package moby.sshforward.v1; - -option go_package = "sshforward"; - -service SSH { - rpc CheckAgent(CheckAgentRequest) returns (CheckAgentResponse); - rpc ForwardAgent(stream BytesMessage) returns (stream BytesMessage); -} - -// 
BytesMessage contains a chunk of byte data -message BytesMessage{ - bytes data = 1; -} - -message CheckAgentRequest { - string ID = 1; -} - -message CheckAgentResponse { -} \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go b/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go deleted file mode 100644 index 009a91b7dd79..000000000000 --- a/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go +++ /dev/null @@ -1,198 +0,0 @@ -package sshprovider - -import ( - "context" - "io" - "io/ioutil" - "net" - "os" - "time" - - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/sshforward" - "github.com/pkg/errors" - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/agent" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -// AgentConfig is the config for a single exposed SSH agent -type AgentConfig struct { - ID string - Paths []string -} - -// NewSSHAgentProvider creates a session provider that allows access to ssh agent -func NewSSHAgentProvider(confs []AgentConfig) (session.Attachable, error) { - m := map[string]source{} - for _, conf := range confs { - if len(conf.Paths) == 0 || len(conf.Paths) == 1 && conf.Paths[0] == "" { - conf.Paths = []string{os.Getenv("SSH_AUTH_SOCK")} - } - - if conf.Paths[0] == "" { - return nil, errors.Errorf("invalid empty ssh agent socket, make sure SSH_AUTH_SOCK is set") - } - - src, err := toAgentSource(conf.Paths) - if err != nil { - return nil, err - } - if conf.ID == "" { - conf.ID = sshforward.DefaultID - } - if _, ok := m[conf.ID]; ok { - return nil, errors.Errorf("invalid duplicate ID %s", conf.ID) - } - m[conf.ID] = src - } - - return &socketProvider{m: m}, nil -} - -type source struct { - agent agent.Agent - socket string -} - -type socketProvider struct { - m map[string]source -} - -func (sp *socketProvider) Register(server *grpc.Server) { - sshforward.RegisterSSHServer(server, sp) -} - -func (sp *socketProvider) CheckAgent(ctx context.Context, req *sshforward.CheckAgentRequest) (*sshforward.CheckAgentResponse, error) { - id := sshforward.DefaultID - if req.ID != "" { - id = req.ID - } - if _, ok := sp.m[id]; !ok { - return &sshforward.CheckAgentResponse{}, errors.Errorf("unset ssh forward key %s", id) - } - return &sshforward.CheckAgentResponse{}, nil -} - -func (sp *socketProvider) ForwardAgent(stream sshforward.SSH_ForwardAgentServer) error { - id := sshforward.DefaultID - - opts, _ := metadata.FromIncomingContext(stream.Context()) // if no metadata continue with empty object - - if v, ok := opts[sshforward.KeySSHID]; ok && len(v) > 0 && v[0] != "" { - id = v[0] - } - - src, ok := sp.m[id] - if !ok { - return errors.Errorf("unset ssh forward key %s", id) - } - - var a agent.Agent - - if src.socket != "" { - conn, err := net.DialTimeout("unix", src.socket, time.Second) - if err != nil { - return errors.Wrapf(err, "failed to connect to %s", src.socket) - } - - a = &readOnlyAgent{agent.NewClient(conn)} - defer conn.Close() - } else { - a = src.agent - } - - s1, s2 := sockPair() - - eg, ctx := errgroup.WithContext(context.TODO()) - - eg.Go(func() error { - return agent.ServeAgent(a, s1) - }) - - eg.Go(func() error { - defer s1.Close() - return sshforward.Copy(ctx, s2, stream) - }) - - return eg.Wait() -} - -func toAgentSource(paths []string) (source, error) { - var keys bool - var socket string - a := agent.NewKeyring() - for _, p := range paths { - if socket != "" { 
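Note on toAgentSource above: each configured path is either a UNIX socket (at most one, dialed as a live agent) or a private key file folded into an in-memory keyring, and socket-backed agents are later wrapped in readOnlyAgent so builds cannot add, remove, or lock keys. Loading one key takes three golang.org/x/crypto calls; a sketch (keyringFrom is an illustrative name, and passphrase-protected keys are not handled, matching the TODO in the removed code):

    import (
        "io/ioutil"

        "golang.org/x/crypto/ssh"
        "golang.org/x/crypto/ssh/agent"
    )

    // keyringFrom parses one PEM-encoded private key and adds it to a
    // fresh in-memory agent, as the removed toAgentSource did per path
    // (the original additionally capped reads at 100KB).
    func keyringFrom(path string) (agent.Agent, error) {
        dt, err := ioutil.ReadFile(path)
        if err != nil {
            return nil, err
        }
        k, err := ssh.ParseRawPrivateKey(dt)
        if err != nil {
            return nil, err
        }
        kr := agent.NewKeyring()
        if err := kr.Add(agent.AddedKey{PrivateKey: k}); err != nil {
            return nil, err
        }
        return kr, nil
    }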
- return source{}, errors.New("only single socket allowed") - } - fi, err := os.Stat(p) - if err != nil { - return source{}, errors.WithStack(err) - } - if fi.Mode()&os.ModeSocket > 0 { - if keys { - return source{}, errors.Errorf("invalid combination of keys and sockets") - } - socket = p - continue - } - keys = true - f, err := os.Open(p) - if err != nil { - return source{}, errors.Wrapf(err, "failed to open %s", p) - } - dt, err := ioutil.ReadAll(&io.LimitedReader{R: f, N: 100 * 1024}) - if err != nil { - return source{}, errors.Wrapf(err, "failed to read %s", p) - } - - k, err := ssh.ParseRawPrivateKey(dt) - if err != nil { - return source{}, errors.Wrapf(err, "failed to parse %s", p) // TODO: prompt passphrase? - } - if err := a.Add(agent.AddedKey{PrivateKey: k}); err != nil { - return source{}, errors.Wrapf(err, "failed to add %s to agent", p) - } - } - - if socket != "" { - return source{socket: socket}, nil - } - - return source{agent: a}, nil -} - -func sockPair() (io.ReadWriteCloser, io.ReadWriteCloser) { - pr1, pw1 := io.Pipe() - pr2, pw2 := io.Pipe() - return &sock{pr1, pw2, pw1}, &sock{pr2, pw1, pw2} -} - -type sock struct { - io.Reader - io.Writer - io.Closer -} - -type readOnlyAgent struct { - agent.Agent -} - -func (a *readOnlyAgent) Add(_ agent.AddedKey) error { - return errors.Errorf("adding new keys not allowed by buildkit") -} - -func (a *readOnlyAgent) Remove(_ ssh.PublicKey) error { - return errors.Errorf("removing keys not allowed by buildkit") -} - -func (a *readOnlyAgent) RemoveAll() error { - return errors.Errorf("removing keys not allowed by buildkit") -} - -func (a *readOnlyAgent) Lock(_ []byte) error { - return errors.Errorf("locking agent not allowed by buildkit") -} diff --git a/vendor/github.com/moby/buildkit/session/testutil/testutil.go b/vendor/github.com/moby/buildkit/session/testutil/testutil.go deleted file mode 100644 index 168656dfd630..000000000000 --- a/vendor/github.com/moby/buildkit/session/testutil/testutil.go +++ /dev/null @@ -1,70 +0,0 @@ -package testutil - -import ( - "context" - "io" - "net" - "time" - - "github.com/sirupsen/logrus" -) - -// Handler is function called to handle incoming connection -type Handler func(ctx context.Context, conn net.Conn, meta map[string][]string) error - -// Dialer is a function for dialing an outgoing connection -type Dialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) - -// TestStream creates an in memory session dialer for a handler function -func TestStream(handler Handler) Dialer { - s1, s2 := sockPair() - return func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { - go func() { - err := handler(context.TODO(), s1, meta) - if err != nil { - logrus.Error(err) - } - s1.Close() - }() - return s2, nil - } -} - -func sockPair() (*sock, *sock) { - pr1, pw1 := io.Pipe() - pr2, pw2 := io.Pipe() - return &sock{pw1, pr2, pw1}, &sock{pw2, pr1, pw2} -} - -type sock struct { - io.Writer - io.Reader - io.Closer -} - -func (s *sock) LocalAddr() net.Addr { - return dummyAddr{} -} -func (s *sock) RemoteAddr() net.Addr { - return dummyAddr{} -} -func (s *sock) SetDeadline(t time.Time) error { - return nil -} -func (s *sock) SetReadDeadline(t time.Time) error { - return nil -} -func (s *sock) SetWriteDeadline(t time.Time) error { - return nil -} - -type dummyAddr struct { -} - -func (d dummyAddr) Network() string { - return "tcp" -} - -func (d dummyAddr) String() string { - return "localhost" -} diff --git 
a/vendor/github.com/moby/buildkit/snapshot/blobmapping/snapshotter.go b/vendor/github.com/moby/buildkit/snapshot/blobmapping/snapshotter.go deleted file mode 100644 index e145235a290b..000000000000 --- a/vendor/github.com/moby/buildkit/snapshot/blobmapping/snapshotter.go +++ /dev/null @@ -1,140 +0,0 @@ -package blobmapping - -import ( - "context" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/snapshots" - "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/snapshot" - digest "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" - bolt "go.etcd.io/bbolt" -) - -const blobKey = "blobmapping.blob" - -type Opt struct { - Content content.Store - Snapshotter snapshot.SnapshotterBase - MetadataStore *metadata.Store -} - -type Info struct { - snapshots.Info - Blob string -} - -type DiffPair struct { - Blobsum digest.Digest - DiffID digest.Digest -} - -// this snapshotter keeps an internal mapping between a snapshot and a blob - -type Snapshotter struct { - snapshot.SnapshotterBase - opt Opt -} - -func NewSnapshotter(opt Opt) snapshot.Snapshotter { - s := &Snapshotter{ - SnapshotterBase: opt.Snapshotter, - opt: opt, - } - - return s -} - -// Remove also removes a reference to a blob. If it is a last reference then it deletes it the blob as well -// Remove is not safe to be called concurrently -func (s *Snapshotter) Remove(ctx context.Context, key string) error { - _, blob, err := s.GetBlob(ctx, key) - if err != nil { - return err - } - - blobs, err := s.opt.MetadataStore.Search(index(blob)) - if err != nil { - return err - } - - if err := s.SnapshotterBase.Remove(ctx, key); err != nil { - return err - } - - if len(blobs) == 1 && blobs[0].ID() == key { // last snapshot - if err := s.opt.Content.Delete(ctx, blob); err != nil { - logrus.Errorf("failed to delete blob %v: %+v", blob, err) - } - } - return nil -} - -func (s *Snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) { - u, err := s.SnapshotterBase.Usage(ctx, key) - if err != nil { - return snapshots.Usage{}, err - } - _, blob, err := s.GetBlob(ctx, key) - if err != nil { - return u, err - } - if blob != "" { - info, err := s.opt.Content.Info(ctx, blob) - if err != nil { - return u, err - } - (&u).Add(snapshots.Usage{Size: info.Size, Inodes: 1}) - } - return u, nil -} - -func (s *Snapshotter) GetBlob(ctx context.Context, key string) (digest.Digest, digest.Digest, error) { - md, _ := s.opt.MetadataStore.Get(key) - v := md.Get(blobKey) - if v == nil { - return "", "", nil - } - var blob DiffPair - if err := v.Unmarshal(&blob); err != nil { - return "", "", err - } - return blob.DiffID, blob.Blobsum, nil -} - -// Validates that there is no blob associated with the snapshot. -// Checks that there is a blob in the content store. -// If same blob has already been set then this is a noop. 
-func (s *Snapshotter) SetBlob(ctx context.Context, key string, diffID, blobsum digest.Digest) error { - info, err := s.opt.Content.Info(ctx, blobsum) - if err != nil { - return err - } - if _, ok := info.Labels["containerd.io/uncompressed"]; !ok { - labels := map[string]string{ - "containerd.io/uncompressed": diffID.String(), - } - if _, err := s.opt.Content.Update(ctx, content.Info{ - Digest: blobsum, - Labels: labels, - }, "labels.containerd.io/uncompressed"); err != nil { - return err - } - } - md, _ := s.opt.MetadataStore.Get(key) - - v, err := metadata.NewValue(DiffPair{DiffID: diffID, Blobsum: blobsum}) - if err != nil { - return err - } - v.Index = index(blobsum) - - return md.Update(func(b *bolt.Bucket) error { - return md.SetValue(b, blobKey, v) - }) -} - -func index(blob digest.Digest) string { - return "blobmap::" + blob.String() -} diff --git a/vendor/github.com/moby/buildkit/snapshot/containerd/content.go b/vendor/github.com/moby/buildkit/snapshot/containerd/content.go deleted file mode 100644 index c3bb86dba2e5..000000000000 --- a/vendor/github.com/moby/buildkit/snapshot/containerd/content.go +++ /dev/null @@ -1,124 +0,0 @@ -package containerd - -import ( - "context" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/namespaces" - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -type garbageCollectFn func(context.Context) error - -func NewContentStore(store content.Store, ns string, gc func(context.Context) error) content.Store { - return &noGCContentStore{&nsContent{ns, store, gc}} -} - -type nsContent struct { - ns string - content.Store - gc garbageCollectFn -} - -func (c *nsContent) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { - ctx = namespaces.WithNamespace(ctx, c.ns) - return c.Store.Info(ctx, dgst) -} - -func (c *nsContent) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { - ctx = namespaces.WithNamespace(ctx, c.ns) - return c.Store.Update(ctx, info, fieldpaths...) -} - -func (c *nsContent) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { - ctx = namespaces.WithNamespace(ctx, c.ns) - return c.Store.Walk(ctx, fn, filters...) -} - -func (c *nsContent) Delete(ctx context.Context, dgst digest.Digest) error { - ctx = namespaces.WithNamespace(ctx, c.ns) - if _, err := c.Update(ctx, content.Info{ - Digest: dgst, - }, "labels.containerd.io/gc.root"); err != nil { - return err - } // calling snapshotter.Remove here causes a race in containerd - if c.gc == nil { - return nil - } - return c.gc(ctx) -} - -func (c *nsContent) Status(ctx context.Context, ref string) (content.Status, error) { - ctx = namespaces.WithNamespace(ctx, c.ns) - return c.Store.Status(ctx, ref) -} - -func (c *nsContent) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { - ctx = namespaces.WithNamespace(ctx, c.ns) - return c.Store.ListStatuses(ctx, filters...) -} - -func (c *nsContent) Abort(ctx context.Context, ref string) error { - ctx = namespaces.WithNamespace(ctx, c.ns) - return c.Store.Abort(ctx, ref) -} - -func (c *nsContent) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { - ctx = namespaces.WithNamespace(ctx, c.ns) - return c.Store.ReaderAt(ctx, desc) -} - -func (c *nsContent) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { - return c.writer(ctx, 3, opts...) 
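Note on blobmapping.SetBlob above: besides persisting the DiffID/Blobsum pair in the metadata store, it stamps the blob with the standard containerd.io/uncompressed label so the compressed digest can later be mapped back to its uncompressed DiffID. The label write is a single containerd content-store call; a condensed sketch (labelUncompressed is an illustrative name):

    import (
        "context"

        "github.com/containerd/containerd/content"
        digest "github.com/opencontainers/go-digest"
    )

    // labelUncompressed records the uncompressed digest of a compressed
    // blob; the fieldpath argument restricts the update to that label.
    func labelUncompressed(ctx context.Context, cs content.Store, blobsum, diffID digest.Digest) error {
        _, err := cs.Update(ctx, content.Info{
            Digest: blobsum,
            Labels: map[string]string{"containerd.io/uncompressed": diffID.String()},
        }, "labels.containerd.io/uncompressed")
        return err
    }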
-} - -func (c *nsContent) writer(ctx context.Context, retries int, opts ...content.WriterOpt) (content.Writer, error) { - var wOpts content.WriterOpts - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil, err - } - } - ctx = namespaces.WithNamespace(ctx, c.ns) - w, err := c.Store.Writer(ctx, opts...) - if err != nil { - if errdefs.IsAlreadyExists(err) && wOpts.Desc.Digest != "" && retries > 0 { - _, err2 := c.Update(ctx, content.Info{ - Digest: wOpts.Desc.Digest, - Labels: map[string]string{ - "containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339Nano), - }, - }, "labels.containerd.io/gc.root") - if err2 != nil { - return c.writer(ctx, retries-1, opts...) - } - } - } - return w, err -} - -type noGCContentStore struct { - content.Store -} -type noGCWriter struct { - content.Writer -} - -func (cs *noGCContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { - w, err := cs.Store.Writer(ctx, opts...) - return &noGCWriter{w}, err -} - -func (w *noGCWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { - opts = append(opts, func(info *content.Info) error { - if info.Labels == nil { - info.Labels = map[string]string{} - } - info.Labels["containerd.io/gc.root"] = time.Now().UTC().Format(time.RFC3339Nano) - return nil - }) - return w.Writer.Commit(ctx, size, expected, opts...) -} diff --git a/vendor/github.com/moby/buildkit/snapshot/containerd/snapshotter.go b/vendor/github.com/moby/buildkit/snapshot/containerd/snapshotter.go deleted file mode 100644 index c4b6d1282cbb..000000000000 --- a/vendor/github.com/moby/buildkit/snapshot/containerd/snapshotter.go +++ /dev/null @@ -1,99 +0,0 @@ -package containerd - -import ( - "context" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/namespaces" - ctdsnapshot "github.com/containerd/containerd/snapshots" - "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/snapshot/blobmapping" -) - -func NewSnapshotter(snapshotter ctdsnapshot.Snapshotter, store content.Store, mdstore *metadata.Store, ns string, gc func(context.Context) error) snapshot.Snapshotter { - return blobmapping.NewSnapshotter(blobmapping.Opt{ - Content: store, - Snapshotter: snapshot.FromContainerdSnapshotter(&nsSnapshotter{ns, snapshotter, gc}), - MetadataStore: mdstore, - }) -} - -type nsSnapshotter struct { - ns string - ctdsnapshot.Snapshotter - gc garbageCollectFn -} - -func (s *nsSnapshotter) Stat(ctx context.Context, key string) (ctdsnapshot.Info, error) { - ctx = namespaces.WithNamespace(ctx, s.ns) - info, err := s.Snapshotter.Stat(ctx, key) - if err == nil { - if _, ok := info.Labels["labels.containerd.io/gc.root"]; !ok { - if err := addRootLabel()(&info); err != nil { - return info, err - } - return s.Update(ctx, info, "labels.containerd.io/gc.root") - } - } - return info, err -} - -func (s *nsSnapshotter) Update(ctx context.Context, info ctdsnapshot.Info, fieldpaths ...string) (ctdsnapshot.Info, error) { - ctx = namespaces.WithNamespace(ctx, s.ns) - return s.Snapshotter.Update(ctx, info, fieldpaths...) 
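Both deleted wrappers in this area, nsContent and nsSnapshotter, follow one pattern: embed the containerd store, then override each method to pin the context to a fixed namespace before delegating. The whole trick is namespaces.WithNamespace, a real containerd API; in miniature (nsStore is an illustrative reduction):

    import (
        "context"

        "github.com/containerd/containerd/content"
        "github.com/containerd/containerd/namespaces"
        digest "github.com/opencontainers/go-digest"
    )

    // nsStore forwards every call inside a fixed namespace; only Info is
    // shown, the removed wrappers repeat this for each interface method.
    type nsStore struct {
        ns string
        content.Store
    }

    func (s *nsStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
        return s.Store.Info(namespaces.WithNamespace(ctx, s.ns), dgst)
    }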
-} - -func (s *nsSnapshotter) Usage(ctx context.Context, key string) (ctdsnapshot.Usage, error) { - ctx = namespaces.WithNamespace(ctx, s.ns) - return s.Snapshotter.Usage(ctx, key) -} -func (s *nsSnapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) { - ctx = namespaces.WithNamespace(ctx, s.ns) - return s.Snapshotter.Mounts(ctx, key) -} -func (s *nsSnapshotter) Prepare(ctx context.Context, key, parent string, opts ...ctdsnapshot.Opt) ([]mount.Mount, error) { - ctx = namespaces.WithNamespace(ctx, s.ns) - return s.Snapshotter.Prepare(ctx, key, parent, addRootLabel(opts...)) -} -func (s *nsSnapshotter) View(ctx context.Context, key, parent string, opts ...ctdsnapshot.Opt) ([]mount.Mount, error) { - ctx = namespaces.WithNamespace(ctx, s.ns) - return s.Snapshotter.View(ctx, key, parent, addRootLabel(opts...)) -} -func (s *nsSnapshotter) Commit(ctx context.Context, name, key string, opts ...ctdsnapshot.Opt) error { - ctx = namespaces.WithNamespace(ctx, s.ns) - return s.Snapshotter.Commit(ctx, name, key, addRootLabel(opts...)) -} -func (s *nsSnapshotter) Remove(ctx context.Context, key string) error { - ctx = namespaces.WithNamespace(ctx, s.ns) - if _, err := s.Update(ctx, ctdsnapshot.Info{ - Name: key, - }, "labels.containerd.io/gc.root"); err != nil { - return err - } // calling snapshotter.Remove here causes a race in containerd - if s.gc == nil { - return nil - } - return s.gc(ctx) -} -func (s *nsSnapshotter) Walk(ctx context.Context, fn func(context.Context, ctdsnapshot.Info) error) error { - ctx = namespaces.WithNamespace(ctx, s.ns) - return s.Snapshotter.Walk(ctx, fn) -} - -func addRootLabel(opts ...ctdsnapshot.Opt) ctdsnapshot.Opt { - return func(info *ctdsnapshot.Info) error { - for _, opt := range opts { - if err := opt(info); err != nil { - return err - } - } - if info.Labels == nil { - info.Labels = map[string]string{} - } - info.Labels["containerd.io/gc.root"] = time.Now().UTC().Format(time.RFC3339Nano) - return nil - } -} diff --git a/vendor/github.com/moby/buildkit/snapshot/imagerefchecker/checker.go b/vendor/github.com/moby/buildkit/snapshot/imagerefchecker/checker.go deleted file mode 100644 index f31384c0915a..000000000000 --- a/vendor/github.com/moby/buildkit/snapshot/imagerefchecker/checker.go +++ /dev/null @@ -1,154 +0,0 @@ -package imagerefchecker - -import ( - "context" - "encoding/json" - "strings" - "sync" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/snapshot" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -const ( - emptyGZLayer = digest.Digest("sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1") -) - -type Opt struct { - Snapshotter snapshot.Snapshotter - ImageStore images.Store - ContentStore content.Provider -} - -// New creates new image reference checker that can be used to see if a reference -// is being used by any of the images in the image store -func New(opt Opt) cache.ExternalRefCheckerFunc { - return func() (cache.ExternalRefChecker, error) { - return &checker{opt: opt}, nil - } -} - -type checker struct { - opt Opt - once sync.Once - images map[string]struct{} - cache map[string]bool -} - -func (c *checker) Exists(key string) bool { - if c.opt.ImageStore == nil { - return false - } - - c.once.Do(c.init) - - if b, ok := c.cache[key]; ok { - return b - } - - l, err := c.getLayers(key) - if err != nil { - c.cache[key] = 
false - return false - } - - _, ok := c.images[layerKey(l)] - c.cache[key] = ok - return ok -} - -func (c *checker) getLayers(key string) ([]specs.Descriptor, error) { - _, blob, err := c.opt.Snapshotter.GetBlob(context.TODO(), key) - if err != nil { - return nil, err - } - stat, err := c.opt.Snapshotter.Stat(context.TODO(), key) - if err != nil { - return nil, err - } - var layers []specs.Descriptor - if parent := stat.Parent; parent != "" { - layers, err = c.getLayers(parent) - if err != nil { - return nil, err - } - } - return append(layers, specs.Descriptor{Digest: blob}), nil -} - -func (c *checker) init() { - c.images = map[string]struct{}{} - c.cache = map[string]bool{} - - imgs, err := c.opt.ImageStore.List(context.TODO()) - if err != nil { - return - } - - var mu sync.Mutex - - for _, img := range imgs { - if err := images.Dispatch(context.TODO(), images.Handlers(layersHandler(c.opt.ContentStore, func(layers []specs.Descriptor) { - mu.Lock() - c.registerLayers(layers) - mu.Unlock() - })), img.Target); err != nil { - return - } - } -} - -func (c *checker) registerLayers(l []specs.Descriptor) { - if k := layerKey(l); k != "" { - c.images[k] = struct{}{} - } -} - -func layerKey(layers []specs.Descriptor) string { - b := &strings.Builder{} - for _, l := range layers { - if l.Digest != emptyGZLayer { - b.Write([]byte(l.Digest)) - } - } - return b.String() -} - -func layersHandler(provider content.Provider, f func([]specs.Descriptor)) images.HandlerFunc { - return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) { - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, specs.MediaTypeImageManifest: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, nil - } - - var manifest specs.Manifest - if err := json.Unmarshal(p, &manifest); err != nil { - return nil, err - } - - f(manifest.Layers) - return nil, nil - case images.MediaTypeDockerSchema2ManifestList, specs.MediaTypeImageIndex: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, nil - } - - var index specs.Index - if err := json.Unmarshal(p, &index); err != nil { - return nil, err - } - - return index.Manifests, nil - default: - return nil, errors.Errorf("encountered unknown type %v", desc.MediaType) - } - } -} diff --git a/vendor/github.com/moby/buildkit/snapshot/localmounter.go b/vendor/github.com/moby/buildkit/snapshot/localmounter.go deleted file mode 100644 index 18e2411cfc90..000000000000 --- a/vendor/github.com/moby/buildkit/snapshot/localmounter.go +++ /dev/null @@ -1,72 +0,0 @@ -package snapshot - -import ( - "io/ioutil" - "os" - "sync" - - "github.com/containerd/containerd/mount" - "github.com/pkg/errors" -) - -type Mounter interface { - Mount() (string, error) - Unmount() error -} - -// LocalMounter is a helper for mounting mountfactory to temporary path. In -// addition it can mount binds without privileges -func LocalMounter(mountable Mountable) Mounter { - return &localMounter{mountable: mountable} -} - -// LocalMounterWithMounts is a helper for mounting to temporary path. 
In -// addition it can mount binds without privileges -func LocalMounterWithMounts(mounts []mount.Mount) Mounter { - return &localMounter{mounts: mounts} -} - -type localMounter struct { - mu sync.Mutex - mounts []mount.Mount - mountable Mountable - target string -} - -func (lm *localMounter) Mount() (string, error) { - lm.mu.Lock() - defer lm.mu.Unlock() - - if lm.mounts == nil { - mounts, err := lm.mountable.Mount() - if err != nil { - return "", err - } - lm.mounts = mounts - } - - if len(lm.mounts) == 1 && (lm.mounts[0].Type == "bind" || lm.mounts[0].Type == "rbind") { - ro := false - for _, opt := range lm.mounts[0].Options { - if opt == "ro" { - ro = true - break - } - } - if !ro { - return lm.mounts[0].Source, nil - } - } - - dir, err := ioutil.TempDir("", "buildkit-mount") - if err != nil { - return "", errors.Wrap(err, "failed to create temp dir") - } - - if err := mount.All(lm.mounts, dir); err != nil { - os.RemoveAll(dir) - return "", errors.Wrapf(err, "failed to mount %s: %+v", dir, lm.mounts) - } - lm.target = dir - return dir, nil -} diff --git a/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go b/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go deleted file mode 100644 index c44e435e993d..000000000000 --- a/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !windows - -package snapshot - -import ( - "os" - "syscall" - - "github.com/containerd/containerd/mount" -) - -func (lm *localMounter) Unmount() error { - lm.mu.Lock() - defer lm.mu.Unlock() - - if lm.target != "" { - if err := mount.Unmount(lm.target, syscall.MNT_DETACH); err != nil { - return err - } - os.RemoveAll(lm.target) - lm.target = "" - } - - if lm.mountable != nil { - return lm.mountable.Release() - } - - return nil -} diff --git a/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go b/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go deleted file mode 100644 index 4e1287b0d801..000000000000 --- a/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -package snapshot - -import ( - "os" - - "github.com/containerd/containerd/mount" -) - -func (lm *localMounter) Unmount() error { - lm.mu.Lock() - defer lm.mu.Unlock() - - if lm.target != "" { - if err := mount.Unmount(lm.target, 0); err != nil { - return err - } - os.RemoveAll(lm.target) - lm.target = "" - } - - if lm.mountable != nil { - return lm.mountable.Release() - } - - return nil -} diff --git a/vendor/github.com/moby/buildkit/snapshot/snapshotter.go b/vendor/github.com/moby/buildkit/snapshot/snapshotter.go deleted file mode 100644 index ad7fcaf2dcf4..000000000000 --- a/vendor/github.com/moby/buildkit/snapshot/snapshotter.go +++ /dev/null @@ -1,137 +0,0 @@ -package snapshot - -import ( - "context" - "sync" - - "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/snapshots" - digest "github.com/opencontainers/go-digest" -) - -type Mountable interface { - // ID() string - Mount() ([]mount.Mount, error) - Release() error -} - -type SnapshotterBase interface { - Mounts(ctx context.Context, key string) (Mountable, error) - Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error - View(ctx context.Context, key, parent string, opts ...snapshots.Opt) (Mountable, error) - - Stat(ctx context.Context, key string) (snapshots.Info, error) - Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) - Usage(ctx context.Context, key string) 
(snapshots.Usage, error) - Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error - Remove(ctx context.Context, key string) error - Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error - Close() error -} - -// Snapshotter defines interface that any snapshot implementation should satisfy -type Snapshotter interface { - Blobmapper - SnapshotterBase -} - -type Blobmapper interface { - GetBlob(ctx context.Context, key string) (digest.Digest, digest.Digest, error) - SetBlob(ctx context.Context, key string, diffID, blob digest.Digest) error -} - -func FromContainerdSnapshotter(s snapshots.Snapshotter) SnapshotterBase { - return &fromContainerd{Snapshotter: s} -} - -type fromContainerd struct { - snapshots.Snapshotter -} - -func (s *fromContainerd) Mounts(ctx context.Context, key string) (Mountable, error) { - mounts, err := s.Snapshotter.Mounts(ctx, key) - if err != nil { - return nil, err - } - return &staticMountable{mounts}, nil -} -func (s *fromContainerd) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error { - _, err := s.Snapshotter.Prepare(ctx, key, parent, opts...) - return err -} -func (s *fromContainerd) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) (Mountable, error) { - mounts, err := s.Snapshotter.View(ctx, key, parent, opts...) - if err != nil { - return nil, err - } - return &staticMountable{mounts}, nil -} - -type staticMountable struct { - mounts []mount.Mount -} - -func (m *staticMountable) Mount() ([]mount.Mount, error) { - return m.mounts, nil -} - -func (m *staticMountable) Release() error { - return nil -} - -// NewContainerdSnapshotter converts snapshotter to containerd snapshotter -func NewContainerdSnapshotter(s Snapshotter) (snapshots.Snapshotter, func() error) { - cs := &containerdSnapshotter{Snapshotter: s} - return cs, cs.release -} - -type containerdSnapshotter struct { - mu sync.Mutex - releasers []func() error - Snapshotter -} - -func (cs *containerdSnapshotter) release() error { - cs.mu.Lock() - defer cs.mu.Unlock() - var err error - for _, f := range cs.releasers { - if err1 := f(); err1 != nil && err == nil { // keep the first releaser error - err = err1 - } - } - return err -} - -func (cs *containerdSnapshotter) returnMounts(mf Mountable) ([]mount.Mount, error) { - mounts, err := mf.Mount() - if err != nil { - return nil, err - } - cs.mu.Lock() - cs.releasers = append(cs.releasers, mf.Release) - cs.mu.Unlock() - return mounts, nil -} - -func (cs *containerdSnapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) { - mf, err := cs.Snapshotter.Mounts(ctx, key) - if err != nil { - return nil, err - } - return cs.returnMounts(mf) -} - -func (cs *containerdSnapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { - if err := cs.Snapshotter.Prepare(ctx, key, parent, opts...); err != nil { - return nil, err - } - return cs.Mounts(ctx, key) -} -func (cs *containerdSnapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { - mf, err := cs.Snapshotter.View(ctx, key, parent, opts...)
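// NOTE: like Mounts and Prepare above, View hands its Mountable to returnMounts, which queues the Release func on cs.releasers; those deferred releases only run when the snapshotter-wide release() is called.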
- if err != nil { - return nil, err - } - return cs.returnMounts(mf) -} diff --git a/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go b/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go deleted file mode 100644 index 19755816cd5a..000000000000 --- a/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go +++ /dev/null @@ -1,459 +0,0 @@ -package bboltcachestorage - -import ( - "bytes" - "encoding/json" - "fmt" - - "github.com/moby/buildkit/solver" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - bolt "go.etcd.io/bbolt" -) - -const ( - resultBucket = "_result" - linksBucket = "_links" - byResultBucket = "_byresult" - backlinksBucket = "_backlinks" -) - -type Store struct { - db *bolt.DB -} - -func NewStore(dbPath string) (*Store, error) { - db, err := bolt.Open(dbPath, 0600, nil) - if err != nil { - return nil, errors.Wrapf(err, "failed to open database file %s", dbPath) - } - if err := db.Update(func(tx *bolt.Tx) error { - for _, b := range []string{resultBucket, linksBucket, byResultBucket, backlinksBucket} { - if _, err := tx.CreateBucketIfNotExists([]byte(b)); err != nil { - return err - } - } - return nil - }); err != nil { - return nil, err - } - db.NoSync = true - return &Store{db: db}, nil -} - -func (s *Store) Exists(id string) bool { - exists := false - err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(linksBucket)).Bucket([]byte(id)) - exists = b != nil - return nil - }) - if err != nil { - return false - } - return exists -} - -func (s *Store) Walk(fn func(id string) error) error { - ids := make([]string, 0) - if err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(linksBucket)) - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if v == nil { - ids = append(ids, string(k)) - } - } - return nil - }); err != nil { - return err - } - for _, id := range ids { - if err := fn(id); err != nil { - return err - } - } - return nil -} - -func (s *Store) WalkResults(id string, fn func(solver.CacheResult) error) error { - var list []solver.CacheResult - if err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(resultBucket)) - if b == nil { - return nil - } - b = b.Bucket([]byte(id)) - if b == nil { - return nil - } - - return b.ForEach(func(k, v []byte) error { - var res solver.CacheResult - if err := json.Unmarshal(v, &res); err != nil { - return err - } - list = append(list, res) - return nil - }) - }); err != nil { - return err - } - for _, res := range list { - if err := fn(res); err != nil { - return err - } - } - return nil -} - -func (s *Store) Load(id string, resultID string) (solver.CacheResult, error) { - var res solver.CacheResult - if err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(resultBucket)) - if b == nil { - return errors.WithStack(solver.ErrNotFound) - } - b = b.Bucket([]byte(id)) - if b == nil { - return errors.WithStack(solver.ErrNotFound) - } - - v := b.Get([]byte(resultID)) - if v == nil { - return errors.WithStack(solver.ErrNotFound) - } - - return json.Unmarshal(v, &res) - }); err != nil { - return solver.CacheResult{}, err - } - return res, nil -} - -func (s *Store) AddResult(id string, res solver.CacheResult) error { - return s.db.Update(func(tx *bolt.Tx) error { - _, err := tx.Bucket([]byte(linksBucket)).CreateBucketIfNotExists([]byte(id)) - if err != nil { - return err - } - - b, err := tx.Bucket([]byte(resultBucket)).CreateBucketIfNotExists([]byte(id)) - if err != nil { - return err - } - dt, err := 
json.Marshal(res) - if err != nil { - return err - } - if err := b.Put([]byte(res.ID), dt); err != nil { - return err - } - b, err = tx.Bucket([]byte(byResultBucket)).CreateBucketIfNotExists([]byte(res.ID)) - if err != nil { - return err - } - if err := b.Put([]byte(id), []byte{}); err != nil { - return err - } - - return nil - }) -} - -func (s *Store) WalkIDsByResult(resultID string, fn func(string) error) error { - ids := map[string]struct{}{} - if err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(byResultBucket)) - if b == nil { - return nil - } - b = b.Bucket([]byte(resultID)) - if b == nil { - return nil - } - return b.ForEach(func(k, v []byte) error { - ids[string(k)] = struct{}{} - return nil - }) - }); err != nil { - return err - } - for id := range ids { - if err := fn(id); err != nil { - return err - } - } - return nil -} - -func (s *Store) Release(resultID string) error { - return s.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(byResultBucket)) - if b == nil { - return errors.WithStack(solver.ErrNotFound) - } - b = b.Bucket([]byte(resultID)) - if b == nil { - return errors.WithStack(solver.ErrNotFound) - } - if err := b.ForEach(func(k, v []byte) error { - return s.releaseHelper(tx, string(k), resultID) - }); err != nil { - return err - } - return nil - }) -} - -func (s *Store) releaseHelper(tx *bolt.Tx, id, resultID string) error { - results := tx.Bucket([]byte(resultBucket)).Bucket([]byte(id)) - if results == nil { - return nil - } - - if err := results.Delete([]byte(resultID)); err != nil { - return err - } - - ids := tx.Bucket([]byte(byResultBucket)) - - ids = ids.Bucket([]byte(resultID)) - if ids == nil { - return nil - } - - if err := ids.Delete([]byte(id)); err != nil { - return err - } - - if isEmptyBucket(ids) { - if err := tx.Bucket([]byte(byResultBucket)).DeleteBucket([]byte(resultID)); err != nil { - return err - } - } - - return s.emptyBranchWithParents(tx, []byte(id)) -} - -func (s *Store) emptyBranchWithParents(tx *bolt.Tx, id []byte) error { - results := tx.Bucket([]byte(resultBucket)).Bucket(id) - if results == nil { - return nil - } - - isEmptyLinks := true - links := tx.Bucket([]byte(linksBucket)).Bucket(id) - if links != nil { - isEmptyLinks = isEmptyBucket(links) - } - - if !isEmptyBucket(results) || !isEmptyLinks { - return nil - } - - if backlinks := tx.Bucket([]byte(backlinksBucket)).Bucket(id); backlinks != nil { - if err := backlinks.ForEach(func(k, v []byte) error { - if subLinks := tx.Bucket([]byte(linksBucket)).Bucket(k); subLinks != nil { - if err := subLinks.ForEach(func(k, v []byte) error { - parts := bytes.Split(k, []byte("@")) - if len(parts) != 2 { - return errors.Errorf("invalid key %s", k) - } - if bytes.Equal(id, parts[1]) { - return subLinks.Delete(k) - } - return nil - }); err != nil { - return err - } - - if isEmptyBucket(subLinks) { - if err := tx.Bucket([]byte(linksBucket)).DeleteBucket(k); err != nil { - return err - } - } - } - return s.emptyBranchWithParents(tx, k) - }); err != nil { - return err - } - if err := tx.Bucket([]byte(backlinksBucket)).DeleteBucket(id); err != nil { - return err - } - } - - // intentionally ignoring errors - tx.Bucket([]byte(linksBucket)).DeleteBucket([]byte(id)) - tx.Bucket([]byte(resultBucket)).DeleteBucket([]byte(id)) - - return nil -} - -func (s *Store) AddLink(id string, link solver.CacheInfoLink, target string) error { - return s.db.Update(func(tx
*bolt.Tx) error { - b, err := tx.Bucket([]byte(linksBucket)).CreateBucketIfNotExists([]byte(id)) - if err != nil { - return err - } - - dt, err := json.Marshal(link) - if err != nil { - return err - } - - if err := b.Put(bytes.Join([][]byte{dt, []byte(target)}, []byte("@")), []byte{}); err != nil { - return err - } - - b, err = tx.Bucket([]byte(backlinksBucket)).CreateBucketIfNotExists([]byte(target)) - if err != nil { - return err - } - - if err := b.Put([]byte(id), []byte{}); err != nil { - return err - } - - return nil - }) -} - -func (s *Store) WalkLinks(id string, link solver.CacheInfoLink, fn func(id string) error) error { - var links []string - if err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(linksBucket)) - if b == nil { - return nil - } - b = b.Bucket([]byte(id)) - if b == nil { - return nil - } - - dt, err := json.Marshal(link) - if err != nil { - return err - } - index := bytes.Join([][]byte{dt, {}}, []byte("@")) - c := b.Cursor() - k, _ := c.Seek([]byte(index)) - for { - if k != nil && bytes.HasPrefix(k, index) { - target := bytes.TrimPrefix(k, index) - links = append(links, string(target)) - k, _ = c.Next() - } else { - break - } - } - - return nil - }); err != nil { - return err - } - for _, l := range links { - if err := fn(l); err != nil { - return err - } - } - return nil -} - -func (s *Store) HasLink(id string, link solver.CacheInfoLink, target string) bool { - var v bool - if err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(linksBucket)) - if b == nil { - return nil - } - b = b.Bucket([]byte(id)) - if b == nil { - return nil - } - - dt, err := json.Marshal(link) - if err != nil { - return err - } - v = b.Get(bytes.Join([][]byte{dt, []byte(target)}, []byte("@"))) != nil - return nil - }); err != nil { - return false - } - return v -} - -func (s *Store) WalkBacklinks(id string, fn func(id string, link solver.CacheInfoLink) error) error { - var outIDs []string - var outLinks []solver.CacheInfoLink - - if err := s.db.View(func(tx *bolt.Tx) error { - links := tx.Bucket([]byte(linksBucket)) - if links == nil { - return nil - } - backLinks := tx.Bucket([]byte(backlinksBucket)) - if backLinks == nil { - return nil - } - b := backLinks.Bucket([]byte(id)) - if b == nil { - return nil - } - - if err := b.ForEach(func(bid, v []byte) error { - b = links.Bucket(bid) - if b == nil { - return nil - } - if err := b.ForEach(func(k, v []byte) error { - parts := bytes.Split(k, []byte("@")) - if len(parts) == 2 { - if string(parts[1]) != id { - return nil - } - var l solver.CacheInfoLink - if err := json.Unmarshal(parts[0], &l); err != nil { - return err - } - l.Digest = digest.FromBytes([]byte(fmt.Sprintf("%s@%d", l.Digest, l.Output))) - l.Output = 0 - outIDs = append(outIDs, string(bid)) - outLinks = append(outLinks, l) - } - return nil - }); err != nil { - return err - } - return nil - }); err != nil { - return err - } - - return nil - }); err != nil { - return err - } - - for i := range outIDs { - if err := fn(outIDs[i], outLinks[i]); err != nil { - return err - } - } - return nil -} - -func isEmptyBucket(b *bolt.Bucket) bool { - if b == nil { - return true - } - k, _ := b.Cursor().First() - return k == nil -} diff --git a/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage_test.go b/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage_test.go deleted file mode 100644 index 5e72c889b47d..000000000000 --- a/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package 
bboltcachestorage - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/testutil" - "github.com/stretchr/testify/require" -) - -func TestBoltCacheStorage(t *testing.T) { - testutil.RunCacheStorageTests(t, func() (solver.CacheKeyStorage, func()) { - tmpDir, err := ioutil.TempDir("", "storage") - require.NoError(t, err) - - cleanup := func() { - os.RemoveAll(tmpDir) - } - - st, err := NewStore(filepath.Join(tmpDir, "cache.db")) - if err != nil { - cleanup() - } - require.NoError(t, err) - - return st, cleanup - }) -} diff --git a/vendor/github.com/moby/buildkit/solver/cache_test.go b/vendor/github.com/moby/buildkit/solver/cache_test.go deleted file mode 100644 index 5727d2dd2346..000000000000 --- a/vendor/github.com/moby/buildkit/solver/cache_test.go +++ /dev/null @@ -1,365 +0,0 @@ -package solver - -import ( - "context" - "testing" - - "github.com/moby/buildkit/identity" - digest "github.com/opencontainers/go-digest" - "github.com/stretchr/testify/require" -) - -func depKeys(cks ...ExportableCacheKey) []CacheKeyWithSelector { - var keys []CacheKeyWithSelector - for _, ck := range cks { - keys = append(keys, CacheKeyWithSelector{CacheKey: ck}) - } - return keys -} - -func testCacheKey(dgst digest.Digest, output Index, deps ...ExportableCacheKey) *CacheKey { - k := NewCacheKey(dgst, output) - k.deps = make([][]CacheKeyWithSelector, len(deps)) - for i, dep := range deps { - k.deps[i] = depKeys(dep) - } - return k -} - -func testCacheKeyWithDeps(dgst digest.Digest, output Index, deps [][]CacheKeyWithSelector) *CacheKey { - k := NewCacheKey(dgst, output) - k.deps = deps - return k -} - -func expKey(k *CacheKey) ExportableCacheKey { - return ExportableCacheKey{CacheKey: k, Exporter: &exporter{k: k}} -} - -func TestInMemoryCache(t *testing.T) { - ctx := context.TODO() - - m := NewInMemoryCacheManager() - - cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), 0), testResult("result0")) - require.NoError(t, err) - - keys, err := m.Query(nil, 0, dgst("foo"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 1) - - matches, err := m.Records(keys[0]) - require.NoError(t, err) - require.Equal(t, len(matches), 1) - - res, err := m.Load(ctx, matches[0]) - require.NoError(t, err) - require.Equal(t, "result0", unwrap(res)) - - // another record - cacheBar, err := m.Save(NewCacheKey(dgst("bar"), 0), testResult("result1")) - require.NoError(t, err) - - keys, err = m.Query(nil, 0, dgst("bar"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 1) - - matches, err = m.Records(keys[0]) - require.NoError(t, err) - require.Equal(t, len(matches), 1) - - res, err = m.Load(ctx, matches[0]) - require.NoError(t, err) - require.Equal(t, "result1", unwrap(res)) - - // invalid request - keys, err = m.Query(nil, 0, dgst("baz"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 0) - - // second level - k := testCacheKey(dgst("baz"), Index(1), *cacheFoo, *cacheBar) - cacheBaz, err := m.Save(k, testResult("result2")) - require.NoError(t, err) - - keys, err = m.Query(nil, 0, dgst("baz"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 0) - - keys, err = m.Query(depKeys(*cacheFoo), 0, dgst("baz"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 0) - - keys, err = m.Query(depKeys(*cacheFoo), 1, dgst("baz"), Index(1)) - require.NoError(t, err) - require.Equal(t, len(keys), 0) - - keys, err = m.Query(depKeys(*cacheFoo), 0, dgst("baz"), Index(1)) - require.NoError(t, err) - require.Equal(t, len(keys), 1) - - 
matches, err = m.Records(keys[0]) - require.NoError(t, err) - require.Equal(t, len(matches), 1) - - res, err = m.Load(ctx, matches[0]) - require.NoError(t, err) - require.Equal(t, "result2", unwrap(res)) - - keys2, err := m.Query(depKeys(*cacheBar), 1, dgst("baz"), Index(1)) - require.NoError(t, err) - require.Equal(t, len(keys2), 1) - - require.Equal(t, keys[0].ID, keys2[0].ID) - - k = testCacheKey(dgst("baz"), Index(1), *cacheFoo) - _, err = m.Save(k, testResult("result3")) - require.NoError(t, err) - - keys, err = m.Query(depKeys(*cacheFoo), 0, dgst("baz"), Index(1)) - require.NoError(t, err) - require.Equal(t, len(keys), 1) - - matches, err = m.Records(keys[0]) - require.NoError(t, err) - require.Equal(t, len(matches), 2) - - k = testCacheKeyWithDeps(dgst("bax"), 0, [][]CacheKeyWithSelector{ - {{CacheKey: *cacheFoo}, {CacheKey: *cacheBaz}}, - {{CacheKey: *cacheBar}}, - }) - _, err = m.Save(k, testResult("result4")) - require.NoError(t, err) - - // foo, bar, baz should all point to result4 - keys, err = m.Query(depKeys(*cacheFoo), 0, dgst("bax"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 1) - - id := keys[0].ID - - keys, err = m.Query(depKeys(*cacheBar), 1, dgst("bax"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 1) - require.Equal(t, keys[0].ID, id) - - keys, err = m.Query(depKeys(*cacheBaz), 0, dgst("bax"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 1) - require.Equal(t, keys[0].ID, id) -} - -func TestInMemoryCacheSelector(t *testing.T) { - ctx := context.TODO() - - m := NewInMemoryCacheManager() - - cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), 0), testResult("result0")) - require.NoError(t, err) - - _, err = m.Save(testCacheKeyWithDeps(dgst("bar"), 0, [][]CacheKeyWithSelector{ - {{CacheKey: *cacheFoo, Selector: dgst("sel0")}}, - }), testResult("result1")) - require.NoError(t, err) - - keys, err := m.Query(depKeys(*cacheFoo), 0, dgst("bar"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 0) - - keys, err = m.Query([]CacheKeyWithSelector{{Selector: "sel-invalid", CacheKey: *cacheFoo}}, 0, dgst("bar"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 0) - - keys, err = m.Query([]CacheKeyWithSelector{{Selector: dgst("sel0"), CacheKey: *cacheFoo}}, 0, dgst("bar"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 1) - - matches, err := m.Records(keys[0]) - require.NoError(t, err) - require.Equal(t, len(matches), 1) - - res, err := m.Load(ctx, matches[0]) - require.NoError(t, err) - require.Equal(t, "result1", unwrap(res)) -} - -func TestInMemoryCacheSelectorNested(t *testing.T) { - ctx := context.TODO() - - m := NewInMemoryCacheManager() - - cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), 0), testResult("result0")) - require.NoError(t, err) - - _, err = m.Save(testCacheKeyWithDeps(dgst("bar"), 0, [][]CacheKeyWithSelector{ - {{CacheKey: *cacheFoo, Selector: dgst("sel0")}, {CacheKey: expKey(NewCacheKey(dgst("second"), 0))}}, - }), testResult("result1")) - require.NoError(t, err) - - keys, err := m.Query( - []CacheKeyWithSelector{{Selector: dgst("sel0"), CacheKey: *cacheFoo}}, - 0, dgst("bar"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 1) - - matches, err := m.Records(keys[0]) - require.NoError(t, err) - require.Equal(t, len(matches), 1) - - res, err := m.Load(ctx, matches[0]) - require.NoError(t, err) - require.Equal(t, "result1", unwrap(res)) - - keys, err = m.Query(depKeys(*cacheFoo), 0, dgst("bar"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 0) - - keys, err = 
m.Query([]CacheKeyWithSelector{{Selector: dgst("bar"), CacheKey: *cacheFoo}}, 0, dgst("bar"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 0) - - keys, err = m.Query(depKeys(expKey(NewCacheKey(dgst("second"), 0))), 0, dgst("bar"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 1) - - matches, err = m.Records(keys[0]) - require.NoError(t, err) - require.Equal(t, len(matches), 1) - - res, err = m.Load(ctx, matches[0]) - require.NoError(t, err) - require.Equal(t, "result1", unwrap(res)) - - keys, err = m.Query(depKeys(expKey(NewCacheKey(dgst("second"), 0))), 0, dgst("bar"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 1) -} - -func TestInMemoryCacheReleaseParent(t *testing.T) { - storage := NewInMemoryCacheStorage() - results := NewInMemoryResultStorage() - m := NewCacheManager(identity.NewID(), storage, results) - - res0 := testResult("result0") - cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), 0), res0) - require.NoError(t, err) - - res1 := testResult("result1") - _, err = m.Save(testCacheKey(dgst("bar"), 0, *cacheFoo), res1) - require.NoError(t, err) - - keys, err := m.Query(nil, 0, dgst("foo"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 1) - - matches, err := m.Records(keys[0]) - require.NoError(t, err) - require.Equal(t, len(matches), 1) - - err = storage.Release(res0.ID()) - require.NoError(t, err) - - // foo becomes unloadable - keys, err = m.Query(nil, 0, dgst("foo"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 1) - - matches, err = m.Records(keys[0]) - require.NoError(t, err) - require.Equal(t, len(matches), 0) - - keys, err = m.Query(depKeys(expKey(keys[0])), 0, dgst("bar"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 1) - - matches, err = m.Records(keys[0]) - require.NoError(t, err) - require.Equal(t, len(matches), 1) - - // releasing bar releases both foo and bar - err = storage.Release(res1.ID()) - require.NoError(t, err) - - keys, err = m.Query(nil, 0, dgst("foo"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 0) -} - -// TestInMemoryCacheRestoreOfflineDeletion deletes a result while the -// cachemanager is not running and checks that it syncs up on restore -func TestInMemoryCacheRestoreOfflineDeletion(t *testing.T) { - storage := NewInMemoryCacheStorage() - results := NewInMemoryResultStorage() - m := NewCacheManager(identity.NewID(), storage, results) - - res0 := testResult("result0") - cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), 0), res0) - require.NoError(t, err) - - res1 := testResult("result1") - _, err = m.Save(testCacheKey(dgst("bar"), 0, *cacheFoo), res1) - require.NoError(t, err) - - results2 := NewInMemoryResultStorage() - _, err = results2.Save(res1) // only add bar - require.NoError(t, err) - - m = NewCacheManager(identity.NewID(), storage, results2) - - keys, err := m.Query(nil, 0, dgst("foo"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 1) - - matches, err := m.Records(keys[0]) - require.NoError(t, err) - require.Equal(t, len(matches), 0) - - keys, err = m.Query(depKeys(expKey(keys[0])), 0, dgst("bar"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 1) - - matches, err = m.Records(keys[0]) - require.NoError(t, err) - require.Equal(t, len(matches), 1) -} - -func TestCarryOverFromSublink(t *testing.T) { - storage := NewInMemoryCacheStorage() - results := NewInMemoryResultStorage() - m := NewCacheManager(identity.NewID(), storage, results) - - cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), 0), testResult("resultFoo")) - 
require.NoError(t, err) - - _, err = m.Save(testCacheKeyWithDeps(dgst("res"), 0, [][]CacheKeyWithSelector{ - {{CacheKey: *cacheFoo, Selector: dgst("sel0")}, {CacheKey: expKey(NewCacheKey(dgst("content0"), 0))}}, - }), testResult("result0")) - require.NoError(t, err) - - cacheBar, err := m.Save(NewCacheKey(dgst("bar"), 0), testResult("resultBar")) - require.NoError(t, err) - - keys, err := m.Query([]CacheKeyWithSelector{ - {CacheKey: *cacheBar, Selector: dgst("sel0")}, - {CacheKey: expKey(NewCacheKey(dgst("content0"), 0))}, - }, 0, dgst("res"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 1) - - keys, err = m.Query([]CacheKeyWithSelector{ - {Selector: dgst("sel0"), CacheKey: *cacheBar}, - }, 0, dgst("res"), 0) - require.NoError(t, err) - require.Equal(t, len(keys), 1) -} - -func dgst(s string) digest.Digest { - return digest.FromBytes([]byte(s)) -} - -func testResult(v string) Result { - return &dummyResult{ - id: identity.NewID(), - value: v, - } -} diff --git a/vendor/github.com/moby/buildkit/solver/cachekey.go b/vendor/github.com/moby/buildkit/solver/cachekey.go deleted file mode 100644 index 3749af0ab3ac..000000000000 --- a/vendor/github.com/moby/buildkit/solver/cachekey.go +++ /dev/null @@ -1,66 +0,0 @@ -package solver - -import ( - "sync" - - digest "github.com/opencontainers/go-digest" -) - -// NewCacheKey creates a new cache key for a specific output index -func NewCacheKey(dgst digest.Digest, output Index) *CacheKey { - return &CacheKey{ - ID: rootKey(dgst, output).String(), - digest: dgst, - output: output, - ids: map[*cacheManager]string{}, - } -} - -// CacheKeyWithSelector combines a cache key with an optional selector digest. -// Used to limit the matches for dependency cache key. -type CacheKeyWithSelector struct { - Selector digest.Digest - CacheKey ExportableCacheKey -} - -type CacheKey struct { - mu sync.RWMutex - - ID string - deps [][]CacheKeyWithSelector // only [][]*inMemoryCacheKey - digest digest.Digest - output Index - ids map[*cacheManager]string - - indexIDs []string -} - -func (ck *CacheKey) Deps() [][]CacheKeyWithSelector { - ck.mu.RLock() - defer ck.mu.RUnlock() - deps := make([][]CacheKeyWithSelector, len(ck.deps)) - for i := range ck.deps { - deps[i] = append([]CacheKeyWithSelector(nil), ck.deps[i]...) 
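// appending onto a nil slice above yields an independent copy, so callers cannot mutate ck.deps through the returned slices while only the read lock is held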
- } - return deps -} - -func (ck *CacheKey) Digest() digest.Digest { - return ck.digest -} -func (ck *CacheKey) Output() Index { - return ck.output -} - -func (ck *CacheKey) clone() *CacheKey { - nk := &CacheKey{ - ID: ck.ID, - digest: ck.digest, - output: ck.output, - ids: map[*cacheManager]string{}, - } - for cm, id := range ck.ids { - nk.ids[cm] = id - } - return nk -} diff --git a/vendor/github.com/moby/buildkit/solver/cachemanager.go b/vendor/github.com/moby/buildkit/solver/cachemanager.go deleted file mode 100644 index ce41aa791b30..000000000000 --- a/vendor/github.com/moby/buildkit/solver/cachemanager.go +++ /dev/null @@ -1,277 +0,0 @@ -package solver - -import ( - "context" - "fmt" - "sync" - - "github.com/moby/buildkit/identity" - digest "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -// NewInMemoryCacheManager creates a new in-memory cache manager -func NewInMemoryCacheManager() CacheManager { - return NewCacheManager(identity.NewID(), NewInMemoryCacheStorage(), NewInMemoryResultStorage()) -} - -// NewCacheManager creates a new cache manager with specific storage backend -func NewCacheManager(id string, storage CacheKeyStorage, results CacheResultStorage) CacheManager { - cm := &cacheManager{ - id: id, - backend: storage, - results: results, - } - - if err := cm.ReleaseUnreferenced(); err != nil { - logrus.Errorf("failed to release unreferenced cache metadata: %+v", err) - } - - return cm -} - -type cacheManager struct { - mu sync.RWMutex - id string - - backend CacheKeyStorage - results CacheResultStorage -} - -func (c *cacheManager) ReleaseUnreferenced() error { - return c.backend.Walk(func(id string) error { - return c.backend.WalkResults(id, func(cr CacheResult) error { - if !c.results.Exists(cr.ID) { - c.backend.Release(cr.ID) - } - return nil - }) - }) -} - -func (c *cacheManager) ID() string { - return c.id -} - -func (c *cacheManager) Query(deps []CacheKeyWithSelector, input Index, dgst digest.Digest, output Index) ([]*CacheKey, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - type dep struct { - results map[string]struct{} - key CacheKeyWithSelector - } - - allDeps := make([]dep, 0, len(deps)) - for _, k := range deps { - allDeps = append(allDeps, dep{key: k, results: map[string]struct{}{}}) - } - - allRes := map[string]*CacheKey{} - for _, d := range allDeps { - if err := c.backend.WalkLinks(c.getID(d.key.CacheKey.CacheKey), CacheInfoLink{input, output, dgst, d.key.Selector}, func(id string) error { - d.results[id] = struct{}{} - if _, ok := allRes[id]; !ok { - allRes[id] = c.newKeyWithID(id, dgst, output) - } - return nil - }); err != nil { - return nil, err - } - } - - // link the results against the keys that didn't exist - for id, key := range allRes { - for _, d := range allDeps { - if _, ok := d.results[id]; !ok { - if err := c.backend.AddLink(c.getID(d.key.CacheKey.CacheKey), CacheInfoLink{ - Input: input, - Output: output, - Digest: dgst, - Selector: d.key.Selector, - }, c.getID(key)); err != nil { - return nil, err - } - } - } - } - - if len(deps) == 0 { - if !c.backend.Exists(rootKey(dgst, output).String()) { - return nil, nil - } - return []*CacheKey{c.newRootKey(dgst, output)}, nil - } - - keys := make([]*CacheKey, 0, len(deps)) - for _, k := range allRes { - keys = append(keys, k) - } - return keys, nil -} - -func (c *cacheManager) Records(ck *CacheKey) ([]*CacheRecord, error) { - outs := make([]*CacheRecord, 0) - if err := c.backend.WalkResults(c.getID(ck), func(r CacheResult) error { - if c.results.Exists(r.ID) { - outs = 
append(outs, &CacheRecord{ - ID: r.ID, - cacheManager: c, - key: ck, - CreatedAt: r.CreatedAt, - }) - } else { - c.backend.Release(r.ID) - } - return nil - }); err != nil { - return nil, err - } - return outs, nil -} - -func (c *cacheManager) Load(ctx context.Context, rec *CacheRecord) (Result, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - res, err := c.backend.Load(c.getID(rec.key), rec.ID) - if err != nil { - return nil, err - } - - return c.results.Load(ctx, res) -} - -func (c *cacheManager) Save(k *CacheKey, r Result) (*ExportableCacheKey, error) { - c.mu.Lock() - defer c.mu.Unlock() - - res, err := c.results.Save(r) - if err != nil { - return nil, err - } - - if err := c.backend.AddResult(c.getID(k), res); err != nil { - return nil, err - } - - if err := c.ensurePersistentKey(k); err != nil { - return nil, err - } - - rec := &CacheRecord{ - ID: res.ID, - cacheManager: c, - key: k, - CreatedAt: res.CreatedAt, - } - - return &ExportableCacheKey{ - CacheKey: k, - Exporter: &exporter{k: k, record: rec}, - }, nil -} - -func newKey() *CacheKey { - return &CacheKey{ids: map[*cacheManager]string{}} -} - -func (c *cacheManager) newKeyWithID(id string, dgst digest.Digest, output Index) *CacheKey { - k := newKey() - k.digest = dgst - k.output = output - k.ID = id - k.ids[c] = id - return k -} - -func (c *cacheManager) newRootKey(dgst digest.Digest, output Index) *CacheKey { - return c.newKeyWithID(rootKey(dgst, output).String(), dgst, output) -} - -func (c *cacheManager) getID(k *CacheKey) string { - k.mu.Lock() - id, ok := k.ids[c] - if ok { - k.mu.Unlock() - return id - } - if len(k.deps) == 0 { - k.ids[c] = k.ID - k.mu.Unlock() - return k.ID - } - id = c.getIDFromDeps(k) - k.ids[c] = id - k.mu.Unlock() - return id -} - -func (c *cacheManager) ensurePersistentKey(k *CacheKey) error { - id := c.getID(k) - for i, deps := range k.Deps() { - for _, ck := range deps { - l := CacheInfoLink{ - Input: Index(i), - Output: Index(k.Output()), - Digest: k.Digest(), - Selector: ck.Selector, - } - ckID := c.getID(ck.CacheKey.CacheKey) - if !c.backend.HasLink(ckID, l, id) { - if err := c.ensurePersistentKey(ck.CacheKey.CacheKey); err != nil { - return err - } - if err := c.backend.AddLink(ckID, l, id); err != nil { - return err - } - } - } - } - return nil -} - -func (c *cacheManager) getIDFromDeps(k *CacheKey) string { - matches := map[string]struct{}{} - - for i, deps := range k.deps { - if i == 0 || len(matches) > 0 { - for _, ck := range deps { - m2 := make(map[string]struct{}) - if err := c.backend.WalkLinks(c.getID(ck.CacheKey.CacheKey), CacheInfoLink{ - Input: Index(i), - Output: Index(k.Output()), - Digest: k.Digest(), - Selector: ck.Selector, - }, func(id string) error { - if i == 0 { - matches[id] = struct{}{} - } else { - m2[id] = struct{}{} - } - return nil - }); err != nil { - matches = map[string]struct{}{} - break - } - if i != 0 { - for id := range matches { - if _, ok := m2[id]; !ok { - delete(matches, id) - } - } - } - } - } - } - - for k := range matches { - return k - } - - return identity.NewID() -} - -func rootKey(dgst digest.Digest, output Index) digest.Digest { - return digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, output))) -} diff --git a/vendor/github.com/moby/buildkit/solver/cachestorage.go b/vendor/github.com/moby/buildkit/solver/cachestorage.go deleted file mode 100644 index 65225f757b34..000000000000 --- a/vendor/github.com/moby/buildkit/solver/cachestorage.go +++ /dev/null @@ -1,51 +0,0 @@ -package solver - -import ( - "context" - "time" - - digest 
"github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -var ErrNotFound = errors.Errorf("not found") - -// CacheKeyStorage is interface for persisting cache metadata -type CacheKeyStorage interface { - Exists(id string) bool - Walk(fn func(id string) error) error - - WalkResults(id string, fn func(CacheResult) error) error - Load(id string, resultID string) (CacheResult, error) - AddResult(id string, res CacheResult) error - Release(resultID string) error - WalkIDsByResult(resultID string, fn func(string) error) error - - AddLink(id string, link CacheInfoLink, target string) error - WalkLinks(id string, link CacheInfoLink, fn func(id string) error) error - HasLink(id string, link CacheInfoLink, target string) bool - WalkBacklinks(id string, fn func(id string, link CacheInfoLink) error) error -} - -// CacheResult is a record for a single solve result -type CacheResult struct { - CreatedAt time.Time - ID string -} - -// CacheInfoLink is a link between two cache keys -type CacheInfoLink struct { - Input Index `json:"Input,omitempty"` - Output Index `json:"Output,omitempty"` - Digest digest.Digest `json:"Digest,omitempty"` - Selector digest.Digest `json:"Selector,omitempty"` -} - -// CacheResultStorage is interface for converting cache metadata result to -// actual solve result -type CacheResultStorage interface { - Save(Result) (CacheResult, error) - Load(ctx context.Context, res CacheResult) (Result, error) - LoadRemote(ctx context.Context, res CacheResult) (*Remote, error) - Exists(id string) bool -} diff --git a/vendor/github.com/moby/buildkit/solver/combinedcache.go b/vendor/github.com/moby/buildkit/solver/combinedcache.go deleted file mode 100644 index b4205d3ed063..000000000000 --- a/vendor/github.com/moby/buildkit/solver/combinedcache.go +++ /dev/null @@ -1,124 +0,0 @@ -package solver - -import ( - "context" - "strings" - "sync" - - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "golang.org/x/sync/errgroup" -) - -func newCombinedCacheManager(cms []CacheManager, main CacheManager) CacheManager { - return &combinedCacheManager{cms: cms, main: main} -} - -type combinedCacheManager struct { - cms []CacheManager - main CacheManager - id string - idOnce sync.Once -} - -func (cm *combinedCacheManager) ID() string { - cm.idOnce.Do(func() { - ids := make([]string, len(cm.cms)) - for i, c := range cm.cms { - ids[i] = c.ID() - } - cm.id = digest.FromBytes([]byte(strings.Join(ids, ","))).String() - }) - return cm.id -} - -func (cm *combinedCacheManager) Query(inp []CacheKeyWithSelector, inputIndex Index, dgst digest.Digest, outputIndex Index) ([]*CacheKey, error) { - eg, _ := errgroup.WithContext(context.TODO()) - keys := make(map[string]*CacheKey, len(cm.cms)) - var mu sync.Mutex - for _, c := range cm.cms { - func(c CacheManager) { - eg.Go(func() error { - recs, err := c.Query(inp, inputIndex, dgst, outputIndex) - if err != nil { - return err - } - mu.Lock() - for _, r := range recs { - if _, ok := keys[r.ID]; !ok || c == cm.main { - keys[r.ID] = r - } - } - mu.Unlock() - return nil - }) - }(c) - } - - if err := eg.Wait(); err != nil { - return nil, err - } - - out := make([]*CacheKey, 0, len(keys)) - for _, k := range keys { - out = append(out, k) - } - return out, nil -} - -func (cm *combinedCacheManager) Load(ctx context.Context, rec *CacheRecord) (Result, error) { - res, err := rec.cacheManager.Load(ctx, rec) - if err != nil { - return nil, err - } - if _, err := cm.main.Save(rec.key, res); err != nil { - return nil, err - } - return res, nil -} - 
-func (cm *combinedCacheManager) Save(key *CacheKey, s Result) (*ExportableCacheKey, error) { - return cm.main.Save(key, s) -} - -func (cm *combinedCacheManager) Records(ck *CacheKey) ([]*CacheRecord, error) { - if len(ck.ids) == 0 { - return nil, errors.Errorf("no results") - } - - records := map[string]*CacheRecord{} - var mu sync.Mutex - - eg, _ := errgroup.WithContext(context.TODO()) - for c := range ck.ids { - func(c *cacheManager) { - eg.Go(func() error { - recs, err := c.Records(ck) - if err != nil { - return err - } - mu.Lock() - for _, rec := range recs { - if _, ok := records[rec.ID]; !ok || c == cm.main { - if c == cm.main { - rec.Priority = 1 - } - records[rec.ID] = rec - } - } - mu.Unlock() - return nil - }) - }(c) - } - - if err := eg.Wait(); err != nil { - return nil, err - } - - out := make([]*CacheRecord, 0, len(records)) - for _, rec := range records { - out = append(out, rec) - } - return out, nil -} diff --git a/vendor/github.com/moby/buildkit/solver/edge.go b/vendor/github.com/moby/buildkit/solver/edge.go deleted file mode 100644 index 07e67a3e194c..000000000000 --- a/vendor/github.com/moby/buildkit/solver/edge.go +++ /dev/null @@ -1,898 +0,0 @@ -package solver - -import ( - "context" - "sync" - - "github.com/moby/buildkit/solver/internal/pipe" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type edgeStatusType int - -const ( - edgeStatusInitial edgeStatusType = iota - edgeStatusCacheFast - edgeStatusCacheSlow - edgeStatusComplete -) - -func (t edgeStatusType) String() string { - return []string{"initial", "cache-fast", "cache-slow", "complete"}[t] -} - -func newEdge(ed Edge, op activeOp, index *edgeIndex) *edge { - e := &edge{ - edge: ed, - op: op, - depRequests: map[pipe.Receiver]*dep{}, - keyMap: map[string]*CacheKey{}, - cacheRecords: map[string]*CacheRecord{}, - index: index, - } - return e -} - -type edge struct { - edge Edge - op activeOp - - edgeState - depRequests map[pipe.Receiver]*dep - deps []*dep - - cacheMapReq pipe.Receiver - cacheMapDone bool - cacheMapIndex int - cacheMapDigests []digest.Digest - execReq pipe.Receiver - err error - cacheRecords map[string]*CacheRecord - keyMap map[string]*CacheKey - - noCacheMatchPossible bool - allDepsCompletedCacheFast bool - allDepsCompletedCacheSlow bool - allDepsStateCacheSlow bool - allDepsCompleted bool - hasActiveOutgoing bool - - releaserCount int - keysDidChange bool - index *edgeIndex - - secondaryExporters []expDep -} - -// dep holds state for a dependent edge -type dep struct { - req pipe.Receiver - edgeState - index Index - keyMap map[string]*CacheKey - desiredState edgeStatusType - e *edge - slowCacheReq pipe.Receiver - slowCacheComplete bool - slowCacheFoundKey bool - slowCacheKey *ExportableCacheKey - err error -} - -// expDep holds secondary exporter info for a dependency -type expDep struct { - index int - cacheKey CacheKeyWithSelector -} - -func newDep(i Index) *dep { - return &dep{index: i, keyMap: map[string]*CacheKey{}} -} - -// edgePipe is a pipe for requests between two edges -type edgePipe struct { - *pipe.Pipe - From, Target *edge - mu sync.Mutex -} - -// edgeState holds basic mutable state info for an edge -type edgeState struct { - state edgeStatusType - result *SharedCachedResult - cacheMap *CacheMap - keys []ExportableCacheKey -} - -type edgeRequest struct { - desiredState edgeStatusType - currentState edgeState - currentKeys int -} - -// incrementReferenceCount increases the number of times release needs to be -// called to
release the edge. Called on merging edges. -func (e *edge) incrementReferenceCount() { - e.releaserCount += 1 -} - -// release releases the edge resources -func (e *edge) release() { - if e.releaserCount > 0 { - e.releaserCount-- - return - } - e.index.Release(e) - if e.result != nil { - go e.result.Release(context.TODO()) - } -} - -// commitOptions returns parameters for the op execution -func (e *edge) commitOptions() ([]*CacheKey, []CachedResult) { - k := NewCacheKey(e.cacheMap.Digest, e.edge.Index) - if len(e.deps) == 0 { - keys := make([]*CacheKey, 0, len(e.cacheMapDigests)) - for _, dgst := range e.cacheMapDigests { - keys = append(keys, NewCacheKey(dgst, e.edge.Index)) - } - return keys, nil - } - - inputs := make([][]CacheKeyWithSelector, len(e.deps)) - results := make([]CachedResult, len(e.deps)) - for i, dep := range e.deps { - for _, k := range dep.result.CacheKeys() { - inputs[i] = append(inputs[i], CacheKeyWithSelector{CacheKey: k, Selector: e.cacheMap.Deps[i].Selector}) - } - if dep.slowCacheKey != nil { - inputs[i] = append(inputs[i], CacheKeyWithSelector{CacheKey: *dep.slowCacheKey}) - } - results[i] = dep.result - } - - k.deps = inputs - return []*CacheKey{k}, results -} - -// isComplete returns true if edge state is final and will never change -func (e *edge) isComplete() bool { - return e.err != nil || e.result != nil -} - -// finishIncoming finalizes the incoming pipe request -func (e *edge) finishIncoming(req pipe.Sender) { - err := e.err - if req.Request().Canceled && err == nil { - err = context.Canceled - } - if debugScheduler { - logrus.Debugf("finishIncoming %s %v %#v desired=%s", e.edge.Vertex.Name(), err, e.edgeState, req.Request().Payload.(*edgeRequest).desiredState) - } - req.Finalize(&e.edgeState, err) -} - -// updateIncoming updates the current value of incoming pipe request -func (e *edge) updateIncoming(req pipe.Sender) { - req.Update(&e.edgeState) -} - -// probeCache is called with unprocessed cache keys for dependency -// if the key could match the edge, the cacheRecords for dependency are filled -func (e *edge) probeCache(d *dep, depKeys []CacheKeyWithSelector) bool { - if len(depKeys) == 0 { - return false - } - if e.op.IgnoreCache() { - return false - } - keys, err := e.op.Cache().Query(depKeys, d.index, e.cacheMap.Digest, e.edge.Index) - if err != nil { - e.err = errors.Wrap(err, "error on cache query") - } - found := false - for _, k := range keys { - if _, ok := d.keyMap[k.ID]; !ok { - d.keyMap[k.ID] = k - found = true - } - } - return found -} - -// checkDepMatchPossible checks if any cache matches are possible past this point -func (e *edge) checkDepMatchPossible(dep *dep) { - depHasSlowCache := e.cacheMap.Deps[dep.index].ComputeDigestFunc != nil - if !e.noCacheMatchPossible && (((!dep.slowCacheFoundKey && dep.slowCacheComplete && depHasSlowCache) || (!depHasSlowCache && dep.state >= edgeStatusCacheSlow)) && len(dep.keyMap) == 0) { - e.noCacheMatchPossible = true - } -} - -// slowCacheFunc returns the result based cache func for dependency if it exists -func (e *edge) slowCacheFunc(dep *dep) ResultBasedCacheFunc { - if e.cacheMap == nil { - return nil - } - return e.cacheMap.Deps[int(dep.index)].ComputeDigestFunc -} - -// allDepsHaveKeys checks if all dependencies have at least one key. 
used for -// determining if there is enough data for combining a cache key for the edge -func (e *edge) allDepsHaveKeys(matching bool) bool { - if e.cacheMap == nil { - return false - } - for _, d := range e.deps { - cond := len(d.keys) == 0 - if matching { - cond = len(d.keyMap) == 0 - } - if cond && d.slowCacheKey == nil && d.result == nil { - return false - } - } - return true -} - -// currentIndexKey returns a cache key combining all current dependency cache keys -func (e *edge) currentIndexKey() *CacheKey { - if e.cacheMap == nil { - return nil - } - - keys := make([][]CacheKeyWithSelector, len(e.deps)) - for i, d := range e.deps { - if len(d.keys) == 0 && d.result == nil { - return nil - } - for _, k := range d.keys { - keys[i] = append(keys[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: k}) - } - if d.result != nil { - for _, rk := range d.result.CacheKeys() { - keys[i] = append(keys[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: rk}) - } - if d.slowCacheKey != nil { - keys[i] = append(keys[i], CacheKeyWithSelector{CacheKey: ExportableCacheKey{CacheKey: d.slowCacheKey.CacheKey, Exporter: &exporter{k: d.slowCacheKey.CacheKey}}}) - } - } - } - - k := NewCacheKey(e.cacheMap.Digest, e.edge.Index) - k.deps = keys - - return k -} - -// slow cache keys can be computed in 2 phases if there are multiple deps. -// first evaluate ones that didn't match any definition based keys -func (e *edge) skipPhase2SlowCache(dep *dep) bool { - isPhase1 := false - for _, dep := range e.deps { - if (!dep.slowCacheComplete && e.slowCacheFunc(dep) != nil || dep.state < edgeStatusCacheSlow) && len(dep.keyMap) == 0 { - isPhase1 = true - break - } - } - - if isPhase1 && !dep.slowCacheComplete && e.slowCacheFunc(dep) != nil && len(dep.keyMap) > 0 { - return true - } - return false -} - -func (e *edge) skipPhase2FastCache(dep *dep) bool { - isPhase1 := false - for _, dep := range e.deps { - if e.cacheMap == nil || len(dep.keyMap) == 0 && ((!dep.slowCacheComplete && e.slowCacheFunc(dep) != nil) || (dep.state < edgeStatusComplete && e.slowCacheFunc(dep) == nil)) { - isPhase1 = true - break - } - } - - if isPhase1 && len(dep.keyMap) > 0 { - return true - } - return false -} - -// unpark is called by the scheduler with incoming requests and updates for -// previous calls. -// To avoid deadlocks and resource leaks this function needs to follow the -// following rules: -// 1) this function needs to return unclosed outgoing requests if some incoming -// requests were not completed -// 2) this function may not return outgoing requests if it has completed all -// incoming requests -func (e *edge) unpark(incoming []pipe.Sender, updates, allPipes []pipe.Receiver, f *pipeFactory) { - // process all incoming changes - depChanged := false - for _, upt := range updates { - if changed := e.processUpdate(upt); changed { - depChanged = true - } - } - - if depChanged { - // the dep responses had changes.
need to reevaluate edge state - e.recalcCurrentState() - } - - desiredState, done := e.respondToIncoming(incoming, allPipes) - if done { - return - } - - cacheMapReq := false - // set up new outgoing requests if needed - if e.cacheMapReq == nil && (e.cacheMap == nil || len(e.cacheRecords) == 0) { - index := e.cacheMapIndex - e.cacheMapReq = f.NewFuncRequest(func(ctx context.Context) (interface{}, error) { - return e.op.CacheMap(ctx, index) - }) - cacheMapReq = true - } - - // execute op - if e.execReq == nil && desiredState == edgeStatusComplete { - if ok := e.execIfPossible(f); ok { - return - } - } - - if e.execReq == nil { - if added := e.createInputRequests(desiredState, f, false); !added && !e.hasActiveOutgoing && !cacheMapReq { - logrus.Errorf("buildkit scheduling error: leaving incoming open. forcing solve. Please report this with BUILDKIT_SCHEDULER_DEBUG=1") - e.createInputRequests(desiredState, f, true) - } - } - -} - -func (e *edge) makeExportable(k *CacheKey, records []*CacheRecord) ExportableCacheKey { - return ExportableCacheKey{ - CacheKey: k, - Exporter: &exporter{k: k, records: records, override: e.edge.Vertex.Options().ExportCache}, - } -} - -func (e *edge) markFailed(f *pipeFactory, err error) { - e.err = err - e.postpone(f) -} - -// processUpdate is called by unpark for every updated pipe request -func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) { - // response for cachemap request - if upt == e.cacheMapReq && upt.Status().Completed { - if err := upt.Status().Err; err != nil { - e.cacheMapReq = nil - if !upt.Status().Canceled && e.err == nil { - e.err = err - } - } else { - resp := upt.Status().Value.(*cacheMapResp) - e.cacheMap = resp.CacheMap - e.cacheMapDone = resp.complete - e.cacheMapIndex++ - if len(e.deps) == 0 { - e.cacheMapDigests = append(e.cacheMapDigests, e.cacheMap.Digest) - if !e.op.IgnoreCache() { - keys, err := e.op.Cache().Query(nil, 0, e.cacheMap.Digest, e.edge.Index) - if err != nil { - logrus.Error(errors.Wrap(err, "invalid query response")) // make the build fail for this error - } else { - for _, k := range keys { - records, err := e.op.Cache().Records(k) - if err != nil { - logrus.Errorf("error receiving cache records: %v", err) - continue - } - - for _, r := range records { - e.cacheRecords[r.ID] = r - } - - e.keys = append(e.keys, e.makeExportable(k, records)) - } - } - } - e.state = edgeStatusCacheSlow - } - if e.allDepsHaveKeys(false) { - e.keysDidChange = true - } - // probe keys that were loaded before cache map - for i, dep := range e.deps { - e.probeCache(dep, withSelector(dep.keys, e.cacheMap.Deps[i].Selector)) - e.checkDepMatchPossible(dep) - } - if !e.cacheMapDone { - e.cacheMapReq = nil - } - } - return true - } - - // response for exec request - if upt == e.execReq && upt.Status().Completed { - if err := upt.Status().Err; err != nil { - e.execReq = nil - if !upt.Status().Canceled && e.err == nil { - e.err = err - } - } else { - e.result = NewSharedCachedResult(upt.Status().Value.(CachedResult)) - e.state = edgeStatusComplete - } - return true - } - - // response for requests to dependencies - if dep, ok := e.depRequests[upt]; ok { - if err := upt.Status().Err; !upt.Status().Canceled && upt.Status().Completed && err != nil { - if e.err == nil { - e.err = err - } - dep.err = err - } - - state := upt.Status().Value.(*edgeState) - - if len(dep.keys) < len(state.keys) { - newKeys := state.keys[len(dep.keys):] - if e.cacheMap != nil { - e.probeCache(dep, withSelector(newKeys, e.cacheMap.Deps[dep.index].Selector)) -
dep.edgeState.keys = state.keys - if e.allDepsHaveKeys(false) { - e.keysDidChange = true - } - } - depChanged = true - } - if dep.state != edgeStatusComplete && state.state == edgeStatusComplete { - e.keysDidChange = true - } - - recheck := state.state != dep.state - - dep.edgeState = *state - - if recheck && e.cacheMap != nil { - e.checkDepMatchPossible(dep) - depChanged = true - } - - return - } - - // response for result based cache function - for i, dep := range e.deps { - if upt == dep.slowCacheReq && upt.Status().Completed { - if err := upt.Status().Err; err != nil { - dep.slowCacheReq = nil - if !upt.Status().Canceled && e.err == nil { - e.err = upt.Status().Err - } - } else if !dep.slowCacheComplete { - k := NewCacheKey(upt.Status().Value.(digest.Digest), -1) - dep.slowCacheKey = &ExportableCacheKey{CacheKey: k, Exporter: &exporter{k: k}} - slowKeyExp := CacheKeyWithSelector{CacheKey: *dep.slowCacheKey} - defKeys := make([]CacheKeyWithSelector, 0, len(dep.result.CacheKeys())) - for _, dk := range dep.result.CacheKeys() { - defKeys = append(defKeys, CacheKeyWithSelector{CacheKey: dk, Selector: e.cacheMap.Deps[i].Selector}) - } - dep.slowCacheFoundKey = e.probeCache(dep, []CacheKeyWithSelector{slowKeyExp}) - - // connect def key to slow key - e.op.Cache().Query(append(defKeys, slowKeyExp), dep.index, e.cacheMap.Digest, e.edge.Index) - - dep.slowCacheComplete = true - e.keysDidChange = true - e.checkDepMatchPossible(dep) // not matching key here doesn't set nocachematch possible to true - } - return true - } - } - - return -} - -// recalcCurrentState is called by unpark to recompute internal state after -// the state of dependencies has changed -func (e *edge) recalcCurrentState() { - // TODO: fast pass to detect incomplete results - newKeys := map[string]*CacheKey{} - - for i, dep := range e.deps { - if i == 0 { - for id, k := range dep.keyMap { - if _, ok := e.keyMap[id]; ok { - continue - } - newKeys[id] = k - } - } else { - for id := range newKeys { - if _, ok := dep.keyMap[id]; !ok { - delete(newKeys, id) - } - } - } - if len(newKeys) == 0 { - break - } - } - - for _, r := range newKeys { - // TODO: add all deps automatically - mergedKey := r.clone() - mergedKey.deps = make([][]CacheKeyWithSelector, len(e.deps)) - for i, dep := range e.deps { - if dep.result != nil { - for _, dk := range dep.result.CacheKeys() { - mergedKey.deps[i] = append(mergedKey.deps[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: dk}) - } - if dep.slowCacheKey != nil { - mergedKey.deps[i] = append(mergedKey.deps[i], CacheKeyWithSelector{CacheKey: *dep.slowCacheKey}) - } - } else { - for _, k := range dep.keys { - mergedKey.deps[i] = append(mergedKey.deps[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: k}) - } - } - } - - records, err := e.op.Cache().Records(mergedKey) - if err != nil { - logrus.Errorf("error receiving cache records: %v", err) - continue - } - - for _, r := range records { - e.cacheRecords[r.ID] = r - } - - e.keys = append(e.keys, e.makeExportable(mergedKey, records)) - } - - // detect lower/upper bound for current state - allDepsCompletedCacheFast := e.cacheMap != nil - allDepsCompletedCacheSlow := e.cacheMap != nil - allDepsStateCacheSlow := true - allDepsCompleted := true - stLow := edgeStatusInitial // minimal possible state - stHigh := edgeStatusCacheSlow // maximum possible state - if e.cacheMap != nil { - for _, dep := range e.deps { - isSlowIncomplete := e.slowCacheFunc(dep) != nil && (dep.state == edgeStatusCacheSlow || (dep.state 
== edgeStatusComplete && !dep.slowCacheComplete)) - - if dep.state > stLow && len(dep.keyMap) == 0 && !isSlowIncomplete { - stLow = dep.state - if stLow > edgeStatusCacheSlow { - stLow = edgeStatusCacheSlow - } - } - effectiveState := dep.state - if dep.state == edgeStatusCacheSlow && isSlowIncomplete { - effectiveState = edgeStatusCacheFast - } - if dep.state == edgeStatusComplete && isSlowIncomplete { - effectiveState = edgeStatusCacheFast - } - if effectiveState < stHigh { - stHigh = effectiveState - } - if isSlowIncomplete || dep.state < edgeStatusComplete { - allDepsCompleted = false - } - if dep.state < edgeStatusCacheFast { - allDepsCompletedCacheFast = false - } - if isSlowIncomplete || dep.state < edgeStatusCacheSlow { - allDepsCompletedCacheSlow = false - } - if dep.state < edgeStatusCacheSlow && len(dep.keyMap) == 0 { - allDepsStateCacheSlow = false - } - } - if stLow > e.state { - e.state = stLow - } - if stHigh > e.state { - e.state = stHigh - } - if !e.cacheMapDone && len(e.keys) == 0 { - e.state = edgeStatusInitial - } - - e.allDepsCompletedCacheFast = e.cacheMapDone && allDepsCompletedCacheFast - e.allDepsCompletedCacheSlow = e.cacheMapDone && allDepsCompletedCacheSlow - e.allDepsStateCacheSlow = e.cacheMapDone && allDepsStateCacheSlow - e.allDepsCompleted = e.cacheMapDone && allDepsCompleted - } -} - -// respondToIncoming responds to all incoming requests, completing or -// updating them when possible -func (e *edge) respondToIncoming(incoming []pipe.Sender, allPipes []pipe.Receiver) (edgeStatusType, bool) { - // detect the result state for the requests - allIncomingCanComplete := true - desiredState := e.state - allCanceled := true - - // check incoming requests - // check if all requests can be either answered or canceled - if !e.isComplete() { - for _, req := range incoming { - if !req.Request().Canceled { - allCanceled = false - if r := req.Request().Payload.(*edgeRequest); desiredState < r.desiredState { - desiredState = r.desiredState - if e.hasActiveOutgoing || r.desiredState == edgeStatusComplete || r.currentKeys == len(e.keys) { - allIncomingCanComplete = false - } - } - } - } - } - - // do not set allIncomingCanComplete if active outgoing requests can modify the state - if !allCanceled && e.state < edgeStatusComplete && len(e.keys) == 0 && e.hasActiveOutgoing { - allIncomingCanComplete = false - } - - if debugScheduler { - logrus.Debugf("status state=%s cancomplete=%v hasouts=%v noPossibleCache=%v depsCacheFast=%v keys=%d cacheRecords=%d", e.state, allIncomingCanComplete, e.hasActiveOutgoing, e.noCacheMatchPossible, e.allDepsCompletedCacheFast, len(e.keys), len(e.cacheRecords)) - } - - if allIncomingCanComplete && e.hasActiveOutgoing { - // cancel all current requests - for _, p := range allPipes { - p.Cancel() - } - - // can close all but one request - var leaveOpen pipe.Sender - for _, req := range incoming { - if !req.Request().Canceled { - leaveOpen = req - break - } - } - for _, req := range incoming { - if leaveOpen == nil || leaveOpen == req { - leaveOpen = req - continue - } - e.finishIncoming(req) - } - return desiredState, true - } - - // can complete, finish and return - if allIncomingCanComplete && !e.hasActiveOutgoing { - for _, req := range incoming { - e.finishIncoming(req) - } - return desiredState, true - } - - // update incoming based on current state - for _, req := range incoming { - r := req.Request().Payload.(*edgeRequest) - if req.Request().Canceled { - e.finishIncoming(req) - } else if !e.hasActiveOutgoing && e.state >= r.desiredState { -
e.finishIncoming(req) - } else if !isEqualState(r.currentState, e.edgeState) && !req.Request().Canceled { - e.updateIncoming(req) - } - } - return desiredState, false -} - -// createInputRequests creates new requests for dependencies or async functions -// that need to complete to continue processing the edge -func (e *edge) createInputRequests(desiredState edgeStatusType, f *pipeFactory, force bool) bool { - addedNew := false - - // initialize deps state - if e.deps == nil { - e.depRequests = make(map[pipe.Receiver]*dep) - e.deps = make([]*dep, 0, len(e.edge.Vertex.Inputs())) - for i := range e.edge.Vertex.Inputs() { - e.deps = append(e.deps, newDep(Index(i))) - } - } - - // cycle all dependencies. set up outgoing requests if needed - for _, dep := range e.deps { - desiredStateDep := dep.state - - if e.noCacheMatchPossible || force { - desiredStateDep = edgeStatusComplete - } else if dep.state == edgeStatusInitial && desiredState > dep.state { - desiredStateDep = edgeStatusCacheFast - } else if dep.state == edgeStatusCacheFast && desiredState > dep.state { - // wait all deps to complete cache fast before continuing with slow cache - if (e.allDepsCompletedCacheFast && len(e.keys) == 0) || len(dep.keyMap) == 0 || e.allDepsHaveKeys(true) { - if !e.skipPhase2FastCache(dep) && e.cacheMap != nil { - desiredStateDep = edgeStatusCacheSlow - } - } - } else if e.cacheMap != nil && dep.state == edgeStatusCacheSlow && desiredState == edgeStatusComplete { - // if all deps have completed cache-slow or content based cache for input is available - if (len(dep.keyMap) == 0 || e.allDepsCompletedCacheSlow || (!e.skipPhase2FastCache(dep) && e.slowCacheFunc(dep) != nil)) && (len(e.cacheRecords) == 0) { - if len(dep.keyMap) == 0 || !e.skipPhase2SlowCache(dep) { - desiredStateDep = edgeStatusComplete - } - } - } else if e.cacheMap != nil && dep.state == edgeStatusCacheSlow && e.slowCacheFunc(dep) != nil && desiredState == edgeStatusCacheSlow { - if len(dep.keyMap) == 0 || !e.skipPhase2SlowCache(dep) { - desiredStateDep = edgeStatusComplete - } - } - - // outgoing request is needed - if dep.state < desiredStateDep { - addNew := true - if dep.req != nil && !dep.req.Status().Completed { - if dep.req.Request().(*edgeRequest).desiredState != desiredStateDep { - dep.req.Cancel() - } else { - addNew = false - } - } - if addNew { - req := f.NewInputRequest(e.edge.Vertex.Inputs()[int(dep.index)], &edgeRequest{ - currentState: dep.edgeState, - desiredState: desiredStateDep, - currentKeys: len(dep.keys), - }) - e.depRequests[req] = dep - dep.req = req - addedNew = true - } - } - // initialize function to compute cache key based on dependency result - if dep.state == edgeStatusComplete && dep.slowCacheReq == nil && e.slowCacheFunc(dep) != nil && e.cacheMap != nil { - fn := e.slowCacheFunc(dep) - res := dep.result - func(fn ResultBasedCacheFunc, res Result, index Index) { - dep.slowCacheReq = f.NewFuncRequest(func(ctx context.Context) (interface{}, error) { - return e.op.CalcSlowCache(ctx, index, fn, res) - }) - }(fn, res, dep.index) - addedNew = true - } - } - return addedNew -} - -// execIfPossible creates a request for getting the edge result if there is -// enough state -func (e *edge) execIfPossible(f *pipeFactory) bool { - if len(e.cacheRecords) > 0 { - if e.keysDidChange { - e.postpone(f) - return true - } - e.execReq = f.NewFuncRequest(e.loadCache) - for req := range e.depRequests { - req.Cancel() - } - return true - } else if e.allDepsCompleted { - if e.keysDidChange { - e.postpone(f) - return true - } - e.execReq 
= f.NewFuncRequest(e.execOp) - return true - } - return false -} - -// postpone delays exec to next unpark invocation if we have unprocessed keys -func (e *edge) postpone(f *pipeFactory) { - f.NewFuncRequest(func(context.Context) (interface{}, error) { - return nil, nil - }) -} - -// loadCache creates a request to load edge result from cache -func (e *edge) loadCache(ctx context.Context) (interface{}, error) { - recs := make([]*CacheRecord, 0, len(e.cacheRecords)) - for _, r := range e.cacheRecords { - recs = append(recs, r) - } - - rec := getBestResult(recs) - - logrus.Debugf("load cache for %s with %s", e.edge.Vertex.Name(), rec.ID) - res, err := e.op.LoadCache(ctx, rec) - if err != nil { - return nil, err - } - - return NewCachedResult(res, []ExportableCacheKey{{CacheKey: rec.key, Exporter: &exporter{k: rec.key, record: rec, edge: e}}}), nil -} - -// execOp creates a request to execute the vertex operation -func (e *edge) execOp(ctx context.Context) (interface{}, error) { - cacheKeys, inputs := e.commitOptions() - results, subExporters, err := e.op.Exec(ctx, toResultSlice(inputs)) - if err != nil { - return nil, err - } - - index := e.edge.Index - if len(results) <= int(index) { - return nil, errors.Errorf("invalid response from exec need %d index but %d results received", index, len(results)) - } - - res := results[int(index)] - - for i := range results { - if i != int(index) { - go results[i].Release(context.TODO()) - } - } - - var exporters []CacheExporter - - for _, cacheKey := range cacheKeys { - ck, err := e.op.Cache().Save(cacheKey, res) - if err != nil { - return nil, err - } - - if exp, ok := ck.Exporter.(*exporter); ok { - exp.edge = e - } - - exps := make([]CacheExporter, 0, len(subExporters)) - for _, exp := range subExporters { - exps = append(exps, exp.Exporter) - } - - exporters = append(exporters, ck.Exporter) - exporters = append(exporters, exps...) 
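
The exporter bookkeeping here feeds the mergedExporter defined further down in exporter.go. As an illustration of that fan-in shape, here is a minimal sketch using invented stand-in types (record, exporter, merged) rather than the solver's real interfaces: concatenate each part's records and stop at the first error.

```go
package main

import "fmt"

// record and exporter are stand-ins for CacheExporterRecord and
// CacheExporter from the deleted solver package.
type record string

type exporter interface {
	export() ([]record, error)
}

// merged fans several exporters into one, mirroring mergedExporter below.
type merged struct{ parts []exporter }

func (m merged) export() ([]record, error) {
	var out []record
	for _, p := range m.parts {
		recs, err := p.export()
		if err != nil {
			return nil, err // first failure wins; partial output is dropped
		}
		out = append(out, recs...)
	}
	return out, nil
}

type static []record

func (s static) export() ([]record, error) { return []record(s), nil }

func main() {
	m := merged{parts: []exporter{static{"a"}, static{"b", "c"}}}
	recs, err := m.export()
	fmt.Println(recs, err) // [a b c] <nil>
}
```
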
- } - - ek := make([]ExportableCacheKey, 0, len(cacheKeys)) - for _, ck := range cacheKeys { - ek = append(ek, ExportableCacheKey{ - CacheKey: ck, - Exporter: &mergedExporter{exporters: exporters}, - }) - } - - return NewCachedResult(res, ek), nil -} - -func toResultSlice(cres []CachedResult) (out []Result) { - out = make([]Result, len(cres)) - for i := range cres { - out[i] = cres[i].(Result) - } - return out -} - -func isEqualState(s1, s2 edgeState) bool { - if s1.state != s2.state || s1.result != s2.result || s1.cacheMap != s2.cacheMap || len(s1.keys) != len(s2.keys) { - return false - } - return true -} - -func withSelector(keys []ExportableCacheKey, selector digest.Digest) []CacheKeyWithSelector { - out := make([]CacheKeyWithSelector, len(keys)) - for i, k := range keys { - out[i] = CacheKeyWithSelector{Selector: selector, CacheKey: k} - } - return out -} diff --git a/vendor/github.com/moby/buildkit/solver/exporter.go b/vendor/github.com/moby/buildkit/solver/exporter.go deleted file mode 100644 index fa963f414a3e..000000000000 --- a/vendor/github.com/moby/buildkit/solver/exporter.go +++ /dev/null @@ -1,208 +0,0 @@ -package solver - -import ( - "context" - - digest "github.com/opencontainers/go-digest" -) - -type exporter struct { - k *CacheKey - records []*CacheRecord - record *CacheRecord - - res []CacheExporterRecord - edge *edge // for secondaryExporters - override *bool -} - -func addBacklinks(t CacheExporterTarget, rec CacheExporterRecord, cm *cacheManager, id string, bkm map[string]CacheExporterRecord) (CacheExporterRecord, error) { - if rec == nil { - var ok bool - rec, ok = bkm[id] - if ok { - return rec, nil - } - _ = ok - } - if err := cm.backend.WalkBacklinks(id, func(id string, link CacheInfoLink) error { - if rec == nil { - rec = t.Add(link.Digest) - } - r, ok := bkm[id] - if !ok { - var err error - r, err = addBacklinks(t, nil, cm, id, bkm) - if err != nil { - return err - } - } - rec.LinkFrom(r, int(link.Input), link.Selector.String()) - return nil - }); err != nil { - return nil, err - } - if rec == nil { - rec = t.Add(digest.Digest(id)) - } - bkm[id] = rec - return rec, nil -} - -type backlinkT struct{} - -var backlinkKey = backlinkT{} - -func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt CacheExportOpt) ([]CacheExporterRecord, error) { - var bkm map[string]CacheExporterRecord - - if bk := ctx.Value(backlinkKey); bk == nil { - bkm = map[string]CacheExporterRecord{} - ctx = context.WithValue(ctx, backlinkKey, bkm) - } else { - bkm = bk.(map[string]CacheExporterRecord) - } - - if t.Visited(e) { - return e.res, nil - } - - deps := e.k.Deps() - - type expr struct { - r CacheExporterRecord - selector digest.Digest - } - - rec := t.Add(rootKey(e.k.Digest(), e.k.Output())) - allRec := []CacheExporterRecord{rec} - - addRecord := true - - if e.override != nil { - addRecord = *e.override - } - - if e.record == nil && len(e.k.Deps()) > 0 { - e.record = getBestResult(e.records) - } - - var remote *Remote - if v := e.record; v != nil && len(e.k.Deps()) > 0 && addRecord { - cm := v.cacheManager - key := cm.getID(v.key) - res, err := cm.backend.Load(key, v.ID) - if err != nil { - return nil, err - } - - remote, err = cm.results.LoadRemote(ctx, res) - if err != nil { - return nil, err - } - - if remote == nil && opt.Mode != CacheExportModeRemoteOnly { - res, err := cm.results.Load(ctx, res) - if err != nil { - return nil, err - } - remote, err = opt.Convert(ctx, res) - if err != nil { - return nil, err - } - res.Release(context.TODO()) - } - - if remote != 
nil { - for _, rec := range allRec { - rec.AddResult(v.CreatedAt, remote) - } - } - } - - if remote != nil && opt.Mode == CacheExportModeMin { - opt.Mode = CacheExportModeRemoteOnly - } - - srcs := make([][]expr, len(deps)) - - for i, deps := range deps { - for _, dep := range deps { - recs, err := dep.CacheKey.Exporter.ExportTo(ctx, t, opt) - if err != nil { - return nil, nil - } - for _, r := range recs { - srcs[i] = append(srcs[i], expr{r: r, selector: dep.Selector}) - } - } - } - - if e.edge != nil { - for _, de := range e.edge.secondaryExporters { - recs, err := de.cacheKey.CacheKey.Exporter.ExportTo(ctx, t, opt) - if err != nil { - return nil, nil - } - for _, r := range recs { - srcs[de.index] = append(srcs[de.index], expr{r: r, selector: de.cacheKey.Selector}) - } - } - } - - for i, srcs := range srcs { - for _, src := range srcs { - rec.LinkFrom(src.r, i, src.selector.String()) - } - } - - for cm, id := range e.k.ids { - if _, err := addBacklinks(t, rec, cm, id, bkm); err != nil { - return nil, err - } - } - - if v := e.record; v != nil && len(deps) == 0 { - cm := v.cacheManager - key := cm.getID(v.key) - if err := cm.backend.WalkIDsByResult(v.ID, func(id string) error { - if id == key { - return nil - } - allRec = append(allRec, t.Add(digest.Digest(id))) - return nil - }); err != nil { - return nil, err - } - } - - e.res = allRec - t.Visit(e) - - return e.res, nil -} - -func getBestResult(records []*CacheRecord) *CacheRecord { - var rec *CacheRecord - for _, r := range records { - if rec == nil || rec.CreatedAt.Before(r.CreatedAt) || (rec.CreatedAt.Equal(r.CreatedAt) && rec.Priority < r.Priority) { - rec = r - } - } - return rec -} - -type mergedExporter struct { - exporters []CacheExporter -} - -func (e *mergedExporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt CacheExportOpt) (er []CacheExporterRecord, err error) { - for _, e := range e.exporters { - r, err := e.ExportTo(ctx, t, opt) - if err != nil { - return nil, err - } - er = append(er, r...) - } - return -} diff --git a/vendor/github.com/moby/buildkit/solver/index.go b/vendor/github.com/moby/buildkit/solver/index.go deleted file mode 100644 index 78a2cca256e0..000000000000 --- a/vendor/github.com/moby/buildkit/solver/index.go +++ /dev/null @@ -1,245 +0,0 @@ -package solver - -import ( - "sync" - - "github.com/moby/buildkit/identity" -) - -// edgeIndex is a synchronous map for detecting edge collisions. 
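
Before the full type below, the core LoadOrStore contract can be sketched in isolation: return the previously stored owner on a key collision so the caller can merge into it, and return nil when the caller claimed the slot. The sketch uses plain string keys and *int stand-ins for edges; the real index matches on cache-key structure, not strings.

```go
package main

import (
	"fmt"
	"sync"
)

// miniIndex is a simplified stand-in for edgeIndex: a mutex-guarded map
// where LoadOrStore returns the previously stored owner on a collision
// and nil when the caller claimed the slot.
type miniIndex struct {
	mu    sync.Mutex
	items map[string]*int // *int stands in for *edge
}

func (ix *miniIndex) LoadOrStore(key string, e *int) *int {
	ix.mu.Lock()
	defer ix.mu.Unlock()
	if old, ok := ix.items[key]; ok && old != e {
		return old // collision: caller should merge into the old edge
	}
	ix.items[key] = e
	return nil
}

func main() {
	ix := &miniIndex{items: map[string]*int{}}
	e1, e2 := new(int), new(int)
	fmt.Println(ix.LoadOrStore("foo", e1) == nil) // true: e1 claimed "foo"
	fmt.Println(ix.LoadOrStore("foo", e2) == e1)  // true: e2 collides with e1
}
```
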
-type edgeIndex struct { - mu sync.Mutex - - items map[string]*indexItem - backRefs map[*edge]map[string]struct{} -} - -type indexItem struct { - edge *edge - links map[CacheInfoLink]map[string]struct{} - deps map[string]struct{} -} - -func newEdgeIndex() *edgeIndex { - return &edgeIndex{ - items: map[string]*indexItem{}, - backRefs: map[*edge]map[string]struct{}{}, - } -} - -func (ei *edgeIndex) Release(e *edge) { - ei.mu.Lock() - defer ei.mu.Unlock() - - for id := range ei.backRefs[e] { - ei.releaseEdge(id, e) - } - delete(ei.backRefs, e) -} - -func (ei *edgeIndex) releaseEdge(id string, e *edge) { - item, ok := ei.items[id] - if !ok { - return - } - - item.edge = nil - - if len(item.links) == 0 { - for d := range item.deps { - ei.releaseLink(d, id) - } - delete(ei.items, id) - } -} - -func (ei *edgeIndex) releaseLink(id, target string) { - item, ok := ei.items[id] - if !ok { - return - } - - for lid, links := range item.links { - for check := range links { - if check == target { - delete(links, check) - } - } - if len(links) == 0 { - delete(item.links, lid) - } - } - - if item.edge == nil && len(item.links) == 0 { - for d := range item.deps { - ei.releaseLink(d, id) - } - delete(ei.items, id) - } -} - -func (ei *edgeIndex) LoadOrStore(k *CacheKey, e *edge) *edge { - ei.mu.Lock() - defer ei.mu.Unlock() - - // get all current edges that match the cachekey - ids := ei.getAllMatches(k) - - var oldID string - var old *edge - - for _, id := range ids { - if item, ok := ei.items[id]; ok { - if item.edge != e { - oldID = id - old = item.edge - } - } - } - - if old != nil && !(!isIgnoreCache(old) && isIgnoreCache(e)) { - ei.enforceLinked(oldID, k) - return old - } - - id := identity.NewID() - if len(ids) > 0 { - id = ids[0] - } - - ei.enforceLinked(id, k) - - ei.items[id].edge = e - backRefs, ok := ei.backRefs[e] - if !ok { - backRefs = map[string]struct{}{} - ei.backRefs[e] = backRefs - } - backRefs[id] = struct{}{} - - return nil -} - -// enforceLinked adds links from current ID to all dep keys -func (er *edgeIndex) enforceLinked(id string, k *CacheKey) { - main, ok := er.items[id] - if !ok { - main = &indexItem{ - links: map[CacheInfoLink]map[string]struct{}{}, - deps: map[string]struct{}{}, - } - er.items[id] = main - } - - deps := k.Deps() - - for i, dd := range deps { - for _, d := range dd { - ck := d.CacheKey.CacheKey - er.enforceIndexID(ck) - ll := CacheInfoLink{Input: Index(i), Digest: k.Digest(), Output: k.Output(), Selector: d.Selector} - for _, ckID := range ck.indexIDs { - if item, ok := er.items[ckID]; ok { - links, ok := item.links[ll] - if !ok { - links = map[string]struct{}{} - item.links[ll] = links - } - links[id] = struct{}{} - main.deps[ckID] = struct{}{} - } - } - } - } -} - -func (ei *edgeIndex) enforceIndexID(k *CacheKey) { - if len(k.indexIDs) > 0 { - return - } - - matches := ei.getAllMatches(k) - - if len(matches) > 0 { - k.indexIDs = matches - } else { - k.indexIDs = []string{identity.NewID()} - } - - for _, id := range k.indexIDs { - ei.enforceLinked(id, k) - } -} - -func (ei *edgeIndex) getAllMatches(k *CacheKey) []string { - deps := k.Deps() - - if len(deps) == 0 { - return []string{rootKey(k.Digest(), k.Output()).String()} - } - - for _, dd := range deps { - for _, k := range dd { - ei.enforceIndexID(k.CacheKey.CacheKey) - } - } - - matches := map[string]struct{}{} - - for i, dd := range deps { - if i == 0 { - for _, d := range dd { - ll := CacheInfoLink{Input: Index(i), Digest: k.Digest(), Output: k.Output(), Selector: d.Selector} - for _, ckID := range 
d.CacheKey.CacheKey.indexIDs { - item, ok := ei.items[ckID] - if ok { - for l := range item.links[ll] { - matches[l] = struct{}{} - } - } - } - } - continue - } - - if len(matches) == 0 { - break - } - - for m := range matches { - found := false - for _, d := range dd { - ll := CacheInfoLink{Input: Index(i), Digest: k.Digest(), Output: k.Output(), Selector: d.Selector} - for _, ckID := range d.CacheKey.CacheKey.indexIDs { - if item, ok := ei.items[ckID]; ok { - if l, ok := item.links[ll]; ok { - if _, ok := l[m]; ok { - found = true - break - } - } - } - } - } - - if !found { - delete(matches, m) - } - } - } - - out := make([]string, 0, len(matches)) - - for m := range matches { - out = append(out, m) - } - - return out -} - -func isIgnoreCache(e *edge) bool { - if e.edge.Vertex == nil { - return false - } - return e.edge.Vertex.Options().IgnoreCache -} diff --git a/vendor/github.com/moby/buildkit/solver/index_test.go b/vendor/github.com/moby/buildkit/solver/index_test.go deleted file mode 100644 index c77ff3cbce94..000000000000 --- a/vendor/github.com/moby/buildkit/solver/index_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package solver - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func checkEmpty(t *testing.T, ei *edgeIndex) { - require.Equal(t, len(ei.items), 0) - require.Equal(t, len(ei.backRefs), 0) -} - -func TestIndexSimple(t *testing.T) { - idx := newEdgeIndex() - - e1 := &edge{} - e2 := &edge{} - e3 := &edge{} - - k1 := NewCacheKey(dgst("foo"), 0) - v := idx.LoadOrStore(k1, e1) - require.Nil(t, v) - - k2 := NewCacheKey(dgst("bar"), 0) - v = idx.LoadOrStore(k2, e2) - require.Nil(t, v) - - v = idx.LoadOrStore(NewCacheKey(dgst("bar"), 0), e3) - require.Equal(t, v, e2) - - v = idx.LoadOrStore(NewCacheKey(dgst("bar"), 0), e3) - require.Equal(t, v, e2) - - v = idx.LoadOrStore(NewCacheKey(dgst("foo"), 0), e3) - require.Equal(t, v, e1) - - idx.Release(e1) - idx.Release(e2) - checkEmpty(t, idx) -} - -func TestIndexMultiLevelSimple(t *testing.T) { - idx := newEdgeIndex() - - e1 := &edge{} - e2 := &edge{} - e3 := &edge{} - - k1 := testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - {{CacheKey: expKey(NewCacheKey("s0", 0)), Selector: dgst("s0")}}, - }) - - v := idx.LoadOrStore(k1, e1) - require.Nil(t, v) - - k2 := testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - {{CacheKey: expKey(NewCacheKey("s0", 0)), Selector: dgst("s0")}}, - }) - - v = idx.LoadOrStore(k2, e2) - require.Equal(t, v, e1) - - k2 = testCacheKeyWithDeps(dgst("foo"), 1, k1.Deps()) - v = idx.LoadOrStore(k2, e2) - require.Equal(t, v, e1) - - v = idx.LoadOrStore(k1, e2) - require.Equal(t, v, e1) - - // update selector - k2 = testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - {{CacheKey: expKey(NewCacheKey("s0", 0))}}, - }) - v = idx.LoadOrStore(k2, e2) - require.Nil(t, v) - - // add one dep to e1 - k2 = testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - { - {CacheKey: expKey(NewCacheKey("s0", 0)), Selector: dgst("s0")}, - {CacheKey: expKey(NewCacheKey("s1", 1))}, - }, - }) - v = idx.LoadOrStore(k2, e2) - require.Equal(t, v, e1) - - // recheck with only the new dep key - k2 = testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - { - {CacheKey: expKey(NewCacheKey("s1", 1))}, - }, - }) - v = idx.LoadOrStore(k2, e2) - 
require.Equal(t, v, e1) - - // combine e1 and e2 - k2 = testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - { - {CacheKey: expKey(NewCacheKey("s0", 0))}, - {CacheKey: expKey(NewCacheKey("s1", 1))}, - }, - }) - v = idx.LoadOrStore(k2, e2) - require.Equal(t, v, e1) - - // initial e2 now points to e1 - k2 = testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - {{CacheKey: expKey(NewCacheKey("s0", 0))}}, - }) - v = idx.LoadOrStore(k2, e2) - require.Equal(t, v, e1) - - idx.Release(e1) - - // e2 still remains after e1 is gone - k2 = testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - {{CacheKey: expKey(NewCacheKey("s0", 0))}}, - }) - v = idx.LoadOrStore(k2, e3) - require.Equal(t, v, e2) - - idx.Release(e2) - checkEmpty(t, idx) -} - -func TestIndexThreeLevels(t *testing.T) { - idx := newEdgeIndex() - - e1 := &edge{} - e2 := &edge{} - e3 := &edge{} - - k1 := testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - {{CacheKey: expKey(NewCacheKey("s0", 0)), Selector: dgst("s0")}}, - }) - - v := idx.LoadOrStore(k1, e1) - require.Nil(t, v) - - v = idx.LoadOrStore(k1, e2) - require.Equal(t, v, e1) - - k2 := testCacheKeyWithDeps(dgst("bar"), 0, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - {{CacheKey: expKey(k1)}}, - }) - v = idx.LoadOrStore(k2, e2) - require.Nil(t, v) - - k2 = testCacheKeyWithDeps(dgst("bar"), 0, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - { - {CacheKey: expKey(k1)}, - {CacheKey: expKey(NewCacheKey("alt", 0))}, - }, - }) - v = idx.LoadOrStore(k2, e2) - require.Nil(t, v) - - k2 = testCacheKeyWithDeps(dgst("bar"), 0, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - { - {CacheKey: expKey(NewCacheKey("alt", 0))}, - }, - }) - v = idx.LoadOrStore(k2, e3) - require.Equal(t, v, e2) - - // change dep in a low key - k1 = testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - { - {CacheKey: expKey(NewCacheKey("f0", 0))}, - {CacheKey: expKey(NewCacheKey("f0_", 0))}, - }, - {{CacheKey: expKey(NewCacheKey("s0", 0)), Selector: dgst("s0")}}, - }) - k2 = testCacheKeyWithDeps(dgst("bar"), 0, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - {{CacheKey: expKey(k1)}}, - }) - v = idx.LoadOrStore(k2, e3) - require.Equal(t, v, e2) - - // reload with only f0_ still matches - k1 = testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - { - {CacheKey: expKey(NewCacheKey("f0_", 0))}, - }, - {{CacheKey: expKey(NewCacheKey("s0", 0)), Selector: dgst("s0")}}, - }) - k2 = testCacheKeyWithDeps(dgst("bar"), 0, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - {{CacheKey: expKey(k1)}}, - }) - v = idx.LoadOrStore(k2, e3) - require.Equal(t, v, e2) - - idx.Release(e1) - idx.Release(e2) - checkEmpty(t, idx) -} diff --git a/vendor/github.com/moby/buildkit/solver/internal/pipe/pipe.go b/vendor/github.com/moby/buildkit/solver/internal/pipe/pipe.go deleted file mode 100644 index e61a6b3465bb..000000000000 --- a/vendor/github.com/moby/buildkit/solver/internal/pipe/pipe.go +++ /dev/null @@ -1,197 +0,0 @@ -package pipe - -import ( - "context" - "sync" - "sync/atomic" - - "github.com/pkg/errors" -) - -type channel struct { - OnSendCompletion func() - value atomic.Value - lastValue interface{} -} - -func (c *channel) Send(v interface{}) { - c.value.Store(v) - 
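
The atomic.Value being written here gives the pipe its last-value-wins semantics: a sender overwrites earlier unread values, and a receiver observes each distinct value at most once. A self-contained restatement of that mailbox idea, with an invented mailbox type and the completion callback omitted:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// mailbox mirrors the pipe's channel type: Send publishes the latest
// value, Receive reports it at most once, and intermediate values may
// be skipped entirely (the consumer only ever sees the newest state).
type mailbox struct {
	value atomic.Value
	last  interface{}
}

func (m *mailbox) Send(v interface{}) { m.value.Store(v) }

func (m *mailbox) Receive() (interface{}, bool) {
	v := m.value.Load()
	if v == nil || v == m.last {
		return nil, false
	}
	m.last = v
	return v, true
}

func main() {
	var m mailbox
	m.Send(1)
	m.Send(2) // overwrites 1 before anyone reads it
	v, ok := m.Receive()
	fmt.Println(v, ok) // 2 true
	_, ok = m.Receive()
	fmt.Println(ok) // false: no new value since the last read
}
```
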
if c.OnSendCompletion != nil { - c.OnSendCompletion() - } -} - -func (c *channel) Receive() (interface{}, bool) { - v := c.value.Load() - if c.lastValue == v { - return nil, false - } - c.lastValue = v - return v, true -} - -type Pipe struct { - Sender Sender - Receiver Receiver - OnReceiveCompletion func() - OnSendCompletion func() -} - -type Request struct { - Payload interface{} - Canceled bool -} - -type Sender interface { - Request() Request - Update(v interface{}) - Finalize(v interface{}, err error) - Status() Status -} - -type Receiver interface { - Receive() bool - Cancel() - Status() Status - Request() interface{} -} - -type Status struct { - Canceled bool - Completed bool - Err error - Value interface{} -} - -func NewWithFunction(f func(context.Context) (interface{}, error)) (*Pipe, func()) { - p := New(Request{}) - - ctx, cancel := context.WithCancel(context.TODO()) - - p.OnReceiveCompletion = func() { - if req := p.Sender.Request(); req.Canceled { - cancel() - } - } - - return p, func() { - res, err := f(ctx) - if err != nil { - p.Sender.Finalize(nil, err) - return - } - p.Sender.Finalize(res, nil) - } -} - -func New(req Request) *Pipe { - cancelCh := &channel{} - roundTripCh := &channel{} - pw := &sender{ - req: req, - recvChannel: cancelCh, - sendChannel: roundTripCh, - } - pr := &receiver{ - req: req, - recvChannel: roundTripCh, - sendChannel: cancelCh, - } - - p := &Pipe{ - Sender: pw, - Receiver: pr, - } - - cancelCh.OnSendCompletion = func() { - v, ok := cancelCh.Receive() - if ok { - pw.setRequest(v.(Request)) - } - if p.OnReceiveCompletion != nil { - p.OnReceiveCompletion() - } - } - - roundTripCh.OnSendCompletion = func() { - if p.OnSendCompletion != nil { - p.OnSendCompletion() - } - } - - return p -} - -type sender struct { - status Status - req Request - recvChannel *channel - sendChannel *channel - mu sync.Mutex -} - -func (pw *sender) Status() Status { - return pw.status -} - -func (pw *sender) Request() Request { - pw.mu.Lock() - defer pw.mu.Unlock() - return pw.req -} - -func (pw *sender) setRequest(req Request) { - pw.mu.Lock() - defer pw.mu.Unlock() - pw.req = req -} - -func (pw *sender) Update(v interface{}) { - pw.status.Value = v - pw.sendChannel.Send(pw.status) -} - -func (pw *sender) Finalize(v interface{}, err error) { - if v != nil { - pw.status.Value = v - } - pw.status.Err = err - pw.status.Completed = true - if errors.Cause(err) == context.Canceled && pw.req.Canceled { - pw.status.Canceled = true - } - pw.sendChannel.Send(pw.status) -} - -type receiver struct { - status Status - req Request - recvChannel *channel - sendChannel *channel -} - -func (pr *receiver) Request() interface{} { - return pr.req.Payload -} - -func (pr *receiver) Receive() bool { - v, ok := pr.recvChannel.Receive() - if !ok { - return false - } - pr.status = v.(Status) - return true -} - -func (pr *receiver) Cancel() { - req := pr.req - if req.Canceled { - return - } - req.Canceled = true - pr.sendChannel.Send(req) -} - -func (pr *receiver) Status() Status { - return pr.status -} diff --git a/vendor/github.com/moby/buildkit/solver/internal/pipe/pipe_test.go b/vendor/github.com/moby/buildkit/solver/internal/pipe/pipe_test.go deleted file mode 100644 index 54b59af35066..000000000000 --- a/vendor/github.com/moby/buildkit/solver/internal/pipe/pipe_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package pipe - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestPipe(t *testing.T) { - t.Parallel() - - runCh := make(chan struct{}) - f := func(ctx 
context.Context) (interface{}, error) { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-runCh: - return "res0", nil - } - } - - waitSignal := make(chan struct{}, 10) - signalled := 0 - signal := func() { - signalled++ - waitSignal <- struct{}{} - } - - p, start := NewWithFunction(f) - p.OnSendCompletion = signal - go start() - require.Equal(t, false, p.Receiver.Receive()) - - st := p.Receiver.Status() - require.Equal(t, st.Completed, false) - require.Equal(t, st.Canceled, false) - require.Nil(t, st.Value) - require.Equal(t, signalled, 0) - - close(runCh) - <-waitSignal - - p.Receiver.Receive() - st = p.Receiver.Status() - require.Equal(t, st.Completed, true) - require.Equal(t, st.Canceled, false) - require.NoError(t, st.Err) - require.Equal(t, st.Value.(string), "res0") -} - -func TestPipeCancel(t *testing.T) { - t.Parallel() - - runCh := make(chan struct{}) - f := func(ctx context.Context) (interface{}, error) { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-runCh: - return "res0", nil - } - } - - waitSignal := make(chan struct{}, 10) - signalled := 0 - signal := func() { - signalled++ - waitSignal <- struct{}{} - } - - p, start := NewWithFunction(f) - p.OnSendCompletion = signal - go start() - p.Receiver.Receive() - - st := p.Receiver.Status() - require.Equal(t, st.Completed, false) - require.Equal(t, st.Canceled, false) - require.Nil(t, st.Value) - require.Equal(t, signalled, 0) - - p.Receiver.Cancel() - <-waitSignal - - p.Receiver.Receive() - st = p.Receiver.Status() - require.Equal(t, st.Completed, true) - require.Equal(t, st.Canceled, true) - require.Error(t, st.Err) - require.Equal(t, st.Err, context.Canceled) -} diff --git a/vendor/github.com/moby/buildkit/solver/jobs.go b/vendor/github.com/moby/buildkit/solver/jobs.go deleted file mode 100644 index 371f43199bf0..000000000000 --- a/vendor/github.com/moby/buildkit/solver/jobs.go +++ /dev/null @@ -1,802 +0,0 @@ -package solver - -import ( - "context" - "fmt" - "strings" - "sync" - "time" - - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/util/flightcontrol" - "github.com/moby/buildkit/util/progress" - "github.com/moby/buildkit/util/tracing" - digest "github.com/opencontainers/go-digest" - opentracing "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" -) - -// ResolveOpFunc finds an Op implementation for a Vertex -type ResolveOpFunc func(Vertex, Builder) (Op, error) - -type Builder interface { - Build(ctx context.Context, e Edge) (CachedResult, error) - Context(ctx context.Context) context.Context - EachValue(ctx context.Context, key string, fn func(interface{}) error) error -} - -// Solver provides a shared graph of all the vertexes currently being -// processed. Every vertex that is being solved needs to be loaded into job -// first. Vertex operations are invoked and progress tracking happens through -// jobs. 
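
A toy restatement of the sharing model this comment describes, with invented names: loading the same vertex digest from two jobs yields one shared state, and a state is released only when its last referencing job is discarded.

```go
package main

import "fmt"

// state and solver are toy stand-ins for the types defined below;
// states are keyed by vertex digest and reference-counted by job.
type state struct{ jobs map[string]struct{} }

type solver struct{ actives map[string]*state }

func (s *solver) load(digest, job string) *state {
	st, ok := s.actives[digest]
	if !ok {
		st = &state{jobs: map[string]struct{}{}}
		s.actives[digest] = st
	}
	st.jobs[job] = struct{}{}
	return st
}

// discard drops a job's references and deletes unreferenced states.
func (s *solver) discard(job string) {
	for d, st := range s.actives {
		delete(st.jobs, job)
		if len(st.jobs) == 0 {
			delete(s.actives, d)
		}
	}
}

func main() {
	s := &solver{actives: map[string]*state{}}
	a := s.load("sha256:abc", "job1")
	b := s.load("sha256:abc", "job2")
	fmt.Println(a == b)         // true: one state per digest
	s.discard("job1")
	fmt.Println(len(s.actives)) // 1: job2 still references it
	s.discard("job2")
	fmt.Println(len(s.actives)) // 0: state released
}
```
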
-type Solver struct { - mu sync.RWMutex - jobs map[string]*Job - actives map[digest.Digest]*state - opts SolverOpt - - updateCond *sync.Cond - s *scheduler - index *edgeIndex -} - -type state struct { - jobs map[*Job]struct{} - parents map[digest.Digest]struct{} - childVtx map[digest.Digest]struct{} - - mpw *progress.MultiWriter - allPw map[progress.Writer]struct{} - mspan *tracing.MultiSpan - allSpan map[opentracing.Span]struct{} - - vtx Vertex - clientVertex client.Vertex - - mu sync.Mutex - op *sharedOp - edges map[Index]*edge - opts SolverOpt - index *edgeIndex - - cache map[string]CacheManager - mainCache CacheManager - solver *Solver -} - -func (s *state) getSessionID() string { - // TODO: connect with sessionmanager to avoid getting dropped sessions - s.mu.Lock() - for j := range s.jobs { - if j.SessionID != "" { - s.mu.Unlock() - return j.SessionID - } - } - parents := map[digest.Digest]struct{}{} - for p := range s.parents { - parents[p] = struct{}{} - } - s.mu.Unlock() - - for p := range parents { - s.solver.mu.Lock() - pst, ok := s.solver.actives[p] - s.solver.mu.Unlock() - if ok { - if sessionID := pst.getSessionID(); sessionID != "" { - return sessionID - } - } - } - return "" -} - -func (s *state) builder() *subBuilder { - return &subBuilder{state: s} -} - -func (s *state) getEdge(index Index) *edge { - s.mu.Lock() - defer s.mu.Unlock() - if e, ok := s.edges[index]; ok { - return e - } - - if s.op == nil { - s.op = newSharedOp(s.opts.ResolveOpFunc, s.opts.DefaultCache, s) - } - - e := newEdge(Edge{Index: index, Vertex: s.vtx}, s.op, s.index) - s.edges[index] = e - return e -} - -func (s *state) setEdge(index Index, newEdge *edge) { - s.mu.Lock() - defer s.mu.Unlock() - e, ok := s.edges[index] - if ok { - if e == newEdge { - return - } - e.release() - } - - newEdge.incrementReferenceCount() - s.edges[index] = newEdge -} - -func (s *state) combinedCacheManager() CacheManager { - s.mu.Lock() - cms := make([]CacheManager, 0, len(s.cache)+1) - cms = append(cms, s.mainCache) - for _, cm := range s.cache { - cms = append(cms, cm) - } - s.mu.Unlock() - - if len(cms) == 1 { - return s.mainCache - } - - return newCombinedCacheManager(cms, s.mainCache) -} - -func (s *state) Release() { - for _, e := range s.edges { - e.release() - } - if s.op != nil { - s.op.release() - } -} - -type subBuilder struct { - *state - mu sync.Mutex - exporters []ExportableCacheKey -} - -func (sb *subBuilder) Build(ctx context.Context, e Edge) (CachedResult, error) { - res, err := sb.solver.subBuild(ctx, e, sb.vtx) - if err != nil { - return nil, err - } - sb.mu.Lock() - sb.exporters = append(sb.exporters, res.CacheKeys()[0]) // all keys already have full export chain - sb.mu.Unlock() - return res, nil -} - -func (sb *subBuilder) Context(ctx context.Context) context.Context { - return opentracing.ContextWithSpan(progress.WithProgress(ctx, sb.mpw), sb.mspan) -} - -func (sb *subBuilder) EachValue(ctx context.Context, key string, fn func(interface{}) error) error { - sb.mu.Lock() - defer sb.mu.Lock() - for j := range sb.jobs { - if err := j.EachValue(ctx, key, fn); err != nil { - return err - } - } - return nil -} - -type Job struct { - list *Solver - pr *progress.MultiReader - pw progress.Writer - span opentracing.Span - values sync.Map - - progressCloser func() - SessionID string -} - -type SolverOpt struct { - ResolveOpFunc ResolveOpFunc - DefaultCache CacheManager -} - -func NewSolver(opts SolverOpt) *Solver { - if opts.DefaultCache == nil { - opts.DefaultCache = NewInMemoryCacheManager() - } - jl := 
&Solver{ - jobs: make(map[string]*Job), - actives: make(map[digest.Digest]*state), - opts: opts, - index: newEdgeIndex(), - } - jl.s = newScheduler(jl) - jl.updateCond = sync.NewCond(jl.mu.RLocker()) - return jl -} - -func (jl *Solver) setEdge(e Edge, newEdge *edge) { - jl.mu.RLock() - defer jl.mu.RUnlock() - - st, ok := jl.actives[e.Vertex.Digest()] - if !ok { - return - } - - st.setEdge(e.Index, newEdge) -} - -func (jl *Solver) getEdge(e Edge) *edge { - jl.mu.RLock() - defer jl.mu.RUnlock() - - st, ok := jl.actives[e.Vertex.Digest()] - if !ok { - return nil - } - return st.getEdge(e.Index) -} - -func (jl *Solver) subBuild(ctx context.Context, e Edge, parent Vertex) (CachedResult, error) { - v, err := jl.load(e.Vertex, parent, nil) - if err != nil { - return nil, err - } - e.Vertex = v - return jl.s.build(ctx, e) -} - -func (jl *Solver) Close() { - jl.s.Stop() -} - -func (jl *Solver) load(v, parent Vertex, j *Job) (Vertex, error) { - jl.mu.Lock() - defer jl.mu.Unlock() - - cache := map[Vertex]Vertex{} - - return jl.loadUnlocked(v, parent, j, cache) -} - -func (jl *Solver) loadUnlocked(v, parent Vertex, j *Job, cache map[Vertex]Vertex) (Vertex, error) { - if v, ok := cache[v]; ok { - return v, nil - } - origVtx := v - - inputs := make([]Edge, len(v.Inputs())) - for i, e := range v.Inputs() { - v, err := jl.loadUnlocked(e.Vertex, parent, j, cache) - if err != nil { - return nil, err - } - inputs[i] = Edge{Index: e.Index, Vertex: v} - } - - dgst := v.Digest() - - dgstWithoutCache := digest.FromBytes([]byte(fmt.Sprintf("%s-ignorecache", dgst))) - - // if same vertex is already loaded without cache just use that - st, ok := jl.actives[dgstWithoutCache] - - if !ok { - st, ok = jl.actives[dgst] - - // !ignorecache merges with ignorecache but ignorecache doesn't merge with !ignorecache - if ok && !st.vtx.Options().IgnoreCache && v.Options().IgnoreCache { - dgst = dgstWithoutCache - } - - v = &vertexWithCacheOptions{ - Vertex: v, - dgst: dgst, - inputs: inputs, - } - - st, ok = jl.actives[dgst] - } - - if !ok { - st = &state{ - opts: jl.opts, - jobs: map[*Job]struct{}{}, - parents: map[digest.Digest]struct{}{}, - childVtx: map[digest.Digest]struct{}{}, - allPw: map[progress.Writer]struct{}{}, - allSpan: map[opentracing.Span]struct{}{}, - mpw: progress.NewMultiWriter(progress.WithMetadata("vertex", dgst)), - mspan: tracing.NewMultiSpan(), - vtx: v, - clientVertex: initClientVertex(v), - edges: map[Index]*edge{}, - index: jl.index, - mainCache: jl.opts.DefaultCache, - cache: map[string]CacheManager{}, - solver: jl, - } - jl.actives[dgst] = st - } - - st.mu.Lock() - for _, cache := range v.Options().CacheSources { - if cache.ID() != st.mainCache.ID() { - if _, ok := st.cache[cache.ID()]; !ok { - st.cache[cache.ID()] = cache - } - } - } - - if j != nil { - if _, ok := st.jobs[j]; !ok { - st.jobs[j] = struct{}{} - } - } - st.mu.Unlock() - - if parent != nil { - if _, ok := st.parents[parent.Digest()]; !ok { - st.parents[parent.Digest()] = struct{}{} - parentState, ok := jl.actives[parent.Digest()] - if !ok { - return nil, errors.Errorf("inactive parent %s", parent.Digest()) - } - parentState.childVtx[dgst] = struct{}{} - - for id, c := range parentState.cache { - st.cache[id] = c - } - } - } - - jl.connectProgressFromState(st, st) - cache[origVtx] = v - return v, nil -} - -func (jl *Solver) connectProgressFromState(target, src *state) { - for j := range src.jobs { - if _, ok := target.allPw[j.pw]; !ok { - target.mpw.Add(j.pw) - target.allPw[j.pw] = struct{}{} - 
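
The -ignorecache digest derived in loadUnlocked above is ordinary go-digest usage: hash the original digest string plus a suffix to get a stable sibling key, so both the cached and ignore-cache variants of a vertex can coexist in the actives map. For example:

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	// dgst stands in for a vertex digest; the suffix trick matches
	// the loadUnlocked hunk above.
	dgst := digest.FromString("example vertex")
	dgstWithoutCache := digest.FromBytes([]byte(fmt.Sprintf("%s-ignorecache", dgst)))
	fmt.Println(dgst)             // sha256:... for the original vertex
	fmt.Println(dgstWithoutCache) // a different, but stable, sibling digest
}
```
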
j.pw.Write(target.clientVertex.Digest.String(), target.clientVertex) - target.mspan.Add(j.span) - target.allSpan[j.span] = struct{}{} - } - } - for p := range src.parents { - jl.connectProgressFromState(target, jl.actives[p]) - } -} - -func (jl *Solver) NewJob(id string) (*Job, error) { - jl.mu.Lock() - defer jl.mu.Unlock() - - if _, ok := jl.jobs[id]; ok { - return nil, errors.Errorf("job ID %s exists", id) - } - - pr, ctx, progressCloser := progress.NewContext(context.Background()) - pw, _, _ := progress.FromContext(ctx) // TODO: expose progress.Pipe() - - j := &Job{ - list: jl, - pr: progress.NewMultiReader(pr), - pw: pw, - progressCloser: progressCloser, - span: (&opentracing.NoopTracer{}).StartSpan(""), - } - jl.jobs[id] = j - - jl.updateCond.Broadcast() - - return j, nil -} - -func (jl *Solver) Get(id string) (*Job, error) { - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() - - go func() { - <-ctx.Done() - jl.updateCond.Broadcast() - }() - - jl.mu.RLock() - defer jl.mu.RUnlock() - for { - select { - case <-ctx.Done(): - return nil, errors.Errorf("no such job %s", id) - default: - } - j, ok := jl.jobs[id] - if !ok { - jl.updateCond.Wait() - continue - } - return j, nil - } -} - -// called with solver lock -func (jl *Solver) deleteIfUnreferenced(k digest.Digest, st *state) { - if len(st.jobs) == 0 && len(st.parents) == 0 { - for chKey := range st.childVtx { - chState := jl.actives[chKey] - delete(chState.parents, k) - jl.deleteIfUnreferenced(chKey, chState) - } - st.Release() - delete(jl.actives, k) - } -} - -func (j *Job) Build(ctx context.Context, e Edge) (CachedResult, error) { - if span := opentracing.SpanFromContext(ctx); span != nil { - j.span = span - } - - v, err := j.list.load(e.Vertex, nil, j) - if err != nil { - return nil, err - } - e.Vertex = v - return j.list.s.build(ctx, e) -} - -func (j *Job) Discard() error { - defer j.progressCloser() - - j.list.mu.Lock() - defer j.list.mu.Unlock() - - j.pw.Close() - - for k, st := range j.list.actives { - st.mu.Lock() - if _, ok := st.jobs[j]; ok { - delete(st.jobs, j) - j.list.deleteIfUnreferenced(k, st) - } - if _, ok := st.allPw[j.pw]; ok { - delete(st.allPw, j.pw) - } - if _, ok := st.allSpan[j.span]; ok { - delete(st.allSpan, j.span) - } - st.mu.Unlock() - } - return nil -} - -func (j *Job) Context(ctx context.Context) context.Context { - return progress.WithProgress(ctx, j.pw) -} - -func (j *Job) SetValue(key string, v interface{}) { - j.values.Store(key, v) -} - -func (j *Job) EachValue(ctx context.Context, key string, fn func(interface{}) error) error { - v, ok := j.values.Load(key) - if ok { - return fn(v) - } - return nil -} - -type cacheMapResp struct { - *CacheMap - complete bool -} - -type activeOp interface { - CacheMap(context.Context, int) (*cacheMapResp, error) - LoadCache(ctx context.Context, rec *CacheRecord) (Result, error) - Exec(ctx context.Context, inputs []Result) (outputs []Result, exporters []ExportableCacheKey, err error) - IgnoreCache() bool - Cache() CacheManager - CalcSlowCache(context.Context, Index, ResultBasedCacheFunc, Result) (digest.Digest, error) -} - -func newSharedOp(resolver ResolveOpFunc, cacheManager CacheManager, st *state) *sharedOp { - so := &sharedOp{ - resolver: resolver, - st: st, - slowCacheRes: map[Index]digest.Digest{}, - slowCacheErr: map[Index]error{}, - } - return so -} - -type execRes struct { - execRes []*SharedResult - execExporters []ExportableCacheKey -} - -type sharedOp struct { - resolver ResolveOpFunc - st *state - g 
flightcontrol.Group - - opOnce sync.Once - op Op - subBuilder *subBuilder - err error - - execRes *execRes - execErr error - - cacheRes []*CacheMap - cacheDone bool - cacheErr error - - slowMu sync.Mutex - slowCacheRes map[Index]digest.Digest - slowCacheErr map[Index]error -} - -func (s *sharedOp) IgnoreCache() bool { - return s.st.vtx.Options().IgnoreCache -} - -func (s *sharedOp) Cache() CacheManager { - return s.st.combinedCacheManager() -} - -func (s *sharedOp) LoadCache(ctx context.Context, rec *CacheRecord) (Result, error) { - ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan) - // no cache hit. start evaluating the node - span, ctx := tracing.StartSpan(ctx, "load cache: "+s.st.vtx.Name()) - notifyStarted(ctx, &s.st.clientVertex, true) - res, err := s.Cache().Load(ctx, rec) - tracing.FinishWithError(span, err) - notifyCompleted(ctx, &s.st.clientVertex, err, true) - return res, err -} - -func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, f ResultBasedCacheFunc, res Result) (digest.Digest, error) { - key, err := s.g.Do(ctx, fmt.Sprintf("slow-compute-%d", index), func(ctx context.Context) (interface{}, error) { - s.slowMu.Lock() - // TODO: add helpers for these stored values - if res := s.slowCacheRes[index]; res != "" { - s.slowMu.Unlock() - return res, nil - } - if err := s.slowCacheErr[index]; err != nil { - s.slowMu.Unlock() - return err, nil - } - s.slowMu.Unlock() - ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan) - key, err := f(ctx, res) - complete := true - if err != nil { - select { - case <-ctx.Done(): - if strings.Contains(err.Error(), context.Canceled.Error()) { - complete = false - err = errors.Wrap(ctx.Err(), err.Error()) - } - default: - } - } - s.slowMu.Lock() - defer s.slowMu.Unlock() - if complete { - if err == nil { - s.slowCacheRes[index] = key - } - s.slowCacheErr[index] = err - } - return key, err - }) - if err != nil { - ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan) - notifyStarted(ctx, &s.st.clientVertex, false) - notifyCompleted(ctx, &s.st.clientVertex, err, false) - return "", err - } - return key.(digest.Digest), nil -} - -func (s *sharedOp) CacheMap(ctx context.Context, index int) (*cacheMapResp, error) { - op, err := s.getOp() - if err != nil { - return nil, err - } - res, err := s.g.Do(ctx, "cachemap", func(ctx context.Context) (ret interface{}, retErr error) { - if s.cacheRes != nil && s.cacheDone || index < len(s.cacheRes) { - return s.cacheRes, nil - } - if s.cacheErr != nil { - return nil, s.cacheErr - } - ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan) - ctx = session.NewContext(ctx, s.st.getSessionID()) - if len(s.st.vtx.Inputs()) == 0 { - // no cache hit. 
start evaluating the node - span, ctx := tracing.StartSpan(ctx, "cache request: "+s.st.vtx.Name()) - notifyStarted(ctx, &s.st.clientVertex, false) - defer func() { - tracing.FinishWithError(span, retErr) - notifyCompleted(ctx, &s.st.clientVertex, retErr, false) - }() - } - res, done, err := op.CacheMap(ctx, len(s.cacheRes)) - complete := true - if err != nil { - select { - case <-ctx.Done(): - if strings.Contains(err.Error(), context.Canceled.Error()) { - complete = false - err = errors.Wrap(ctx.Err(), err.Error()) - } - default: - } - } - if complete { - if err == nil { - s.cacheRes = append(s.cacheRes, res) - s.cacheDone = done - } - s.cacheErr = err - } - return s.cacheRes, err - }) - if err != nil { - return nil, err - } - - if len(res.([]*CacheMap)) <= index { - return s.CacheMap(ctx, index) - } - - return &cacheMapResp{CacheMap: res.([]*CacheMap)[index], complete: s.cacheDone}, nil -} - -func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, exporters []ExportableCacheKey, err error) { - op, err := s.getOp() - if err != nil { - return nil, nil, err - } - res, err := s.g.Do(ctx, "exec", func(ctx context.Context) (ret interface{}, retErr error) { - if s.execRes != nil || s.execErr != nil { - return s.execRes, s.execErr - } - - ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan) - ctx = session.NewContext(ctx, s.st.getSessionID()) - - // no cache hit. start evaluating the node - span, ctx := tracing.StartSpan(ctx, s.st.vtx.Name()) - notifyStarted(ctx, &s.st.clientVertex, false) - defer func() { - tracing.FinishWithError(span, retErr) - notifyCompleted(ctx, &s.st.clientVertex, retErr, false) - }() - - res, err := op.Exec(ctx, inputs) - complete := true - if err != nil { - select { - case <-ctx.Done(): - if strings.Contains(err.Error(), context.Canceled.Error()) { - complete = false - err = errors.Wrap(ctx.Err(), err.Error()) - } - default: - } - } - if complete { - if res != nil { - var subExporters []ExportableCacheKey - s.subBuilder.mu.Lock() - if len(s.subBuilder.exporters) > 0 { - subExporters = append(subExporters, s.subBuilder.exporters...) 
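
The flightcontrol.Group used throughout sharedOp collapses concurrent identical work, much as golang.org/x/sync/singleflight does. The sketch below uses singleflight purely as an illustration (it is not what this code imports): callers that overlap on the same key share a single execution and see shared=true.

```go
package main

import (
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			// All three goroutines ask for the key "exec"; callers that
			// arrive while it is in flight share the one result.
			v, _, shared := g.Do("exec", func() (interface{}, error) {
				time.Sleep(50 * time.Millisecond) // stand-in for op.Exec
				return "result", nil
			})
			fmt.Printf("caller %d: %v (shared=%v)\n", n, v, shared)
		}(i)
	}
	wg.Wait()
}
```
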
- } - s.subBuilder.mu.Unlock() - - s.execRes = &execRes{execRes: wrapShared(res), execExporters: subExporters} - } - s.execErr = err - } - return s.execRes, err - }) - if err != nil { - return nil, nil, err - } - r := res.(*execRes) - return unwrapShared(r.execRes), r.execExporters, nil -} - -func (s *sharedOp) getOp() (Op, error) { - s.opOnce.Do(func() { - s.subBuilder = s.st.builder() - s.op, s.err = s.resolver(s.st.vtx, s.subBuilder) - }) - if s.err != nil { - return nil, s.err - } - return s.op, nil -} - -func (s *sharedOp) release() { - if s.execRes != nil { - for _, r := range s.execRes.execRes { - go r.Release(context.TODO()) - } - } -} - -func initClientVertex(v Vertex) client.Vertex { - inputDigests := make([]digest.Digest, 0, len(v.Inputs())) - for _, inp := range v.Inputs() { - inputDigests = append(inputDigests, inp.Vertex.Digest()) - } - return client.Vertex{ - Inputs: inputDigests, - Name: v.Name(), - Digest: v.Digest(), - } -} - -func wrapShared(inp []Result) []*SharedResult { - out := make([]*SharedResult, len(inp)) - for i, r := range inp { - out[i] = NewSharedResult(r) - } - return out -} - -func unwrapShared(inp []*SharedResult) []Result { - out := make([]Result, len(inp)) - for i, r := range inp { - out[i] = r.Clone() - } - return out -} - -type vertexWithCacheOptions struct { - Vertex - inputs []Edge - dgst digest.Digest -} - -func (v *vertexWithCacheOptions) Digest() digest.Digest { - return v.dgst -} - -func (v *vertexWithCacheOptions) Inputs() []Edge { - return v.inputs -} - -func notifyStarted(ctx context.Context, v *client.Vertex, cached bool) { - pw, _, _ := progress.FromContext(ctx) - defer pw.Close() - now := time.Now() - v.Started = &now - v.Completed = nil - v.Cached = cached - pw.Write(v.Digest.String(), *v) -} - -func notifyCompleted(ctx context.Context, v *client.Vertex, err error, cached bool) { - pw, _, _ := progress.FromContext(ctx) - defer pw.Close() - now := time.Now() - if v.Started == nil { - v.Started = &now - } - v.Completed = &now - v.Cached = cached - if err != nil { - v.Error = err.Error() - } - pw.Write(v.Digest.String(), *v) -} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go deleted file mode 100644 index 5894417deb00..000000000000 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go +++ /dev/null @@ -1,213 +0,0 @@ -package llbsolver - -import ( - "context" - "fmt" - "io" - "strings" - "sync" - - "github.com/containerd/containerd/platforms" - "github.com/docker/distribution/reference" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/remotecache" - "github.com/moby/buildkit/executor" - "github.com/moby/buildkit/frontend" - gw "github.com/moby/buildkit/frontend/gateway/client" - "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/util/tracing" - "github.com/moby/buildkit/worker" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type llbBridge struct { - builder solver.Builder - frontends map[string]frontend.Frontend - resolveWorker func() (worker.Worker, error) - resolveCacheImporter remotecache.ResolveCacheImporterFunc - cms map[string]solver.CacheManager - cmsMu sync.Mutex - platforms []specs.Platform -} - -func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res *frontend.Result, err error) { - w, err := b.resolveWorker() - if err != nil { - return nil, err - } - var cms []solver.CacheManager - for _, ref := 
range req.ImportCacheRefs { - b.cmsMu.Lock() - var cm solver.CacheManager - if prevCm, ok := b.cms[ref]; !ok { - r, err := reference.ParseNormalizedNamed(ref) - if err != nil { - return nil, err - } - ref = reference.TagNameOnly(r).String() - func(ref string) { - cm = newLazyCacheManager(ref, func() (solver.CacheManager, error) { - var cmNew solver.CacheManager - if err := inVertexContext(b.builder.Context(ctx), "importing cache manifest from "+ref, "", func(ctx context.Context) error { - if b.resolveCacheImporter == nil { - return errors.New("no cache importer is available") - } - typ := "" // TODO: support non-registry type - ci, desc, err := b.resolveCacheImporter(ctx, typ, ref) - if err != nil { - return err - } - cmNew, err = ci.Resolve(ctx, desc, ref, w) - return err - }); err != nil { - return nil, err - } - return cmNew, nil - }) - }(ref) - b.cms[ref] = cm - } else { - cm = prevCm - } - cms = append(cms, cm) - b.cmsMu.Unlock() - } - - if req.Definition != nil && req.Definition.Def != nil && req.Frontend != "" { - return nil, errors.New("cannot solve with both Definition and Frontend specified") - } - - if req.Definition != nil && req.Definition.Def != nil { - ent, err := loadEntitlements(b.builder) - if err != nil { - return nil, err - } - - edge, err := Load(req.Definition, ValidateEntitlements(ent), WithCacheSources(cms), RuntimePlatforms(b.platforms), WithValidateCaps()) - if err != nil { - return nil, err - } - ref, err := b.builder.Build(ctx, edge) - if err != nil { - return nil, err - } - - res = &frontend.Result{Ref: ref} - } else if req.Frontend != "" { - f, ok := b.frontends[req.Frontend] - if !ok { - return nil, errors.Errorf("invalid frontend: %s", req.Frontend) - } - res, err = f.Solve(ctx, b, req.FrontendOpt) - if err != nil { - return nil, err - } - } else { - return &frontend.Result{}, nil - } - - if err := res.EachRef(func(r solver.CachedResult) error { - wr, ok := r.Sys().(*worker.WorkerRef) - if !ok { - return errors.Errorf("invalid reference for exporting: %T", r.Sys()) - } - if wr.ImmutableRef != nil { - if err := wr.ImmutableRef.Finalize(ctx, false); err != nil { - return err - } - } - return nil - }); err != nil { - return nil, err - } - return -} - -func (s *llbBridge) Exec(ctx context.Context, meta executor.Meta, root cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) (err error) { - w, err := s.resolveWorker() - if err != nil { - return err - } - span, ctx := tracing.StartSpan(ctx, strings.Join(meta.Args, " ")) - err = w.Exec(ctx, meta, root, stdin, stdout, stderr) - tracing.FinishWithError(span, err) - return err -} - -func (s *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) { - w, err := s.resolveWorker() - if err != nil { - return "", nil, err - } - if opt.LogName == "" { - opt.LogName = fmt.Sprintf("resolve image config for %s", ref) - } - id := ref // make a deterministic ID for avoiding duplicates - if platform := opt.Platform; platform == nil { - id += platforms.Format(platforms.DefaultSpec()) - } else { - id += platforms.Format(*platform) - } - err = inVertexContext(s.builder.Context(ctx), opt.LogName, id, func(ctx context.Context) error { - dgst, config, err = w.ResolveImageConfig(ctx, ref, opt) - return err - }) - return dgst, config, err -} - -type lazyCacheManager struct { - id string - main solver.CacheManager - - waitCh chan struct{} - err error -} - -func (lcm *lazyCacheManager) ID() string { - return lcm.id -} -func (lcm 
*lazyCacheManager) Query(inp []solver.CacheKeyWithSelector, inputIndex solver.Index, dgst digest.Digest, outputIndex solver.Index) ([]*solver.CacheKey, error) { - if err := lcm.wait(); err != nil { - return nil, err - } - return lcm.main.Query(inp, inputIndex, dgst, outputIndex) -} -func (lcm *lazyCacheManager) Records(ck *solver.CacheKey) ([]*solver.CacheRecord, error) { - if err := lcm.wait(); err != nil { - return nil, err - } - return lcm.main.Records(ck) -} -func (lcm *lazyCacheManager) Load(ctx context.Context, rec *solver.CacheRecord) (solver.Result, error) { - if err := lcm.wait(); err != nil { - return nil, err - } - return lcm.main.Load(ctx, rec) -} -func (lcm *lazyCacheManager) Save(key *solver.CacheKey, s solver.Result) (*solver.ExportableCacheKey, error) { - if err := lcm.wait(); err != nil { - return nil, err - } - return lcm.main.Save(key, s) -} - -func (lcm *lazyCacheManager) wait() error { - <-lcm.waitCh - return lcm.err -} - -func newLazyCacheManager(id string, fn func() (solver.CacheManager, error)) solver.CacheManager { - lcm := &lazyCacheManager{id: id, waitCh: make(chan struct{})} - go func() { - defer close(lcm.waitCh) - cm, err := fn() - if err != nil { - lcm.err = err - return - } - lcm.main = cm - }() - return lcm -} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go deleted file mode 100644 index 4b0300497b4f..000000000000 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go +++ /dev/null @@ -1,132 +0,0 @@ -package ops - -import ( - "context" - "encoding/json" - "os" - - "github.com/containerd/continuity/fs" - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/worker" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -const buildCacheType = "buildkit.build.v0" - -type buildOp struct { - op *pb.BuildOp - b frontend.FrontendLLBBridge - v solver.Vertex -} - -func NewBuildOp(v solver.Vertex, op *pb.Op_Build, b frontend.FrontendLLBBridge, _ worker.Worker) (solver.Op, error) { - return &buildOp{ - op: op.Build, - b: b, - v: v, - }, nil -} - -func (b *buildOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) { - dt, err := json.Marshal(struct { - Type string - Exec *pb.BuildOp - }{ - Type: buildCacheType, - Exec: b.op, - }) - if err != nil { - return nil, false, err - } - - return &solver.CacheMap{ - Digest: digest.FromBytes(dt), - Deps: make([]struct { - Selector digest.Digest - ComputeDigestFunc solver.ResultBasedCacheFunc - }, len(b.v.Inputs())), - }, true, nil -} - -func (b *buildOp) Exec(ctx context.Context, inputs []solver.Result) (outputs []solver.Result, retErr error) { - if b.op.Builder != pb.LLBBuilder { - return nil, errors.Errorf("only LLB builder is currently allowed") - } - - builderInputs := b.op.Inputs - llbDef, ok := builderInputs[pb.LLBDefinitionInput] - if !ok { - return nil, errors.Errorf("no llb definition input %s found", pb.LLBDefinitionInput) - } - - i := int(llbDef.Input) - if i >= len(inputs) { - return nil, errors.Errorf("invalid index %v", i) // TODO: this should be validated before - } - inp := inputs[i] - - ref, ok := inp.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid reference for build %T", inp.Sys()) - } - - mount, err := ref.ImmutableRef.Mount(ctx, true) - if err != nil { - return nil, err - } - 
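
The lazyCacheManager shown just above compresses to a small, reusable pattern: run the initializer once in the background, publish the result by closing a channel, and have every method gate on wait(). A sketch with invented names; closing done establishes the happens-before edge that makes main and err safe to read.

```go
package main

import (
	"errors"
	"fmt"
)

// lazy mirrors lazyCacheManager: fn runs once in a goroutine,
// close(done) publishes main/err, and callers block in wait().
type lazy struct {
	main string
	err  error
	done chan struct{}
}

func newLazy(fn func() (string, error)) *lazy {
	l := &lazy{done: make(chan struct{})}
	go func() {
		defer close(l.done)
		l.main, l.err = fn()
	}()
	return l
}

func (l *lazy) wait() error {
	<-l.done
	return l.err
}

// Query stands in for Query/Records/Load/Save: every method waits
// for initialization before touching the real value.
func (l *lazy) Query() (string, error) {
	if err := l.wait(); err != nil {
		return "", err
	}
	return l.main, nil
}

func main() {
	ok := newLazy(func() (string, error) { return "cache-manager", nil })
	fmt.Println(ok.Query())
	bad := newLazy(func() (string, error) { return "", errors.New("import failed") })
	fmt.Println(bad.Query())
}
```
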
- lm := snapshot.LocalMounter(mount) - - root, err := lm.Mount() - if err != nil { - return nil, err - } - - defer func() { - if retErr != nil && lm != nil { - lm.Unmount() - } - }() - - fn := pb.LLBDefaultDefinitionFile - if override, ok := b.op.Attrs[pb.AttrLLBDefinitionFilename]; ok { - fn = override - } - - newfn, err := fs.RootPath(root, fn) - if err != nil { - return nil, errors.Wrapf(err, "working dir %s points to invalid target", fn) - } - - f, err := os.Open(newfn) - if err != nil { - return nil, errors.Wrapf(err, "failed to open %s", newfn) - } - - def, err := llb.ReadFrom(f) - if err != nil { - f.Close() - return nil, err - } - f.Close() - lm.Unmount() - lm = nil - - newRes, err := b.b.Solve(ctx, frontend.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return nil, err - } - - for _, r := range newRes.Refs { - r.Release(context.TODO()) - } - - return []solver.Result{newRes.Ref}, err -} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go deleted file mode 100644 index 9eb9968a14ce..000000000000 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go +++ /dev/null @@ -1,796 +0,0 @@ -package ops - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "os" - "path" - "path/filepath" - "runtime" - "sort" - "strings" - "sync" - "time" - - "github.com/containerd/containerd/mount" - "github.com/docker/docker/pkg/locker" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/executor" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/secrets" - "github.com/moby/buildkit/session/sshforward" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/progress/logs" - utilsystem "github.com/moby/buildkit/util/system" - "github.com/moby/buildkit/worker" - digest "github.com/opencontainers/go-digest" - "github.com/opencontainers/runc/libcontainer/system" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - bolt "go.etcd.io/bbolt" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -const execCacheType = "buildkit.exec.v0" - -type execOp struct { - op *pb.ExecOp - cm cache.Manager - sm *session.Manager - md *metadata.Store - exec executor.Executor - w worker.Worker - numInputs int - - cacheMounts map[string]*cacheRefShare -} - -func NewExecOp(v solver.Vertex, op *pb.Op_Exec, cm cache.Manager, sm *session.Manager, md *metadata.Store, exec executor.Executor, w worker.Worker) (solver.Op, error) { - return &execOp{ - op: op.Exec, - cm: cm, - sm: sm, - md: md, - exec: exec, - numInputs: len(v.Inputs()), - w: w, - cacheMounts: map[string]*cacheRefShare{}, - }, nil -} - -func cloneExecOp(old *pb.ExecOp) pb.ExecOp { - n := *old - meta := *n.Meta - meta.ExtraHosts = nil - for i := range n.Meta.ExtraHosts { - h := *n.Meta.ExtraHosts[i] - meta.ExtraHosts = append(meta.ExtraHosts, &h) - } - n.Meta = &meta - n.Mounts = nil - for i := range n.Mounts { - m := *n.Mounts[i] - n.Mounts = append(n.Mounts, &m) - } - return n -} - -func (e *execOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) { - op := cloneExecOp(e.op) - for i := range op.Meta.ExtraHosts { - h := op.Meta.ExtraHosts[i] - h.IP = "" - op.Meta.ExtraHosts[i] = h - } - for i := range 
op.Mounts { - op.Mounts[i].Selector = "" - } - op.Meta.ProxyEnv = nil - - dt, err := json.Marshal(struct { - Type string - Exec *pb.ExecOp - OS string - Arch string - }{ - Type: execCacheType, - Exec: &op, - OS: runtime.GOOS, - Arch: runtime.GOARCH, - }) - if err != nil { - return nil, false, err - } - - cm := &solver.CacheMap{ - Digest: digest.FromBytes(dt), - Deps: make([]struct { - Selector digest.Digest - ComputeDigestFunc solver.ResultBasedCacheFunc - }, e.numInputs), - } - - deps, err := e.getMountDeps() - if err != nil { - return nil, false, err - } - - for i, dep := range deps { - if len(dep.Selectors) != 0 { - dgsts := make([][]byte, 0, len(dep.Selectors)) - for _, p := range dep.Selectors { - dgsts = append(dgsts, []byte(p)) - } - cm.Deps[i].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0})) - } - if !dep.NoContentBasedHash { - cm.Deps[i].ComputeDigestFunc = llbsolver.NewContentHashFunc(dedupePaths(dep.Selectors)) - } - } - - return cm, true, nil -} - -func dedupePaths(inp []string) []string { - old := make(map[string]struct{}, len(inp)) - for _, p := range inp { - old[p] = struct{}{} - } - paths := make([]string, 0, len(old)) - for p1 := range old { - var skip bool - for p2 := range old { - if p1 != p2 && strings.HasPrefix(p1, p2) { - skip = true - break - } - } - if !skip { - paths = append(paths, p1) - } - } - sort.Slice(paths, func(i, j int) bool { - return paths[i] < paths[j] - }) - return paths -} - -type dep struct { - Selectors []string - NoContentBasedHash bool -} - -func (e *execOp) getMountDeps() ([]dep, error) { - deps := make([]dep, e.numInputs) - for _, m := range e.op.Mounts { - if m.Input == pb.Empty { - continue - } - if int(m.Input) >= len(deps) { - return nil, errors.Errorf("invalid mountinput %v", m) - } - - sel := m.Selector - if sel != "" { - sel = path.Join("/", sel) - deps[m.Input].Selectors = append(deps[m.Input].Selectors, sel) - } - - if !m.Readonly || m.Dest == pb.RootMount { // exclude read-only rootfs - deps[m.Input].NoContentBasedHash = true - } - } - return deps, nil -} - -func (e *execOp) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, m *pb.Mount, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) { - - key := "cache-dir:" + id - if ref != nil { - key += ":" + ref.ID() - } - - if ref, ok := e.cacheMounts[key]; ok { - return ref.clone(), nil - } - defer func() { - if err == nil { - share := &cacheRefShare{MutableRef: mref, refs: map[*cacheRef]struct{}{}} - e.cacheMounts[key] = share - mref = share.clone() - } - }() - - switch sharing { - case pb.CacheSharingOpt_SHARED: - return sharedCacheRefs.get(key, func() (cache.MutableRef, error) { - return e.getRefCacheDirNoCache(ctx, key, ref, id, m, false) - }) - case pb.CacheSharingOpt_PRIVATE: - return e.getRefCacheDirNoCache(ctx, key, ref, id, m, false) - case pb.CacheSharingOpt_LOCKED: - return e.getRefCacheDirNoCache(ctx, key, ref, id, m, true) - default: - return nil, errors.Errorf("invalid cache sharing option: %s", sharing.String()) - } - -} - -func (e *execOp) getRefCacheDirNoCache(ctx context.Context, key string, ref cache.ImmutableRef, id string, m *pb.Mount, block bool) (cache.MutableRef, error) { - makeMutable := func(cache.ImmutableRef) (cache.MutableRef, error) { - desc := fmt.Sprintf("cached mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " ")) - return e.cm.New(ctx, ref, cache.WithRecordType(client.UsageRecordTypeCacheMount), cache.WithDescription(desc), cache.CachePolicyRetain) - } - - cacheRefsLocker.Lock(key) - defer 
cacheRefsLocker.Unlock(key) - for { - sis, err := e.md.Search(key) - if err != nil { - return nil, err - } - locked := false - for _, si := range sis { - if mRef, err := e.cm.GetMutable(ctx, si.ID()); err == nil { - logrus.Debugf("reusing ref for cache dir: %s", mRef.ID()) - return mRef, nil - } else if errors.Cause(err) == cache.ErrLocked { - locked = true - } - } - if block && locked { - cacheRefsLocker.Unlock(key) - select { - case <-ctx.Done(): - cacheRefsLocker.Lock(key) - return nil, ctx.Err() - case <-time.After(100 * time.Millisecond): - cacheRefsLocker.Lock(key) - } - } else { - break - } - } - mRef, err := makeMutable(ref) - if err != nil { - return nil, err - } - - si, _ := e.md.Get(mRef.ID()) - v, err := metadata.NewValue(key) - if err != nil { - mRef.Release(context.TODO()) - return nil, err - } - v.Index = key - if err := si.Update(func(b *bolt.Bucket) error { - return si.SetValue(b, key, v) - }); err != nil { - mRef.Release(context.TODO()) - return nil, err - } - return mRef, nil -} - -func (e *execOp) getSSHMountable(ctx context.Context, m *pb.Mount) (cache.Mountable, error) { - sessionID := session.FromContext(ctx) - if sessionID == "" { - return nil, errors.New("could not access local files without session") - } - - timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - caller, err := e.sm.Get(timeoutCtx, sessionID) - if err != nil { - return nil, err - } - - if err := sshforward.CheckSSHID(ctx, caller, m.SSHOpt.ID); err != nil { - if m.SSHOpt.Optional { - return nil, nil - } - if st, ok := status.FromError(err); ok && st.Code() == codes.Unimplemented { - return nil, errors.Errorf("no SSH key %q forwarded from the client", m.SSHOpt.ID) - } - return nil, err - } - - return &sshMount{mount: m, caller: caller}, nil -} - -type sshMount struct { - mount *pb.Mount - caller session.Caller -} - -func (sm *sshMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { - return &sshMountInstance{sm: sm}, nil -} - -type sshMountInstance struct { - sm *sshMount - cleanup func() error -} - -func (sm *sshMountInstance) Mount() ([]mount.Mount, error) { - ctx, cancel := context.WithCancel(context.TODO()) - - sock, cleanup, err := sshforward.MountSSHSocket(ctx, sm.sm.caller, sshforward.SocketOpt{ - ID: sm.sm.mount.SSHOpt.ID, - UID: int(sm.sm.mount.SSHOpt.Uid), - GID: int(sm.sm.mount.SSHOpt.Gid), - Mode: int(sm.sm.mount.SSHOpt.Mode), - }) - if err != nil { - cancel() - return nil, err - } - sm.cleanup = func() error { - var err error - if cleanup != nil { - err = cleanup() - } - cancel() - return err - } - - return []mount.Mount{{ - Type: "bind", - Source: sock, - Options: []string{"rbind"}, - }}, nil -} - -func (sm *sshMountInstance) Release() error { - if sm.cleanup != nil { - if err := sm.cleanup(); err != nil { - return err - } - } - return nil -} - -func (e *execOp) getSecretMountable(ctx context.Context, m *pb.Mount) (cache.Mountable, error) { - if m.SecretOpt == nil { - return nil, errors.Errorf("invalid sercet mount options") - } - sopt := *m.SecretOpt - - id := sopt.ID - if id == "" { - return nil, errors.Errorf("secret ID missing from mount options") - } - - sessionID := session.FromContext(ctx) - if sessionID == "" { - return nil, errors.New("could not access local files without session") - } - - timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - caller, err := e.sm.Get(timeoutCtx, sessionID) - if err != nil { - return nil, err - } - - dt, err := secrets.GetSecret(ctx, caller, id) - if err != nil { - 
if errors.Cause(err) == secrets.ErrNotFound && m.SecretOpt.Optional { - return nil, nil - } - return nil, err - } - - return &secretMount{mount: m, data: dt}, nil -} - -type secretMount struct { - mount *pb.Mount - data []byte -} - -func (sm *secretMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { - return &secretMountInstance{sm: sm}, nil -} - -type secretMountInstance struct { - sm *secretMount - root string -} - -func (sm *secretMountInstance) Mount() ([]mount.Mount, error) { - dir, err := ioutil.TempDir("", "buildkit-secrets") - if err != nil { - return nil, errors.Wrap(err, "failed to create temp dir") - } - - if err := os.Chmod(dir, 0711); err != nil { - return nil, err - } - - tmpMount := mount.Mount{ - Type: "tmpfs", - Source: "tmpfs", - Options: []string{"nodev", "nosuid", "noexec", fmt.Sprintf("uid=%d,gid=%d", os.Geteuid(), os.Getegid())}, - } - - if system.RunningInUserNS() { - tmpMount.Options = nil - } - - if err := mount.All([]mount.Mount{tmpMount}, dir); err != nil { - return nil, errors.Wrap(err, "unable to setup secret mount") - } - sm.root = dir - - randID := identity.NewID() - fp := filepath.Join(dir, randID) - if err := ioutil.WriteFile(fp, sm.sm.data, 0600); err != nil { - sm.Release() - return nil, err - } - - if err := os.Chown(fp, int(sm.sm.mount.SecretOpt.Uid), int(sm.sm.mount.SecretOpt.Gid)); err != nil { - return nil, err - } - - if err := os.Chmod(fp, os.FileMode(sm.sm.mount.SecretOpt.Mode)); err != nil { - return nil, err - } - - return []mount.Mount{{ - Type: "bind", - Source: fp, - Options: []string{"ro", "rbind"}, - }}, nil -} - -func (sm *secretMountInstance) Release() error { - if sm.root != "" { - if err := mount.Unmount(sm.root, 0); err != nil { - return err - } - return os.RemoveAll(sm.root) - } - return nil -} - -func addDefaultEnvvar(env []string, k, v string) []string { - for _, e := range env { - if strings.HasPrefix(e, k+"=") { - return env - } - } - return append(env, k+"="+v) -} - -func (e *execOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Result, error) { - var mounts []executor.Mount - var root cache.Mountable - var readonlyRootFS bool - - var outputs []cache.Ref - - defer func() { - for _, o := range outputs { - if o != nil { - go o.Release(context.TODO()) - } - } - }() - - // loop over all mounts, fill in mounts, root and outputs - for _, m := range e.op.Mounts { - var mountable cache.Mountable - var ref cache.ImmutableRef - - if m.Dest == pb.RootMount && m.MountType != pb.MountType_BIND { - return nil, errors.Errorf("invalid mount type %s for %s", m.MountType.String(), m.Dest) - } - - // if mount is based on input validate and load it - if m.Input != pb.Empty { - if int(m.Input) > len(inputs) { - return nil, errors.Errorf("missing input %d", m.Input) - } - inp := inputs[int(m.Input)] - workerRef, ok := inp.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid reference for exec %T", inp.Sys()) - } - ref = workerRef.ImmutableRef - mountable = ref - } - - makeMutable := func(cache.ImmutableRef) (cache.MutableRef, error) { - desc := fmt.Sprintf("mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " ")) - return e.cm.New(ctx, ref, cache.WithDescription(desc)) - } - - switch m.MountType { - case pb.MountType_BIND: - // if mount creates an output - if m.Output != pb.SkipOutput { - // it it is readonly and not root then output is the input - if m.Readonly && ref != nil && m.Dest != pb.RootMount { - outputs = append(outputs, ref.Clone()) - } else { - // otherwise output and 
mount is the mutable child - active, err := makeMutable(ref) - if err != nil { - return nil, err - } - outputs = append(outputs, active) - mountable = active - } - } else if ref == nil { - // this case is empty readonly scratch without output that is not really useful for anything but don't error - active, err := makeMutable(ref) - if err != nil { - return nil, err - } - defer active.Release(context.TODO()) - mountable = active - } - - case pb.MountType_CACHE: - if m.CacheOpt == nil { - return nil, errors.Errorf("missing cache mount options") - } - mRef, err := e.getRefCacheDir(ctx, ref, m.CacheOpt.ID, m, m.CacheOpt.Sharing) - if err != nil { - return nil, err - } - mountable = mRef - defer func() { - go mRef.Release(context.TODO()) - }() - if m.Output != pb.SkipOutput && ref != nil { - outputs = append(outputs, ref.Clone()) - } - - case pb.MountType_TMPFS: - mountable = newTmpfs() - - case pb.MountType_SECRET: - secretMount, err := e.getSecretMountable(ctx, m) - if err != nil { - return nil, err - } - if secretMount == nil { - continue - } - mountable = secretMount - - case pb.MountType_SSH: - sshMount, err := e.getSSHMountable(ctx, m) - if err != nil { - return nil, err - } - if sshMount == nil { - continue - } - mountable = sshMount - - default: - return nil, errors.Errorf("mount type %s not implemented", m.MountType) - } - - // validate that there is a mount - if mountable == nil { - return nil, errors.Errorf("mount %s has no input", m.Dest) - } - - // if dest is root we need mutable ref even if there is no output - if m.Dest == pb.RootMount { - root = mountable - readonlyRootFS = m.Readonly - if m.Output == pb.SkipOutput && readonlyRootFS { - active, err := makeMutable(ref) - if err != nil { - return nil, err - } - defer func() { - go active.Release(context.TODO()) - }() - root = active - } - } else { - mounts = append(mounts, executor.Mount{Src: mountable, Dest: m.Dest, Readonly: m.Readonly, Selector: m.Selector}) - } - } - - // sort mounts so parents are mounted first - sort.Slice(mounts, func(i, j int) bool { - return mounts[i].Dest < mounts[j].Dest - }) - - extraHosts, err := parseExtraHosts(e.op.Meta.ExtraHosts) - if err != nil { - return nil, err - } - - meta := executor.Meta{ - Args: e.op.Meta.Args, - Env: e.op.Meta.Env, - Cwd: e.op.Meta.Cwd, - User: e.op.Meta.User, - ReadonlyRootFS: readonlyRootFS, - ExtraHosts: extraHosts, - NetMode: e.op.Network, - } - - if e.op.Meta.ProxyEnv != nil { - meta.Env = append(meta.Env, proxyEnvList(e.op.Meta.ProxyEnv)...) 
- } - meta.Env = addDefaultEnvvar(meta.Env, "PATH", utilsystem.DefaultPathEnv) - - stdout, stderr := logs.NewLogStreams(ctx, os.Getenv("BUILDKIT_DEBUG_EXEC_OUTPUT") == "1") - defer stdout.Close() - defer stderr.Close() - - if err := e.exec.Exec(ctx, meta, root, mounts, nil, stdout, stderr); err != nil { - return nil, errors.Wrapf(err, "executor failed running %v", meta.Args) - } - - refs := []solver.Result{} - for i, out := range outputs { - if mutable, ok := out.(cache.MutableRef); ok { - ref, err := mutable.Commit(ctx) - if err != nil { - return nil, errors.Wrapf(err, "error committing %s", mutable.ID()) - } - refs = append(refs, worker.NewWorkerRefResult(ref, e.w)) - } else { - refs = append(refs, worker.NewWorkerRefResult(out.(cache.ImmutableRef), e.w)) - } - outputs[i] = nil - } - return refs, nil -} - -func proxyEnvList(p *pb.ProxyEnv) []string { - out := []string{} - if v := p.HttpProxy; v != "" { - out = append(out, "HTTP_PROXY="+v, "http_proxy="+v) - } - if v := p.HttpsProxy; v != "" { - out = append(out, "HTTPS_PROXY="+v, "https_proxy="+v) - } - if v := p.FtpProxy; v != "" { - out = append(out, "FTP_PROXY="+v, "ftp_proxy="+v) - } - if v := p.NoProxy; v != "" { - out = append(out, "NO_PROXY="+v, "no_proxy="+v) - } - return out -} - -func newTmpfs() cache.Mountable { - return &tmpfs{} -} - -type tmpfs struct { -} - -func (f *tmpfs) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { - return &tmpfsMount{readonly: readonly}, nil -} - -type tmpfsMount struct { - readonly bool -} - -func (m *tmpfsMount) Mount() ([]mount.Mount, error) { - opt := []string{"nosuid"} - if m.readonly { - opt = append(opt, "ro") - } - return []mount.Mount{{ - Type: "tmpfs", - Source: "tmpfs", - Options: opt, - }}, nil -} -func (m *tmpfsMount) Release() error { - return nil -} - -var cacheRefsLocker = locker.New() -var sharedCacheRefs = &cacheRefs{} - -type cacheRefs struct { - mu sync.Mutex - shares map[string]*cacheRefShare -} - -func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) { - r.mu.Lock() - defer r.mu.Unlock() - - if r.shares == nil { - r.shares = map[string]*cacheRefShare{} - } - - share, ok := r.shares[key] - if ok { - return share.clone(), nil - } - - mref, err := fn() - if err != nil { - return nil, err - } - - share = &cacheRefShare{MutableRef: mref, main: r, key: key, refs: map[*cacheRef]struct{}{}} - r.shares[key] = share - - return share.clone(), nil -} - -type cacheRefShare struct { - cache.MutableRef - mu sync.Mutex - refs map[*cacheRef]struct{} - main *cacheRefs - key string -} - -func (r *cacheRefShare) clone() cache.MutableRef { - cacheRef := &cacheRef{cacheRefShare: r} - r.mu.Lock() - r.refs[cacheRef] = struct{}{} - r.mu.Unlock() - return cacheRef -} - -func (r *cacheRefShare) release(ctx context.Context) error { - if r.main != nil { - r.main.mu.Lock() - defer r.main.mu.Unlock() - delete(r.main.shares, r.key) - } - return r.MutableRef.Release(ctx) -} - -type cacheRef struct { - *cacheRefShare -} - -func (r *cacheRef) Release(ctx context.Context) error { - r.mu.Lock() - defer r.mu.Unlock() - delete(r.refs, r) - if len(r.refs) == 0 { - return r.release(ctx) - } - return nil -} - -func parseExtraHosts(ips []*pb.HostIP) ([]executor.HostIP, error) { - out := make([]executor.HostIP, len(ips)) - for i, hip := range ips { - ip := net.ParseIP(hip.IP) - if ip == nil { - return nil, errors.Errorf("failed to parse IP %s", hip.IP) - } - out[i] = executor.HostIP{ - IP: ip, - Host: hip.Host, - } - } - return out, nil -} diff --git 
a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go deleted file mode 100644 index 722861eeb4f0..000000000000 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go +++ /dev/null @@ -1,78 +0,0 @@ -package ops - -import ( - "context" - "sync" - - "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/source" - "github.com/moby/buildkit/worker" - digest "github.com/opencontainers/go-digest" -) - -const sourceCacheType = "buildkit.source.v0" - -type sourceOp struct { - mu sync.Mutex - op *pb.Op_Source - platform *pb.Platform - sm *source.Manager - src source.SourceInstance - w worker.Worker -} - -func NewSourceOp(_ solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm *source.Manager, w worker.Worker) (solver.Op, error) { - return &sourceOp{ - op: op, - sm: sm, - w: w, - platform: platform, - }, nil -} - -func (s *sourceOp) instance(ctx context.Context) (source.SourceInstance, error) { - s.mu.Lock() - defer s.mu.Unlock() - if s.src != nil { - return s.src, nil - } - id, err := source.FromLLB(s.op, s.platform) - if err != nil { - return nil, err - } - src, err := s.sm.Resolve(ctx, id) - if err != nil { - return nil, err - } - s.src = src - return s.src, nil -} - -func (s *sourceOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) { - src, err := s.instance(ctx) - if err != nil { - return nil, false, err - } - k, done, err := src.CacheKey(ctx, index) - if err != nil { - return nil, false, err - } - - return &solver.CacheMap{ - // TODO: add os/arch - Digest: digest.FromBytes([]byte(sourceCacheType + ":" + k)), - }, done, nil -} - -func (s *sourceOp) Exec(ctx context.Context, _ []solver.Result) (outputs []solver.Result, err error) { - src, err := s.instance(ctx) - if err != nil { - return nil, err - } - ref, err := src.Snapshot(ctx) - if err != nil { - return nil, err - } - return []solver.Result{worker.NewWorkerRefResult(ref, s.w)}, nil -} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/result.go b/vendor/github.com/moby/buildkit/solver/llbsolver/result.go deleted file mode 100644 index cd1959c0ba27..000000000000 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/result.go +++ /dev/null @@ -1,60 +0,0 @@ -package llbsolver - -import ( - "bytes" - "context" - "path" - - "github.com/moby/buildkit/cache/contenthash" - "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/worker" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "golang.org/x/sync/errgroup" -) - -func NewContentHashFunc(selectors []string) solver.ResultBasedCacheFunc { - return func(ctx context.Context, res solver.Result) (digest.Digest, error) { - ref, ok := res.Sys().(*worker.WorkerRef) - if !ok { - return "", errors.Errorf("invalid reference: %T", res) - } - - if len(selectors) == 0 { - selectors = []string{""} - } - - dgsts := make([][]byte, len(selectors)) - - eg, ctx := errgroup.WithContext(ctx) - - for i, sel := range selectors { - // FIXME(tonistiigi): enabling this parallelization seems to create wrong results for some big inputs(like gobuild) - // func(i int) { - // eg.Go(func() error { - dgst, err := contenthash.Checksum(ctx, ref.ImmutableRef, path.Join("/", sel)) - if err != nil { - return "", err - } - dgsts[i] = []byte(dgst) - // return nil - // }) - // }(i) - } - - if err := eg.Wait(); err != nil { - return "", err - } - - return digest.FromBytes(bytes.Join(dgsts, []byte{0})), nil - } -} - -func 
workerRefConverter(ctx context.Context, res solver.Result) (*solver.Remote, error) { - ref, ok := res.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid result: %T", res.Sys()) - } - - return ref.Worker.GetRemote(ctx, ref.ImmutableRef, true) -} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go deleted file mode 100644 index 6b258d92d48d..000000000000 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go +++ /dev/null @@ -1,313 +0,0 @@ -package llbsolver - -import ( - "context" - "strings" - "time" - - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/remotecache" - "github.com/moby/buildkit/client" - controlgateway "github.com/moby/buildkit/control/gateway" - "github.com/moby/buildkit/exporter" - "github.com/moby/buildkit/frontend" - "github.com/moby/buildkit/frontend/gateway" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/util/entitlements" - "github.com/moby/buildkit/util/progress" - "github.com/moby/buildkit/worker" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -const keyEntitlements = "llb.entitlements" - -type ExporterRequest struct { - Exporter exporter.ExporterInstance - CacheExporter remotecache.Exporter - CacheExportMode solver.CacheExportMode -} - -// ResolveWorkerFunc returns default worker for the temporary default non-distributed use cases -type ResolveWorkerFunc func() (worker.Worker, error) - -type Solver struct { - workerController *worker.Controller - solver *solver.Solver - resolveWorker ResolveWorkerFunc - frontends map[string]frontend.Frontend - resolveCacheImporter remotecache.ResolveCacheImporterFunc - platforms []specs.Platform - gatewayForwarder *controlgateway.GatewayForwarder -} - -func New(wc *worker.Controller, f map[string]frontend.Frontend, cache solver.CacheManager, resolveCI remotecache.ResolveCacheImporterFunc, gatewayForwarder *controlgateway.GatewayForwarder) (*Solver, error) { - s := &Solver{ - workerController: wc, - resolveWorker: defaultResolver(wc), - frontends: f, - resolveCacheImporter: resolveCI, - gatewayForwarder: gatewayForwarder, - } - - // executing is currently only allowed on default worker - w, err := wc.GetDefault() - if err != nil { - return nil, err - } - s.platforms = w.Platforms() - - s.solver = solver.NewSolver(solver.SolverOpt{ - ResolveOpFunc: s.resolver(), - DefaultCache: cache, - }) - return s, nil -} - -func (s *Solver) resolver() solver.ResolveOpFunc { - return func(v solver.Vertex, b solver.Builder) (solver.Op, error) { - w, err := s.resolveWorker() - if err != nil { - return nil, err - } - return w.ResolveOp(v, s.Bridge(b)) - } -} - -func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge { - return &llbBridge{ - builder: b, - frontends: s.frontends, - resolveWorker: s.resolveWorker, - resolveCacheImporter: s.resolveCacheImporter, - cms: map[string]solver.CacheManager{}, - platforms: s.platforms, - } -} - -func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest, exp ExporterRequest, ent []entitlements.Entitlement) (*client.SolveResponse, error) { - j, err := s.solver.NewJob(id) - if err != nil { - return nil, err - } - - defer j.Discard() - - set, err := entitlements.WhiteList(ent, supportedEntitlements()) - if err != nil { - return nil, err - } - j.SetValue(keyEntitlements, 
set) - - j.SessionID = session.FromContext(ctx) - - var res *frontend.Result - if s.gatewayForwarder != nil && req.Definition == nil && req.Frontend == "" { - fwd := gateway.NewBridgeForwarder(ctx, s.Bridge(j), s.workerController) - defer fwd.Discard() - if err := s.gatewayForwarder.RegisterBuild(ctx, id, fwd); err != nil { - return nil, err - } - defer s.gatewayForwarder.UnregisterBuild(ctx, id) - - var err error - select { - case <-fwd.Done(): - res, err = fwd.Result() - case <-ctx.Done(): - err = ctx.Err() - } - if err != nil { - return nil, err - } - } else { - res, err = s.Bridge(j).Solve(ctx, req) - if err != nil { - return nil, err - } - } - - defer func() { - res.EachRef(func(ref solver.CachedResult) error { - go ref.Release(context.TODO()) - return nil - }) - }() - - var exporterResponse map[string]string - if exp := exp.Exporter; exp != nil { - inp := exporter.Source{ - Metadata: res.Metadata, - } - if inp.Metadata == nil { - inp.Metadata = make(map[string][]byte) - } - if res := res.Ref; res != nil { - workerRef, ok := res.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid reference: %T", res.Sys()) - } - inp.Ref = workerRef.ImmutableRef - } - if res.Refs != nil { - m := make(map[string]cache.ImmutableRef, len(res.Refs)) - for k, res := range res.Refs { - if res == nil { - m[k] = nil - } else { - workerRef, ok := res.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid reference: %T", res.Sys()) - } - m[k] = workerRef.ImmutableRef - } - } - inp.Refs = m - } - - if err := inVertexContext(j.Context(ctx), exp.Name(), "", func(ctx context.Context) error { - exporterResponse, err = exp.Export(ctx, inp) - return err - }); err != nil { - return nil, err - } - } - - if e := exp.CacheExporter; e != nil { - if err := inVertexContext(j.Context(ctx), "exporting cache", "", func(ctx context.Context) error { - prepareDone := oneOffProgress(ctx, "preparing build cache for export") - if err := res.EachRef(func(res solver.CachedResult) error { - // all keys have same export chain so exporting others is not needed - _, err := res.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{ - Convert: workerRefConverter, - Mode: exp.CacheExportMode, - }) - return err - }); err != nil { - return prepareDone(err) - } - prepareDone(nil) - return e.Finalize(ctx) - }); err != nil { - return nil, err - } - } - - if exporterResponse == nil { - exporterResponse = make(map[string]string) - } - - for k, v := range res.Metadata { - if strings.HasPrefix(k, "frontend.") { - exporterResponse[k] = string(v) - } - } - - return &client.SolveResponse{ - ExporterResponse: exporterResponse, - }, nil -} - -func (s *Solver) Status(ctx context.Context, id string, statusChan chan *client.SolveStatus) error { - j, err := s.solver.Get(id) - if err != nil { - close(statusChan) - return err - } - return j.Status(ctx, statusChan) -} - -func defaultResolver(wc *worker.Controller) ResolveWorkerFunc { - return func() (worker.Worker, error) { - return wc.GetDefault() - } -} - -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.FromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - // TODO: set error on status - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err - } -} - -func inVertexContext(ctx context.Context, name, id string, f func(ctx context.Context) error) error { - if id == "" { - id = identity.NewID() - } - v := 
client.Vertex{ - Digest: digest.FromBytes([]byte(id)), - Name: name, - } - pw, _, ctx := progress.FromContext(ctx, progress.WithMetadata("vertex", v.Digest)) - notifyStarted(ctx, &v, false) - defer pw.Close() - err := f(ctx) - notifyCompleted(ctx, &v, err, false) - return err -} - -func notifyStarted(ctx context.Context, v *client.Vertex, cached bool) { - pw, _, _ := progress.FromContext(ctx) - defer pw.Close() - now := time.Now() - v.Started = &now - v.Completed = nil - v.Cached = cached - pw.Write(v.Digest.String(), *v) -} - -func notifyCompleted(ctx context.Context, v *client.Vertex, err error, cached bool) { - pw, _, _ := progress.FromContext(ctx) - defer pw.Close() - now := time.Now() - if v.Started == nil { - v.Started = &now - } - v.Completed = &now - v.Cached = cached - if err != nil { - v.Error = err.Error() - } - pw.Write(v.Digest.String(), *v) -} - -var AllowNetworkHostUnstable = false // TODO: enable in constructor - -func supportedEntitlements() []entitlements.Entitlement { - out := []entitlements.Entitlement{} // nil means no filter - if AllowNetworkHostUnstable { - out = append(out, entitlements.EntitlementNetworkHost) - } - return out -} - -func loadEntitlements(b solver.Builder) (entitlements.Set, error) { - var ent entitlements.Set = map[entitlements.Entitlement]struct{}{} - err := b.EachValue(context.TODO(), keyEntitlements, func(v interface{}) error { - set, ok := v.(entitlements.Set) - if !ok { - return errors.Errorf("invalid entitlements %T", v) - } - for k := range set { - ent[k] = struct{}{} - } - return nil - }) - if err != nil { - return nil, err - } - return ent, nil -} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go b/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go deleted file mode 100644 index f4cd92528667..000000000000 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go +++ /dev/null @@ -1,222 +0,0 @@ -package llbsolver - -import ( - "strings" - - "github.com/containerd/containerd/platforms" - "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/source" - "github.com/moby/buildkit/util/entitlements" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type vertex struct { - sys interface{} - options solver.VertexOptions - inputs []solver.Edge - digest digest.Digest - name string -} - -func (v *vertex) Digest() digest.Digest { - return v.digest -} - -func (v *vertex) Sys() interface{} { - return v.sys -} - -func (v *vertex) Options() solver.VertexOptions { - return v.options -} - -func (v *vertex) Inputs() []solver.Edge { - return v.inputs -} - -func (v *vertex) Name() string { - if name, ok := v.options.Description["llb.customname"]; ok { - return name - } - return v.name -} - -type LoadOpt func(*pb.Op, *pb.OpMetadata, *solver.VertexOptions) error - -func WithValidateCaps() LoadOpt { - cs := pb.Caps.CapSet(pb.Caps.All()) - return func(_ *pb.Op, md *pb.OpMetadata, opt *solver.VertexOptions) error { - if md != nil { - for c := range md.Caps { - if err := cs.Supports(c); err != nil { - return err - } - } - } - return nil - } -} - -func WithCacheSources(cms []solver.CacheManager) LoadOpt { - return func(_ *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error { - opt.CacheSources = cms - return nil - } -} - -func RuntimePlatforms(p []specs.Platform) LoadOpt { - var defaultPlatform *pb.Platform - pp := make([]specs.Platform, len(p)) - for i := range p { - pp[i] = 
platforms.Normalize(p[i]) - } - return func(op *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error { - if op.Platform == nil { - if defaultPlatform == nil { - p := platforms.DefaultSpec() - defaultPlatform = &pb.Platform{ - OS: p.OS, - Architecture: p.Architecture, - } - } - op.Platform = defaultPlatform - } - if _, ok := op.Op.(*pb.Op_Exec); ok { - var found bool - for _, pp := range pp { - if pp.OS == op.Platform.OS && pp.Architecture == op.Platform.Architecture && pp.Variant == op.Platform.Variant { - found = true - break - } - } - if !found { - return errors.Errorf("runtime execution on platform %s not supported", platforms.Format(specs.Platform{OS: op.Platform.OS, Architecture: op.Platform.Architecture, Variant: op.Platform.Variant})) - } - } - return nil - } -} - -func ValidateEntitlements(ent entitlements.Set) LoadOpt { - return func(op *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error { - switch op := op.Op.(type) { - case *pb.Op_Exec: - if op.Exec.Network == pb.NetMode_HOST { - if !ent.Allowed(entitlements.EntitlementNetworkHost) { - return errors.Errorf("%s is not allowed", entitlements.EntitlementNetworkHost) - } - } - if op.Exec.Network == pb.NetMode_NONE { - if !ent.Allowed(entitlements.EntitlementNetworkNone) { - return errors.Errorf("%s is not allowed", entitlements.EntitlementNetworkNone) - } - } - } - return nil - } -} - -func Load(def *pb.Definition, opts ...LoadOpt) (solver.Edge, error) { - return loadLLB(def, func(dgst digest.Digest, pbOp *pb.Op, load func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error) { - opMetadata := def.Metadata[dgst] - vtx, err := newVertex(dgst, pbOp, &opMetadata, load, opts...) - if err != nil { - return nil, err - } - return vtx, nil - }) -} - -func newVertex(dgst digest.Digest, op *pb.Op, opMeta *pb.OpMetadata, load func(digest.Digest) (solver.Vertex, error), opts ...LoadOpt) (*vertex, error) { - opt := solver.VertexOptions{} - if opMeta != nil { - opt.IgnoreCache = opMeta.IgnoreCache - opt.Description = opMeta.Description - if opMeta.ExportCache != nil { - opt.ExportCache = &opMeta.ExportCache.Value - } - } - for _, fn := range opts { - if err := fn(op, opMeta, &opt); err != nil { - return nil, err - } - } - vtx := &vertex{sys: op, options: opt, digest: dgst, name: llbOpName(op)} - for _, in := range op.Inputs { - sub, err := load(in.Digest) - if err != nil { - return nil, err - } - vtx.inputs = append(vtx.inputs, solver.Edge{Index: solver.Index(in.Index), Vertex: sub}) - } - return vtx, nil -} - -// loadLLB loads LLB. -// fn is executed sequentially. 
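The loadLLB helper whose doc comment closes the hunk above unmarshals every serialized Op into a digest-keyed map, then materializes vertices through a recursive closure with a memoization cache, so a subgraph referenced by several inputs is built exactly once. A self-contained sketch of that memoized recursion (node and buildDAG are illustrative names; the real code keys by digest.Digest and builds solver.Vertex values):

package main

import "fmt"

// node is a stand-in for a pb.Op: a payload plus edges to input nodes.
type node struct {
	name   string
	inputs []string
}

// buildDAG materializes one vertex per reachable node, memoizing results
// so shared subgraphs are visited once -- the same shape as the removed
// loadLLB's rec closure.
func buildDAG(all map[string]node, root string) (string, error) {
	cache := map[string]string{}
	var rec func(id string) (string, error)
	rec = func(id string) (string, error) {
		if v, ok := cache[id]; ok { // memo hit: input already built
			return v, nil
		}
		n, ok := all[id]
		if !ok {
			return "", fmt.Errorf("unknown node %q", id)
		}
		out := n.name + "("
		for i, in := range n.inputs {
			sub, err := rec(in) // depth-first over the inputs
			if err != nil {
				return "", err
			}
			if i > 0 {
				out += ","
			}
			out += sub
		}
		out += ")"
		cache[id] = out
		return out, nil
	}
	return rec(root)
}

func main() {
	all := map[string]node{
		"src":  {name: "source"},
		"exec": {name: "exec", inputs: []string{"src", "src"}}, // shared twice
	}
	v, err := buildDAG(all, "exec")
	fmt.Println(v, err) // exec(source(),source()) <nil>
}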
-func loadLLB(def *pb.Definition, fn func(digest.Digest, *pb.Op, func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error)) (solver.Edge, error) { - if len(def.Def) == 0 { - return solver.Edge{}, errors.New("invalid empty definition") - } - - allOps := make(map[digest.Digest]*pb.Op) - - var dgst digest.Digest - - for _, dt := range def.Def { - var op pb.Op - if err := (&op).Unmarshal(dt); err != nil { - return solver.Edge{}, errors.Wrap(err, "failed to parse llb proto op") - } - dgst = digest.FromBytes(dt) - allOps[dgst] = &op - } - - lastOp := allOps[dgst] - delete(allOps, dgst) - dgst = lastOp.Inputs[0].Digest - - cache := make(map[digest.Digest]solver.Vertex) - - var rec func(dgst digest.Digest) (solver.Vertex, error) - rec = func(dgst digest.Digest) (solver.Vertex, error) { - if v, ok := cache[dgst]; ok { - return v, nil - } - v, err := fn(dgst, allOps[dgst], rec) - if err != nil { - return nil, err - } - cache[dgst] = v - return v, nil - } - - v, err := rec(dgst) - if err != nil { - return solver.Edge{}, err - } - return solver.Edge{Vertex: v, Index: solver.Index(lastOp.Inputs[0].Index)}, nil -} - -func llbOpName(op *pb.Op) string { - switch op := op.Op.(type) { - case *pb.Op_Source: - if id, err := source.FromLLB(op, nil); err == nil { - if id, ok := id.(*source.LocalIdentifier); ok { - if len(id.IncludePatterns) == 1 { - return op.Source.Identifier + " (" + id.IncludePatterns[0] + ")" - } - } - } - return op.Source.Identifier - case *pb.Op_Exec: - return strings.Join(op.Exec.Meta.Args, " ") - case *pb.Op_Build: - return "build" - default: - return "unknown" - } -} diff --git a/vendor/github.com/moby/buildkit/solver/memorycachestorage.go b/vendor/github.com/moby/buildkit/solver/memorycachestorage.go deleted file mode 100644 index 75c8abdeadce..000000000000 --- a/vendor/github.com/moby/buildkit/solver/memorycachestorage.go +++ /dev/null @@ -1,307 +0,0 @@ -package solver - -import ( - "context" - "sync" - "time" - - "github.com/pkg/errors" -) - -func NewInMemoryCacheStorage() CacheKeyStorage { - return &inMemoryStore{ - byID: map[string]*inMemoryKey{}, - byResult: map[string]map[string]struct{}{}, - } -} - -type inMemoryStore struct { - mu sync.RWMutex - byID map[string]*inMemoryKey - byResult map[string]map[string]struct{} -} - -type inMemoryKey struct { - id string - results map[string]CacheResult - links map[CacheInfoLink]map[string]struct{} - backlinks map[string]struct{} -} - -func (s *inMemoryStore) Exists(id string) bool { - s.mu.RLock() - defer s.mu.RUnlock() - if k, ok := s.byID[id]; ok { - return len(k.links) > 0 || len(k.results) > 0 - } - return false -} - -func newInMemoryKey(id string) *inMemoryKey { - return &inMemoryKey{ - results: map[string]CacheResult{}, - links: map[CacheInfoLink]map[string]struct{}{}, - backlinks: map[string]struct{}{}, - id: id, - } -} - -func (s *inMemoryStore) Walk(fn func(string) error) error { - s.mu.RLock() - ids := make([]string, 0, len(s.byID)) - for id := range s.byID { - ids = append(ids, id) - } - s.mu.RUnlock() - - for _, id := range ids { - if err := fn(id); err != nil { - return err - } - } - return nil -} - -func (s *inMemoryStore) WalkResults(id string, fn func(CacheResult) error) error { - s.mu.RLock() - - k, ok := s.byID[id] - if !ok { - s.mu.RUnlock() - return nil - } - copy := make([]CacheResult, 0, len(k.results)) - for _, res := range k.results { - copy = append(copy, res) - } - s.mu.RUnlock() - - for _, res := range copy { - if err := fn(res); err != nil { - return err - } - } - return nil -} - -func (s 
*inMemoryStore) Load(id string, resultID string) (CacheResult, error) { - s.mu.RLock() - defer s.mu.RUnlock() - k, ok := s.byID[id] - if !ok { - return CacheResult{}, errors.Wrapf(ErrNotFound, "no such key %s", id) - } - r, ok := k.results[resultID] - if !ok { - return CacheResult{}, errors.WithStack(ErrNotFound) - } - return r, nil -} - -func (s *inMemoryStore) AddResult(id string, res CacheResult) error { - s.mu.Lock() - defer s.mu.Unlock() - k, ok := s.byID[id] - if !ok { - k = newInMemoryKey(id) - s.byID[id] = k - } - k.results[res.ID] = res - m, ok := s.byResult[res.ID] - if !ok { - m = map[string]struct{}{} - s.byResult[res.ID] = m - } - m[id] = struct{}{} - return nil -} - -func (s *inMemoryStore) WalkIDsByResult(resultID string, fn func(string) error) error { - s.mu.Lock() - - ids := map[string]struct{}{} - for id := range s.byResult[resultID] { - ids[id] = struct{}{} - } - s.mu.Unlock() - - for id := range ids { - if err := fn(id); err != nil { - return err - } - } - - return nil -} - -func (s *inMemoryStore) Release(resultID string) error { - s.mu.Lock() - defer s.mu.Unlock() - - ids, ok := s.byResult[resultID] - if !ok { - return nil - } - - for id := range ids { - k, ok := s.byID[id] - if !ok { - continue - } - - delete(k.results, resultID) - delete(s.byResult[resultID], id) - if len(s.byResult[resultID]) == 0 { - delete(s.byResult, resultID) - } - - s.emptyBranchWithParents(k) - } - - return nil -} - -func (s *inMemoryStore) emptyBranchWithParents(k *inMemoryKey) { - if len(k.results) != 0 || len(k.links) != 0 { - return - } - for id := range k.backlinks { - p, ok := s.byID[id] - if !ok { - continue - } - for l := range p.links { - delete(p.links[l], k.id) - if len(p.links[l]) == 0 { - delete(p.links, l) - } - } - s.emptyBranchWithParents(p) - } - - delete(s.byID, k.id) -} - -func (s *inMemoryStore) AddLink(id string, link CacheInfoLink, target string) error { - s.mu.Lock() - defer s.mu.Unlock() - k, ok := s.byID[id] - if !ok { - k = newInMemoryKey(id) - s.byID[id] = k - } - k2, ok := s.byID[target] - if !ok { - k2 = newInMemoryKey(target) - s.byID[target] = k2 - } - m, ok := k.links[link] - if !ok { - m = map[string]struct{}{} - k.links[link] = m - } - - k2.backlinks[id] = struct{}{} - m[target] = struct{}{} - return nil -} - -func (s *inMemoryStore) WalkLinks(id string, link CacheInfoLink, fn func(id string) error) error { - s.mu.RLock() - k, ok := s.byID[id] - if !ok { - s.mu.RUnlock() - return nil - } - var links []string - for target := range k.links[link] { - links = append(links, target) - } - s.mu.RUnlock() - - for _, t := range links { - if err := fn(t); err != nil { - return err - } - } - return nil -} - -func (s *inMemoryStore) HasLink(id string, link CacheInfoLink, target string) bool { - s.mu.RLock() - defer s.mu.RUnlock() - if k, ok := s.byID[id]; ok { - if v, ok := k.links[link]; ok { - if _, ok := v[target]; ok { - return true - } - } - } - return false -} - -func (s *inMemoryStore) WalkBacklinks(id string, fn func(id string, link CacheInfoLink) error) error { - s.mu.RLock() - k, ok := s.byID[id] - if !ok { - s.mu.RUnlock() - return nil - } - var outIDs []string - var outLinks []CacheInfoLink - for bid := range k.backlinks { - b, ok := s.byID[bid] - if !ok { - continue - } - for l, m := range b.links { - if _, ok := m[id]; !ok { - continue - } - outIDs = append(outIDs, bid) - outLinks = append(outLinks, CacheInfoLink{ - Digest: rootKey(l.Digest, l.Output), - Input: l.Input, - Selector: l.Selector, - }) - } - } - s.mu.RUnlock() - - for i := range outIDs { - if 
err := fn(outIDs[i], outLinks[i]); err != nil { - return err - } - } - return nil -} - -func NewInMemoryResultStorage() CacheResultStorage { - return &inMemoryResultStore{m: &sync.Map{}} -} - -type inMemoryResultStore struct { - m *sync.Map -} - -func (s *inMemoryResultStore) Save(r Result) (CacheResult, error) { - s.m.Store(r.ID(), r) - return CacheResult{ID: r.ID(), CreatedAt: time.Now()}, nil -} - -func (s *inMemoryResultStore) Load(ctx context.Context, res CacheResult) (Result, error) { - v, ok := s.m.Load(res.ID) - if !ok { - return nil, errors.WithStack(ErrNotFound) - } - return v.(Result), nil -} - -func (s *inMemoryResultStore) LoadRemote(ctx context.Context, res CacheResult) (*Remote, error) { - return nil, nil -} - -func (s *inMemoryResultStore) Exists(id string) bool { - _, ok := s.m.Load(id) - return ok -} diff --git a/vendor/github.com/moby/buildkit/solver/pb/attr.go b/vendor/github.com/moby/buildkit/solver/pb/attr.go deleted file mode 100644 index f44c4b477101..000000000000 --- a/vendor/github.com/moby/buildkit/solver/pb/attr.go +++ /dev/null @@ -1,23 +0,0 @@ -package pb - -const AttrKeepGitDir = "git.keepgitdir" -const AttrFullRemoteURL = "git.fullurl" -const AttrLocalSessionID = "local.session" -const AttrLocalUniqueID = "local.unique" -const AttrIncludePatterns = "local.includepattern" -const AttrFollowPaths = "local.followpaths" -const AttrExcludePatterns = "local.excludepatterns" -const AttrSharedKeyHint = "local.sharedkeyhint" -const AttrLLBDefinitionFilename = "llbbuild.filename" - -const AttrHTTPChecksum = "http.checksum" -const AttrHTTPFilename = "http.filename" -const AttrHTTPPerm = "http.perm" -const AttrHTTPUID = "http.uid" -const AttrHTTPGID = "http.gid" - -const AttrImageResolveMode = "image.resolvemode" -const AttrImageResolveModeDefault = "default" -const AttrImageResolveModeForcePull = "pull" -const AttrImageResolveModePreferLocal = "local" -const AttrImageRecordType = "image.recordtype" diff --git a/vendor/github.com/moby/buildkit/solver/pb/caps.go b/vendor/github.com/moby/buildkit/solver/pb/caps.go deleted file mode 100644 index b81ec62c419a..000000000000 --- a/vendor/github.com/moby/buildkit/solver/pb/caps.go +++ /dev/null @@ -1,259 +0,0 @@ -package pb - -import "github.com/moby/buildkit/util/apicaps" - -var Caps apicaps.CapList - -// Every backwards or forwards non-compatible change needs to add a new capability row. -// By default new capabilities should be experimental. After merge a capability is -// considered immutable. After a capability is marked stable it should not be disabled. 
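The capability table that the deleted caps.go declares after this comment exists so client and daemon can negotiate features: every incompatible change to the LLB wire format gets a named, immutable row, and WithValidateCaps (removed earlier in vertex.go) rejects any op whose metadata claims a capability the solver's cap set cannot satisfy. A toy version of that check (CapID and CapSet here are illustrative, not the real apicaps API):

package main

import "fmt"

// CapID names one backwards- or forwards-incompatible feature of the wire
// format; CapSet is the set of capabilities one side supports.
type CapID string
type CapSet map[CapID]bool

// Supports mirrors the per-op check done by the removed WithValidateCaps.
func (s CapSet) Supports(id CapID) error {
	if !s[id] {
		return fmt.Errorf("capability %s is not supported", id)
	}
	return nil
}

func main() {
	solver := CapSet{"exec.mount.bind": true, "exec.mount.cache": true}
	// Capabilities an op from a newer client declares in its metadata:
	needs := []CapID{"exec.mount.bind", "exec.mount.ssh"}
	for _, c := range needs {
		if err := solver.Supports(c); err != nil {
			fmt.Println("reject op:", err)
		}
	}
	// Output: reject op: capability exec.mount.ssh is not supported
}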
- -const ( - CapSourceImage apicaps.CapID = "source.image" - CapSourceImageResolveMode apicaps.CapID = "source.image.resolvemode" - CapSourceLocal apicaps.CapID = "source.local" - CapSourceLocalUnique apicaps.CapID = "source.local.unique" - CapSourceLocalSessionID apicaps.CapID = "source.local.sessionid" - CapSourceLocalIncludePatterns apicaps.CapID = "source.local.includepatterns" - CapSourceLocalFollowPaths apicaps.CapID = "source.local.followpaths" - CapSourceLocalExcludePatterns apicaps.CapID = "source.local.excludepatterns" - CapSourceLocalSharedKeyHint apicaps.CapID = "source.local.sharedkeyhint" - - CapSourceGit apicaps.CapID = "source.git" - CapSourceGitKeepDir apicaps.CapID = "source.git.keepgitdir" - CapSourceGitFullURL apicaps.CapID = "source.git.fullurl" - - CapSourceHTTP apicaps.CapID = "source.http" - CapSourceHTTPChecksum apicaps.CapID = "source.http.checksum" - CapSourceHTTPPerm apicaps.CapID = "source.http.perm" - CapSourceHTTPUIDGID apicaps.CapID = "soruce.http.uidgid" - - CapBuildOpLLBFileName apicaps.CapID = "source.buildop.llbfilename" - - CapExecMetaBase apicaps.CapID = "exec.meta.base" - CapExecMetaProxy apicaps.CapID = "exec.meta.proxyenv" - CapExecMetaNetwork apicaps.CapID = "exec.meta.network" - CapExecMetaSetsDefaultPath apicaps.CapID = "exec.meta.setsdefaultpath" - CapExecMountBind apicaps.CapID = "exec.mount.bind" - CapExecMountCache apicaps.CapID = "exec.mount.cache" - CapExecMountCacheSharing apicaps.CapID = "exec.mount.cache.sharing" - CapExecMountSelector apicaps.CapID = "exec.mount.selector" - CapExecMountTmpfs apicaps.CapID = "exec.mount.tmpfs" - CapExecMountSecret apicaps.CapID = "exec.mount.secret" - CapExecMountSSH apicaps.CapID = "exec.mount.ssh" - CapExecCgroupsMounted apicaps.CapID = "exec.cgroup" - - CapConstraints apicaps.CapID = "constraints" - CapPlatform apicaps.CapID = "platform" - - CapMetaIgnoreCache apicaps.CapID = "meta.ignorecache" - CapMetaDescription apicaps.CapID = "meta.description" - CapMetaExportCache apicaps.CapID = "meta.exportcache" -) - -func init() { - - Caps.Init(apicaps.Cap{ - ID: CapSourceImage, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceImageResolveMode, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceLocal, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceLocalUnique, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceLocalSessionID, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceLocalIncludePatterns, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceLocalFollowPaths, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceLocalExcludePatterns, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceLocalSharedKeyHint, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - Caps.Init(apicaps.Cap{ - ID: CapSourceGit, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceGitKeepDir, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceGitFullURL, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceHTTP, - Enabled: true, - Status: 
apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceHTTPChecksum, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceHTTPPerm, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceHTTPUIDGID, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapBuildOpLLBFileName, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMetaBase, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMetaProxy, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMetaNetwork, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMetaSetsDefaultPath, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMountBind, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMountCache, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMountCacheSharing, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMountSelector, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMountTmpfs, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMountSecret, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMountSSH, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecCgroupsMounted, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapConstraints, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapPlatform, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapMetaIgnoreCache, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapMetaDescription, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapMetaExportCache, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - -} diff --git a/vendor/github.com/moby/buildkit/solver/pb/const.go b/vendor/github.com/moby/buildkit/solver/pb/const.go deleted file mode 100644 index c2d20b29f204..000000000000 --- a/vendor/github.com/moby/buildkit/solver/pb/const.go +++ /dev/null @@ -1,25 +0,0 @@ -package pb - -// InputIndex is incrementing index to the input vertex -type InputIndex int64 - -// OutputIndex is incrementing index that another vertex can depend on -type OutputIndex int64 - -// RootMount is a base mountpoint -const RootMount = "/" - -// SkipOutput marks a disabled output index -const SkipOutput OutputIndex = -1 - -// Empty marks an input with no content -const Empty InputIndex = -1 - -// LLBBuilder is a special builder for BuildOp that directly builds LLB -const LLBBuilder InputIndex = -1 - -// LLBDefinitionInput marks an input that contains LLB definition for BuildOp -const LLBDefinitionInput = "buildkit.llb.definition" - -// LLBDefaultDefinitionFile is a filename containing the definition in LLBBuilder -const LLBDefaultDefinitionFile = LLBDefinitionInput diff --git 
a/vendor/github.com/moby/buildkit/solver/pb/generate.go b/vendor/github.com/moby/buildkit/solver/pb/generate.go deleted file mode 100644 index c31e148f2adf..000000000000 --- a/vendor/github.com/moby/buildkit/solver/pb/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package pb - -//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. ops.proto diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go deleted file mode 100644 index fd97675db717..000000000000 --- a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go +++ /dev/null @@ -1,6131 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: ops.proto - -/* - Package pb is a generated protocol buffer package. - - Package pb provides the protobuf definition of LLB: low-level builder instruction. - LLB is DAG-structured; Op represents a vertex, and Definition represents a graph. - - It is generated from these files: - ops.proto - - It has these top-level messages: - Op - Platform - Input - ExecOp - Meta - Mount - CacheOpt - SecretOpt - SSHOpt - CopyOp - CopySource - SourceOp - BuildOp - BuildInput - OpMetadata - ExportCache - ProxyEnv - WorkerConstraints - Definition - HostIP -*/ -package pb - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" - -import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" -import github_com_moby_buildkit_util_apicaps "github.com/moby/buildkit/util/apicaps" - -import sortkeys "github.com/gogo/protobuf/sortkeys" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
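The three-line generate.go removed above is the whole build recipe for this package: Go's //go:generate convention records the exact code-generation command next to its output, so the several-thousand-line ops.pb.go that this hunk goes on to delete can be reproduced from ops.proto at any time. The file, reproduced with explanatory comments (assumes protoc and gogo's protoc-gen-gogofaster plugin are on PATH):

// Package pb holds the generated LLB types; generate.go's only job is to
// carry the directive below. Running `go generate` in this directory
// re-creates ops.pb.go from ops.proto.
package pb

//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. ops.proto

Invoked as `go generate ./...`, the Go tool scans source files for these directives and shells out to each recorded command; it never parses the proto itself.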
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type NetMode int32 - -const ( - NetMode_UNSET NetMode = 0 - NetMode_HOST NetMode = 1 - NetMode_NONE NetMode = 2 -) - -var NetMode_name = map[int32]string{ - 0: "UNSET", - 1: "HOST", - 2: "NONE", -} -var NetMode_value = map[string]int32{ - "UNSET": 0, - "HOST": 1, - "NONE": 2, -} - -func (x NetMode) String() string { - return proto.EnumName(NetMode_name, int32(x)) -} -func (NetMode) EnumDescriptor() ([]byte, []int) { return fileDescriptorOps, []int{0} } - -// MountType defines a type of a mount from a supported set -type MountType int32 - -const ( - MountType_BIND MountType = 0 - MountType_SECRET MountType = 1 - MountType_SSH MountType = 2 - MountType_CACHE MountType = 3 - MountType_TMPFS MountType = 4 -) - -var MountType_name = map[int32]string{ - 0: "BIND", - 1: "SECRET", - 2: "SSH", - 3: "CACHE", - 4: "TMPFS", -} -var MountType_value = map[string]int32{ - "BIND": 0, - "SECRET": 1, - "SSH": 2, - "CACHE": 3, - "TMPFS": 4, -} - -func (x MountType) String() string { - return proto.EnumName(MountType_name, int32(x)) -} -func (MountType) EnumDescriptor() ([]byte, []int) { return fileDescriptorOps, []int{1} } - -// CacheSharingOpt defines different sharing modes for cache mount -type CacheSharingOpt int32 - -const ( - // SHARED cache mount can be used concurrently by multiple writers - CacheSharingOpt_SHARED CacheSharingOpt = 0 - // PRIVATE creates a new mount if there are multiple writers - CacheSharingOpt_PRIVATE CacheSharingOpt = 1 - // LOCKED pauses second writer until first one releases the mount - CacheSharingOpt_LOCKED CacheSharingOpt = 2 -) - -var CacheSharingOpt_name = map[int32]string{ - 0: "SHARED", - 1: "PRIVATE", - 2: "LOCKED", -} -var CacheSharingOpt_value = map[string]int32{ - "SHARED": 0, - "PRIVATE": 1, - "LOCKED": 2, -} - -func (x CacheSharingOpt) String() string { - return proto.EnumName(CacheSharingOpt_name, int32(x)) -} -func (CacheSharingOpt) EnumDescriptor() ([]byte, []int) { return fileDescriptorOps, []int{2} } - -// Op represents a vertex of the LLB DAG. -type Op struct { - // inputs is a set of input edges. 
- Inputs []*Input `protobuf:"bytes,1,rep,name=inputs" json:"inputs,omitempty"` - // Types that are valid to be assigned to Op: - // *Op_Exec - // *Op_Source - // *Op_Copy - // *Op_Build - Op isOp_Op `protobuf_oneof:"op"` - Platform *Platform `protobuf:"bytes,10,opt,name=platform" json:"platform,omitempty"` - Constraints *WorkerConstraints `protobuf:"bytes,11,opt,name=constraints" json:"constraints,omitempty"` -} - -func (m *Op) Reset() { *m = Op{} } -func (m *Op) String() string { return proto.CompactTextString(m) } -func (*Op) ProtoMessage() {} -func (*Op) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{0} } - -type isOp_Op interface { - isOp_Op() - MarshalTo([]byte) (int, error) - Size() int -} - -type Op_Exec struct { - Exec *ExecOp `protobuf:"bytes,2,opt,name=exec,oneof"` -} -type Op_Source struct { - Source *SourceOp `protobuf:"bytes,3,opt,name=source,oneof"` -} -type Op_Copy struct { - Copy *CopyOp `protobuf:"bytes,4,opt,name=copy,oneof"` -} -type Op_Build struct { - Build *BuildOp `protobuf:"bytes,5,opt,name=build,oneof"` -} - -func (*Op_Exec) isOp_Op() {} -func (*Op_Source) isOp_Op() {} -func (*Op_Copy) isOp_Op() {} -func (*Op_Build) isOp_Op() {} - -func (m *Op) GetOp() isOp_Op { - if m != nil { - return m.Op - } - return nil -} - -func (m *Op) GetInputs() []*Input { - if m != nil { - return m.Inputs - } - return nil -} - -func (m *Op) GetExec() *ExecOp { - if x, ok := m.GetOp().(*Op_Exec); ok { - return x.Exec - } - return nil -} - -func (m *Op) GetSource() *SourceOp { - if x, ok := m.GetOp().(*Op_Source); ok { - return x.Source - } - return nil -} - -func (m *Op) GetCopy() *CopyOp { - if x, ok := m.GetOp().(*Op_Copy); ok { - return x.Copy - } - return nil -} - -func (m *Op) GetBuild() *BuildOp { - if x, ok := m.GetOp().(*Op_Build); ok { - return x.Build - } - return nil -} - -func (m *Op) GetPlatform() *Platform { - if m != nil { - return m.Platform - } - return nil -} - -func (m *Op) GetConstraints() *WorkerConstraints { - if m != nil { - return m.Constraints - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
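The XXX_OneofFuncs machinery that follows is generated plumbing for the pattern visible just above: a proto oneof becomes a small sealed interface (isOp_Op) with one wrapper struct per case, nil-safe Get* accessors, and a type switch wherever the active case is consumed. The same encoding outside protobuf, as a compact sketch (Shape, Holder, and area are illustrative names):

package main

import "fmt"

// isShape is the sealed "oneof" interface: only wrapper types in this
// package can implement the unexported marker method.
type isShape interface{ isShape() }

type Circle struct{ R float64 }
type Rect struct{ W, H float64 }

func (*Circle) isShape() {}
func (*Rect) isShape()   {}

// Holder plays the role of the message carrying the oneof field.
type Holder struct{ Shape isShape }

// GetCircle is nil-safe, like the generated Get* accessors above: it
// returns nil unless the Circle case is the one currently set.
func (h *Holder) GetCircle() *Circle {
	if h == nil {
		return nil
	}
	c, _ := h.Shape.(*Circle)
	return c
}

// area dispatches on the active case, as the generated marshalers do.
func area(h *Holder) float64 {
	switch s := h.Shape.(type) {
	case *Circle:
		return 3.14159 * s.R * s.R
	case *Rect:
		return s.W * s.H
	default:
		return 0 // no case set
	}
}

func main() {
	h := &Holder{Shape: &Circle{R: 2}}
	fmt.Println(area(h), h.GetCircle() != nil) // 12.56636 true
}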
-func (*Op) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Op_OneofMarshaler, _Op_OneofUnmarshaler, _Op_OneofSizer, []interface{}{ - (*Op_Exec)(nil), - (*Op_Source)(nil), - (*Op_Copy)(nil), - (*Op_Build)(nil), - } -} - -func _Op_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Op) - // op - switch x := m.Op.(type) { - case *Op_Exec: - _ = b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Exec); err != nil { - return err - } - case *Op_Source: - _ = b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Source); err != nil { - return err - } - case *Op_Copy: - _ = b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Copy); err != nil { - return err - } - case *Op_Build: - _ = b.EncodeVarint(5<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Build); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Op.Op has unexpected type %T", x) - } - return nil -} - -func _Op_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Op) - switch tag { - case 2: // op.exec - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ExecOp) - err := b.DecodeMessage(msg) - m.Op = &Op_Exec{msg} - return true, err - case 3: // op.source - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(SourceOp) - err := b.DecodeMessage(msg) - m.Op = &Op_Source{msg} - return true, err - case 4: // op.copy - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(CopyOp) - err := b.DecodeMessage(msg) - m.Op = &Op_Copy{msg} - return true, err - case 5: // op.build - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(BuildOp) - err := b.DecodeMessage(msg) - m.Op = &Op_Build{msg} - return true, err - default: - return false, nil - } -} - -func _Op_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Op) - // op - switch x := m.Op.(type) { - case *Op_Exec: - s := proto.Size(x.Exec) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *Op_Source: - s := proto.Size(x.Source) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *Op_Copy: - s := proto.Size(x.Copy) - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *Op_Build: - s := proto.Size(x.Build) - n += proto.SizeVarint(5<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// Platform is github.com/opencontainers/image-spec/specs-go/v1.Platform -type Platform struct { - Architecture string `protobuf:"bytes,1,opt,name=Architecture,proto3" json:"Architecture,omitempty"` - OS string `protobuf:"bytes,2,opt,name=OS,proto3" json:"OS,omitempty"` - Variant string `protobuf:"bytes,3,opt,name=Variant,proto3" json:"Variant,omitempty"` - OSVersion string `protobuf:"bytes,4,opt,name=OSVersion,proto3" json:"OSVersion,omitempty"` - OSFeatures []string `protobuf:"bytes,5,rep,name=OSFeatures" json:"OSFeatures,omitempty"` -} - -func (m *Platform) Reset() { *m = Platform{} } -func (m *Platform) String() string { return proto.CompactTextString(m) } -func (*Platform) ProtoMessage() {} -func (*Platform) Descriptor() 
([]byte, []int) { return fileDescriptorOps, []int{1} } - -func (m *Platform) GetArchitecture() string { - if m != nil { - return m.Architecture - } - return "" -} - -func (m *Platform) GetOS() string { - if m != nil { - return m.OS - } - return "" -} - -func (m *Platform) GetVariant() string { - if m != nil { - return m.Variant - } - return "" -} - -func (m *Platform) GetOSVersion() string { - if m != nil { - return m.OSVersion - } - return "" -} - -func (m *Platform) GetOSFeatures() []string { - if m != nil { - return m.OSFeatures - } - return nil -} - -// Input represents an input edge for an Op. -type Input struct { - // digest of the marshaled input Op - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - // output index of the input Op - Index OutputIndex `protobuf:"varint,2,opt,name=index,proto3,customtype=OutputIndex" json:"index"` -} - -func (m *Input) Reset() { *m = Input{} } -func (m *Input) String() string { return proto.CompactTextString(m) } -func (*Input) ProtoMessage() {} -func (*Input) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{2} } - -// ExecOp executes a command in a container. -type ExecOp struct { - Meta *Meta `protobuf:"bytes,1,opt,name=meta" json:"meta,omitempty"` - Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts" json:"mounts,omitempty"` - Network NetMode `protobuf:"varint,3,opt,name=network,proto3,enum=pb.NetMode" json:"network,omitempty"` -} - -func (m *ExecOp) Reset() { *m = ExecOp{} } -func (m *ExecOp) String() string { return proto.CompactTextString(m) } -func (*ExecOp) ProtoMessage() {} -func (*ExecOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{3} } - -func (m *ExecOp) GetMeta() *Meta { - if m != nil { - return m.Meta - } - return nil -} - -func (m *ExecOp) GetMounts() []*Mount { - if m != nil { - return m.Mounts - } - return nil -} - -func (m *ExecOp) GetNetwork() NetMode { - if m != nil { - return m.Network - } - return NetMode_UNSET -} - -// Meta is a set of arguments for ExecOp. -// Meta is unrelated to LLB metadata. -// FIXME: rename (ExecContext? ExecArgs?) -type Meta struct { - Args []string `protobuf:"bytes,1,rep,name=args" json:"args,omitempty"` - Env []string `protobuf:"bytes,2,rep,name=env" json:"env,omitempty"` - Cwd string `protobuf:"bytes,3,opt,name=cwd,proto3" json:"cwd,omitempty"` - User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` - ProxyEnv *ProxyEnv `protobuf:"bytes,5,opt,name=proxy_env,json=proxyEnv" json:"proxy_env,omitempty"` - ExtraHosts []*HostIP `protobuf:"bytes,6,rep,name=extraHosts" json:"extraHosts,omitempty"` -} - -func (m *Meta) Reset() { *m = Meta{} } -func (m *Meta) String() string { return proto.CompactTextString(m) } -func (*Meta) ProtoMessage() {} -func (*Meta) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{4} } - -func (m *Meta) GetArgs() []string { - if m != nil { - return m.Args - } - return nil -} - -func (m *Meta) GetEnv() []string { - if m != nil { - return m.Env - } - return nil -} - -func (m *Meta) GetCwd() string { - if m != nil { - return m.Cwd - } - return "" -} - -func (m *Meta) GetUser() string { - if m != nil { - return m.User - } - return "" -} - -func (m *Meta) GetProxyEnv() *ProxyEnv { - if m != nil { - return m.ProxyEnv - } - return nil -} - -func (m *Meta) GetExtraHosts() []*HostIP { - if m != nil { - return m.ExtraHosts - } - return nil -} - -// Mount specifies how to mount an input Op as a filesystem. 
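All of the getters above guard the receiver with `if m != nil`, so a chain of accessors on a message whose sub-messages were never set degrades to zero values instead of panicking. A small sketch of why Go permits this, with hypothetical mirror types rather than the vendored ones:

```go
package main

import "fmt"

// Meta and ExecOp mirror the shape of the generated messages, illustration only.
type Meta struct{ Args []string }

// GetArgs follows the generated pattern: a pointer-receiver method may run on
// a nil *Meta, so it can guard the dereference itself.
func (m *Meta) GetArgs() []string {
	if m != nil {
		return m.Args
	}
	return nil
}

type ExecOp struct{ Meta *Meta }

func (m *ExecOp) GetMeta() *Meta {
	if m != nil {
		return m.Meta
	}
	return nil
}

func main() {
	var e *ExecOp // a message that was never set
	// Chained access never panics; missing links just yield zero values.
	fmt.Println(e.GetMeta().GetArgs() == nil) // true
}
```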
-type Mount struct { - Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` - Selector string `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` - Dest string `protobuf:"bytes,3,opt,name=dest,proto3" json:"dest,omitempty"` - Output OutputIndex `protobuf:"varint,4,opt,name=output,proto3,customtype=OutputIndex" json:"output"` - Readonly bool `protobuf:"varint,5,opt,name=readonly,proto3" json:"readonly,omitempty"` - MountType MountType `protobuf:"varint,6,opt,name=mountType,proto3,enum=pb.MountType" json:"mountType,omitempty"` - CacheOpt *CacheOpt `protobuf:"bytes,20,opt,name=cacheOpt" json:"cacheOpt,omitempty"` - SecretOpt *SecretOpt `protobuf:"bytes,21,opt,name=secretOpt" json:"secretOpt,omitempty"` - SSHOpt *SSHOpt `protobuf:"bytes,22,opt,name=SSHOpt" json:"SSHOpt,omitempty"` -} - -func (m *Mount) Reset() { *m = Mount{} } -func (m *Mount) String() string { return proto.CompactTextString(m) } -func (*Mount) ProtoMessage() {} -func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{5} } - -func (m *Mount) GetSelector() string { - if m != nil { - return m.Selector - } - return "" -} - -func (m *Mount) GetDest() string { - if m != nil { - return m.Dest - } - return "" -} - -func (m *Mount) GetReadonly() bool { - if m != nil { - return m.Readonly - } - return false -} - -func (m *Mount) GetMountType() MountType { - if m != nil { - return m.MountType - } - return MountType_BIND -} - -func (m *Mount) GetCacheOpt() *CacheOpt { - if m != nil { - return m.CacheOpt - } - return nil -} - -func (m *Mount) GetSecretOpt() *SecretOpt { - if m != nil { - return m.SecretOpt - } - return nil -} - -func (m *Mount) GetSSHOpt() *SSHOpt { - if m != nil { - return m.SSHOpt - } - return nil -} - -// CacheOpt defines options specific to cache mounts -type CacheOpt struct { - // ID is an optional namespace for the mount - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - // Sharing is the sharing mode for the mount - Sharing CacheSharingOpt `protobuf:"varint,2,opt,name=sharing,proto3,enum=pb.CacheSharingOpt" json:"sharing,omitempty"` -} - -func (m *CacheOpt) Reset() { *m = CacheOpt{} } -func (m *CacheOpt) String() string { return proto.CompactTextString(m) } -func (*CacheOpt) ProtoMessage() {} -func (*CacheOpt) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{6} } - -func (m *CacheOpt) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *CacheOpt) GetSharing() CacheSharingOpt { - if m != nil { - return m.Sharing - } - return CacheSharingOpt_SHARED -} - -// SecretOpt defines options describing secret mounts -type SecretOpt struct { - // ID of secret. Used for querying the value. - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - // UID of secret file - Uid uint32 `protobuf:"varint,2,opt,name=uid,proto3" json:"uid,omitempty"` - // GID of secret file - Gid uint32 `protobuf:"varint,3,opt,name=gid,proto3" json:"gid,omitempty"` - // Mode is the filesystem mode of secret file - Mode uint32 `protobuf:"varint,4,opt,name=mode,proto3" json:"mode,omitempty"` - // Optional defines if secret value is required. Error is produced - // if value is not found and optional is false.
- Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` -} - -func (m *SecretOpt) Reset() { *m = SecretOpt{} } -func (m *SecretOpt) String() string { return proto.CompactTextString(m) } -func (*SecretOpt) ProtoMessage() {} -func (*SecretOpt) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{7} } - -func (m *SecretOpt) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *SecretOpt) GetUid() uint32 { - if m != nil { - return m.Uid - } - return 0 -} - -func (m *SecretOpt) GetGid() uint32 { - if m != nil { - return m.Gid - } - return 0 -} - -func (m *SecretOpt) GetMode() uint32 { - if m != nil { - return m.Mode - } - return 0 -} - -func (m *SecretOpt) GetOptional() bool { - if m != nil { - return m.Optional - } - return false -} - -// SSHOpt defines options describing ssh mounts -type SSHOpt struct { - // ID of exposed ssh rule. Used for querying the value. - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - // UID of agent socket - Uid uint32 `protobuf:"varint,2,opt,name=uid,proto3" json:"uid,omitempty"` - // GID of agent socket - Gid uint32 `protobuf:"varint,3,opt,name=gid,proto3" json:"gid,omitempty"` - // Mode is the filesystem mode of agent socket - Mode uint32 `protobuf:"varint,4,opt,name=mode,proto3" json:"mode,omitempty"` - // Optional defines if ssh socket is required. Error is produced - // if client does not expose ssh. - Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` -} - -func (m *SSHOpt) Reset() { *m = SSHOpt{} } -func (m *SSHOpt) String() string { return proto.CompactTextString(m) } -func (*SSHOpt) ProtoMessage() {} -func (*SSHOpt) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{8} } - -func (m *SSHOpt) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *SSHOpt) GetUid() uint32 { - if m != nil { - return m.Uid - } - return 0 -} - -func (m *SSHOpt) GetGid() uint32 { - if m != nil { - return m.Gid - } - return 0 -} - -func (m *SSHOpt) GetMode() uint32 { - if m != nil { - return m.Mode - } - return 0 -} - -func (m *SSHOpt) GetOptional() bool { - if m != nil { - return m.Optional - } - return false -} - -// CopyOp copies files across Ops. -type CopyOp struct { - Src []*CopySource `protobuf:"bytes,1,rep,name=src" json:"src,omitempty"` - Dest string `protobuf:"bytes,2,opt,name=dest,proto3" json:"dest,omitempty"` -} - -func (m *CopyOp) Reset() { *m = CopyOp{} } -func (m *CopyOp) String() string { return proto.CompactTextString(m) } -func (*CopyOp) ProtoMessage() {} -func (*CopyOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{9} } - -func (m *CopyOp) GetSrc() []*CopySource { - if m != nil { - return m.Src - } - return nil -} - -func (m *CopyOp) GetDest() string { - if m != nil { - return m.Dest - } - return "" -} - -// CopySource specifies a source for CopyOp. -type CopySource struct { - Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` - Selector string `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` -} - -func (m *CopySource) Reset() { *m = CopySource{} } -func (m *CopySource) String() string { return proto.CompactTextString(m) } -func (*CopySource) ProtoMessage() {} -func (*CopySource) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{10} } - -func (m *CopySource) GetSelector() string { - if m != nil { - return m.Selector - } - return "" -} - -// SourceOp specifies a source such as build contexts and images.
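Further down, the hand-written `MarshalTo`, `Size` and `Unmarshal` bodies hard-code each field's protobuf key byte, computed as `field_number<<3 | wire_type`, and emit length prefixes through `encodeVarintOps`/`sovOps`. A standalone sketch of both building blocks (demo code under those assumptions, not part of the vendored file):

```go
package main

import "fmt"

const wireBytes = 2 // length-delimited wire type, used for strings and messages

// encodeVarint mirrors encodeVarintOps: 7 payload bits per byte, with the
// high bit set on every byte except the last as a continuation marker.
func encodeVarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return offset + 1
}

// sov mirrors sovOps: the number of bytes encodeVarint will emit for x.
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func main() {
	// Key bytes for the Op oneof cases (exec=2, source=3, copy=4, build=5):
	for _, field := range []uint64{2, 3, 4, 5} {
		fmt.Printf("field %d -> 0x%x\n", field, field<<3|wireBytes)
	}
	buf := make([]byte, 10)
	n := encodeVarint(buf, 0, 300)
	fmt.Printf("varint(300) = % x, sov = %d\n", buf[:n], sov(300)) // ac 02, 2
}
```

The printed key bytes 0x12, 0x1a, 0x22 and 0x2a match the literals written by the `Op_Exec`, `Op_Source`, `Op_Copy` and `Op_Build` `MarshalTo` methods, and `varint(300)` shows the 7-bit little-endian groups with the continuation bit that the `Unmarshal` decode loops reverse.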
-type SourceOp struct { - // TODO: use source type or any type instead of URL protocol. - // identifier e.g. local://, docker-image://, git://, https://... - Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` - // attrs are defined in attr.go - Attrs map[string]string `protobuf:"bytes,2,rep,name=attrs" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *SourceOp) Reset() { *m = SourceOp{} } -func (m *SourceOp) String() string { return proto.CompactTextString(m) } -func (*SourceOp) ProtoMessage() {} -func (*SourceOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{11} } - -func (m *SourceOp) GetIdentifier() string { - if m != nil { - return m.Identifier - } - return "" -} - -func (m *SourceOp) GetAttrs() map[string]string { - if m != nil { - return m.Attrs - } - return nil -} - -// BuildOp is used for nested build invocation. -// BuildOp is experimental and can break without backwards compatibility. -type BuildOp struct { - Builder InputIndex `protobuf:"varint,1,opt,name=builder,proto3,customtype=InputIndex" json:"builder"` - Inputs map[string]*BuildInput `protobuf:"bytes,2,rep,name=inputs" json:"inputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` - Def *Definition `protobuf:"bytes,3,opt,name=def" json:"def,omitempty"` - Attrs map[string]string `protobuf:"bytes,4,rep,name=attrs" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *BuildOp) Reset() { *m = BuildOp{} } -func (m *BuildOp) String() string { return proto.CompactTextString(m) } -func (*BuildOp) ProtoMessage() {} -func (*BuildOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{12} } - -func (m *BuildOp) GetInputs() map[string]*BuildInput { - if m != nil { - return m.Inputs - } - return nil -} - -func (m *BuildOp) GetDef() *Definition { - if m != nil { - return m.Def - } - return nil -} - -func (m *BuildOp) GetAttrs() map[string]string { - if m != nil { - return m.Attrs - } - return nil -} - -// BuildInput is used for BuildOp. -type BuildInput struct { - Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` -} - -func (m *BuildInput) Reset() { *m = BuildInput{} } -func (m *BuildInput) String() string { return proto.CompactTextString(m) } -func (*BuildInput) ProtoMessage() {} -func (*BuildInput) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{13} } - -// OpMetadata is a per-vertex metadata entry, which can be defined for an arbitrary Op vertex and overridden at run time. -type OpMetadata struct { - // ignore_cache specifies to ignore the cache for this Op.
- IgnoreCache bool `protobuf:"varint,1,opt,name=ignore_cache,json=ignoreCache,proto3" json:"ignore_cache,omitempty"` - // Description can be used for keeping any text fields that the builder doesn't parse - Description map[string]string `protobuf:"bytes,2,rep,name=description" json:"description,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // index 3 reserved for WorkerConstraint in previous versions - // WorkerConstraint worker_constraint = 3; - ExportCache *ExportCache `protobuf:"bytes,4,opt,name=export_cache,json=exportCache" json:"export_cache,omitempty"` - Caps map[github_com_moby_buildkit_util_apicaps.CapID]bool `protobuf:"bytes,5,rep,name=caps,castkey=github.com/moby/buildkit/util/apicaps.CapID" json:"caps" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` -} - -func (m *OpMetadata) Reset() { *m = OpMetadata{} } -func (m *OpMetadata) String() string { return proto.CompactTextString(m) } -func (*OpMetadata) ProtoMessage() {} -func (*OpMetadata) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{14} } - -func (m *OpMetadata) GetIgnoreCache() bool { - if m != nil { - return m.IgnoreCache - } - return false -} - -func (m *OpMetadata) GetDescription() map[string]string { - if m != nil { - return m.Description - } - return nil -} - -func (m *OpMetadata) GetExportCache() *ExportCache { - if m != nil { - return m.ExportCache - } - return nil -} - -func (m *OpMetadata) GetCaps() map[github_com_moby_buildkit_util_apicaps.CapID]bool { - if m != nil { - return m.Caps - } - return nil -} - -type ExportCache struct { - Value bool `protobuf:"varint,1,opt,name=Value,proto3" json:"Value,omitempty"` -} - -func (m *ExportCache) Reset() { *m = ExportCache{} } -func (m *ExportCache) String() string { return proto.CompactTextString(m) } -func (*ExportCache) ProtoMessage() {} -func (*ExportCache) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{15} } - -func (m *ExportCache) GetValue() bool { - if m != nil { - return m.Value - } - return false -} - -type ProxyEnv struct { - HttpProxy string `protobuf:"bytes,1,opt,name=http_proxy,json=httpProxy,proto3" json:"http_proxy,omitempty"` - HttpsProxy string `protobuf:"bytes,2,opt,name=https_proxy,json=httpsProxy,proto3" json:"https_proxy,omitempty"` - FtpProxy string `protobuf:"bytes,3,opt,name=ftp_proxy,json=ftpProxy,proto3" json:"ftp_proxy,omitempty"` - NoProxy string `protobuf:"bytes,4,opt,name=no_proxy,json=noProxy,proto3" json:"no_proxy,omitempty"` -} - -func (m *ProxyEnv) Reset() { *m = ProxyEnv{} } -func (m *ProxyEnv) String() string { return proto.CompactTextString(m) } -func (*ProxyEnv) ProtoMessage() {} -func (*ProxyEnv) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{16} } - -func (m *ProxyEnv) GetHttpProxy() string { - if m != nil { - return m.HttpProxy - } - return "" -} - -func (m *ProxyEnv) GetHttpsProxy() string { - if m != nil { - return m.HttpsProxy - } - return "" -} - -func (m *ProxyEnv) GetFtpProxy() string { - if m != nil { - return m.FtpProxy - } - return "" -} - -func (m *ProxyEnv) GetNoProxy() string { - if m != nil { - return m.NoProxy - } - return "" -} - -// WorkerConstraints defines conditions for the worker -type WorkerConstraints struct { - Filter []string `protobuf:"bytes,1,rep,name=filter" json:"filter,omitempty"` -} - -func (m *WorkerConstraints) Reset() { *m = WorkerConstraints{} } -func (m *WorkerConstraints) String() string { return proto.CompactTextString(m) } -func (*WorkerConstraints)
ProtoMessage() {} -func (*WorkerConstraints) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{17} } - -func (m *WorkerConstraints) GetFilter() []string { - if m != nil { - return m.Filter - } - return nil -} - -// Definition is the LLB definition structure with per-vertex metadata entries -type Definition struct { - // def is a list of marshaled Op messages - Def [][]byte `protobuf:"bytes,1,rep,name=def" json:"def,omitempty"` - // metadata contains metadata for each of the Op messages. - // A key must be an LLB op digest string. Currently, an empty string is not expected as a key, but it may change in the future. - Metadata map[github_com_opencontainers_go_digest.Digest]OpMetadata `protobuf:"bytes,2,rep,name=metadata,castkey=github.com/opencontainers/go-digest.Digest" json:"metadata" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` -} - -func (m *Definition) Reset() { *m = Definition{} } -func (m *Definition) String() string { return proto.CompactTextString(m) } -func (*Definition) ProtoMessage() {} -func (*Definition) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{18} } - -func (m *Definition) GetDef() [][]byte { - if m != nil { - return m.Def - } - return nil -} - -func (m *Definition) GetMetadata() map[github_com_opencontainers_go_digest.Digest]OpMetadata { - if m != nil { - return m.Metadata - } - return nil -} - -type HostIP struct { - Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` - IP string `protobuf:"bytes,2,opt,name=IP,proto3" json:"IP,omitempty"` -} - -func (m *HostIP) Reset() { *m = HostIP{} } -func (m *HostIP) String() string { return proto.CompactTextString(m) } -func (*HostIP) ProtoMessage() {} -func (*HostIP) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{19} } - -func (m *HostIP) GetHost() string { - if m != nil { - return m.Host - } - return "" -} - -func (m *HostIP) GetIP() string { - if m != nil { - return m.IP - } - return "" -} - -func init() { - proto.RegisterType((*Op)(nil), "pb.Op") - proto.RegisterType((*Platform)(nil), "pb.Platform") - proto.RegisterType((*Input)(nil), "pb.Input") - proto.RegisterType((*ExecOp)(nil), "pb.ExecOp") - proto.RegisterType((*Meta)(nil), "pb.Meta") - proto.RegisterType((*Mount)(nil), "pb.Mount") - proto.RegisterType((*CacheOpt)(nil), "pb.CacheOpt") - proto.RegisterType((*SecretOpt)(nil), "pb.SecretOpt") - proto.RegisterType((*SSHOpt)(nil), "pb.SSHOpt") - proto.RegisterType((*CopyOp)(nil), "pb.CopyOp") - proto.RegisterType((*CopySource)(nil), "pb.CopySource") - proto.RegisterType((*SourceOp)(nil), "pb.SourceOp") - proto.RegisterType((*BuildOp)(nil), "pb.BuildOp") - proto.RegisterType((*BuildInput)(nil), "pb.BuildInput") - proto.RegisterType((*OpMetadata)(nil), "pb.OpMetadata") - proto.RegisterType((*ExportCache)(nil), "pb.ExportCache") - proto.RegisterType((*ProxyEnv)(nil), "pb.ProxyEnv") - proto.RegisterType((*WorkerConstraints)(nil), "pb.WorkerConstraints") - proto.RegisterType((*Definition)(nil), "pb.Definition") - proto.RegisterType((*HostIP)(nil), "pb.HostIP") - proto.RegisterEnum("pb.NetMode", NetMode_name, NetMode_value) - proto.RegisterEnum("pb.MountType", MountType_name, MountType_value) - proto.RegisterEnum("pb.CacheSharingOpt", CacheSharingOpt_name, CacheSharingOpt_value) -} -func (m *Op) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Op) MarshalTo(dAtA []byte) (int, error) { -
var i int - _ = i - var l int - _ = l - if len(m.Inputs) > 0 { - for _, msg := range m.Inputs { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Op != nil { - nn1, err := m.Op.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += nn1 - } - if m.Platform != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Platform.Size())) - n2, err := m.Platform.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.Constraints != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Constraints.Size())) - n3, err := m.Constraints.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - return i, nil -} - -func (m *Op_Exec) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.Exec != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Exec.Size())) - n4, err := m.Exec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - return i, nil -} -func (m *Op_Source) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.Source != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Source.Size())) - n5, err := m.Source.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - return i, nil -} -func (m *Op_Copy) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.Copy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Copy.Size())) - n6, err := m.Copy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - return i, nil -} -func (m *Op_Build) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.Build != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Build.Size())) - n7, err := m.Build.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - return i, nil -} -func (m *Platform) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Platform) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Architecture) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Architecture))) - i += copy(dAtA[i:], m.Architecture) - } - if len(m.OS) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.OS))) - i += copy(dAtA[i:], m.OS) - } - if len(m.Variant) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Variant))) - i += copy(dAtA[i:], m.Variant) - } - if len(m.OSVersion) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.OSVersion))) - i += copy(dAtA[i:], m.OSVersion) - } - if len(m.OSFeatures) > 0 { - for _, s := range m.OSFeatures { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *Input) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Input) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Digest) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Digest))) - i += copy(dAtA[i:], m.Digest) - } - if m.Index != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintOps(dAtA, i, 
uint64(m.Index)) - } - return i, nil -} - -func (m *ExecOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecOp) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Meta != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Meta.Size())) - n8, err := m.Meta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if len(m.Mounts) > 0 { - for _, msg := range m.Mounts { - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Network != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Network)) - } - return i, nil -} - -func (m *Meta) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Meta) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Args) > 0 { - for _, s := range m.Args { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Env) > 0 { - for _, s := range m.Env { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Cwd) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Cwd))) - i += copy(dAtA[i:], m.Cwd) - } - if len(m.User) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - } - if m.ProxyEnv != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintOps(dAtA, i, uint64(m.ProxyEnv.Size())) - n9, err := m.ProxyEnv.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - if len(m.ExtraHosts) > 0 { - for _, msg := range m.ExtraHosts { - dAtA[i] = 0x32 - i++ - i = encodeVarintOps(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Mount) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Mount) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Input != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Input)) - } - if len(m.Selector) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Selector))) - i += copy(dAtA[i:], m.Selector) - } - if len(m.Dest) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Dest))) - i += copy(dAtA[i:], m.Dest) - } - if m.Output != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Output)) - } - if m.Readonly { - dAtA[i] = 0x28 - i++ - if m.Readonly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.MountType != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.MountType)) - } - if m.CacheOpt != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.CacheOpt.Size())) - n10, err := m.CacheOpt.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - if m.SecretOpt != nil 
{ - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.SecretOpt.Size())) - n11, err := m.SecretOpt.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - if m.SSHOpt != nil { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.SSHOpt.Size())) - n12, err := m.SSHOpt.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - } - return i, nil -} - -func (m *CacheOpt) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CacheOpt) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ID) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - } - if m.Sharing != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Sharing)) - } - return i, nil -} - -func (m *SecretOpt) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SecretOpt) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ID) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - } - if m.Uid != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Uid)) - } - if m.Gid != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Gid)) - } - if m.Mode != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Mode)) - } - if m.Optional { - dAtA[i] = 0x28 - i++ - if m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *SSHOpt) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SSHOpt) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ID) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - } - if m.Uid != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Uid)) - } - if m.Gid != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Gid)) - } - if m.Mode != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Mode)) - } - if m.Optional { - dAtA[i] = 0x28 - i++ - if m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *CopyOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CopyOp) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Src) > 0 { - for _, msg := range m.Src { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Dest) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Dest))) - i += copy(dAtA[i:], m.Dest) - } - return i, nil -} - -func (m *CopySource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CopySource) 
MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Input != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Input)) - } - if len(m.Selector) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Selector))) - i += copy(dAtA[i:], m.Selector) - } - return i, nil -} - -func (m *SourceOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SourceOp) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Identifier) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Identifier))) - i += copy(dAtA[i:], m.Identifier) - } - if len(m.Attrs) > 0 { - keysForAttrs := make([]string, 0, len(m.Attrs)) - for k, _ := range m.Attrs { - keysForAttrs = append(keysForAttrs, string(k)) - } - sortkeys.Strings(keysForAttrs) - for _, k := range keysForAttrs { - dAtA[i] = 0x12 - i++ - v := m.Attrs[string(k)] - mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) - i = encodeVarintOps(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil -} - -func (m *BuildOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BuildOp) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Builder != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Builder)) - } - if len(m.Inputs) > 0 { - keysForInputs := make([]string, 0, len(m.Inputs)) - for k, _ := range m.Inputs { - keysForInputs = append(keysForInputs, string(k)) - } - sortkeys.Strings(keysForInputs) - for _, k := range keysForInputs { - dAtA[i] = 0x12 - i++ - v := m.Inputs[string(k)] - msgSize := 0 - if v != nil { - msgSize = v.Size() - msgSize += 1 + sovOps(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovOps(uint64(len(k))) + msgSize - i = encodeVarintOps(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - if v != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(v.Size())) - n13, err := v.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - } - } - if m.Def != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Def.Size())) - n14, err := m.Def.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if len(m.Attrs) > 0 { - keysForAttrs := make([]string, 0, len(m.Attrs)) - for k, _ := range m.Attrs { - keysForAttrs = append(keysForAttrs, string(k)) - } - sortkeys.Strings(keysForAttrs) - for _, k := range keysForAttrs { - dAtA[i] = 0x22 - i++ - v := m.Attrs[string(k)] - mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) - i = encodeVarintOps(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil -} - -func (m *BuildInput) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return 
nil, err - } - return dAtA[:n], nil -} - -func (m *BuildInput) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Input != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Input)) - } - return i, nil -} - -func (m *OpMetadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OpMetadata) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.IgnoreCache { - dAtA[i] = 0x8 - i++ - if m.IgnoreCache { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.Description) > 0 { - keysForDescription := make([]string, 0, len(m.Description)) - for k, _ := range m.Description { - keysForDescription = append(keysForDescription, string(k)) - } - sortkeys.Strings(keysForDescription) - for _, k := range keysForDescription { - dAtA[i] = 0x12 - i++ - v := m.Description[string(k)] - mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) - i = encodeVarintOps(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.ExportCache != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.ExportCache.Size())) - n15, err := m.ExportCache.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - } - if len(m.Caps) > 0 { - keysForCaps := make([]string, 0, len(m.Caps)) - for k, _ := range m.Caps { - keysForCaps = append(keysForCaps, string(k)) - } - sortkeys.Strings(keysForCaps) - for _, k := range keysForCaps { - dAtA[i] = 0x2a - i++ - v := m.Caps[github_com_moby_buildkit_util_apicaps.CapID(k)] - mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + 1 - i = encodeVarintOps(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x10 - i++ - if v { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - } - return i, nil -} - -func (m *ExportCache) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportCache) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Value { - dAtA[i] = 0x8 - i++ - if m.Value { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *ProxyEnv) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProxyEnv) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.HttpProxy) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.HttpProxy))) - i += copy(dAtA[i:], m.HttpProxy) - } - if len(m.HttpsProxy) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.HttpsProxy))) - i += copy(dAtA[i:], m.HttpsProxy) - } - if len(m.FtpProxy) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.FtpProxy))) - i += copy(dAtA[i:], m.FtpProxy) - } - if len(m.NoProxy) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.NoProxy))) - i += copy(dAtA[i:], m.NoProxy) - } - return i, nil -} - -func (m *WorkerConstraints) Marshal() (dAtA []byte, err 
error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkerConstraints) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Filter) > 0 { - for _, s := range m.Filter { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *Definition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Definition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Def) > 0 { - for _, b := range m.Def { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(b))) - i += copy(dAtA[i:], b) - } - } - if len(m.Metadata) > 0 { - keysForMetadata := make([]string, 0, len(m.Metadata)) - for k, _ := range m.Metadata { - keysForMetadata = append(keysForMetadata, string(k)) - } - sortkeys.Strings(keysForMetadata) - for _, k := range keysForMetadata { - dAtA[i] = 0x12 - i++ - v := m.Metadata[github_com_opencontainers_go_digest.Digest(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovOps(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovOps(uint64(len(k))) + msgSize - i = encodeVarintOps(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64((&v).Size())) - n16, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } - } - return i, nil -} - -func (m *HostIP) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HostIP) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Host) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - } - if len(m.IP) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) - } - return i, nil -} - -func encodeVarintOps(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Op) Size() (n int) { - var l int - _ = l - if len(m.Inputs) > 0 { - for _, e := range m.Inputs { - l = e.Size() - n += 1 + l + sovOps(uint64(l)) - } - } - if m.Op != nil { - n += m.Op.Size() - } - if m.Platform != nil { - l = m.Platform.Size() - n += 1 + l + sovOps(uint64(l)) - } - if m.Constraints != nil { - l = m.Constraints.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} - -func (m *Op_Exec) Size() (n int) { - var l int - _ = l - if m.Exec != nil { - l = m.Exec.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} -func (m *Op_Source) Size() (n int) { - var l int - _ = l - if m.Source != nil { - l = m.Source.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} -func (m *Op_Copy) Size() (n int) { - var l int - _ = l - if m.Copy != nil { - l = m.Copy.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} -func (m *Op_Build) Size() (n int) { - var l int - _ = l - if m.Build != nil { - l = m.Build.Size() - n += 1 + l + sovOps(uint64(l)) - } 
- return n -} -func (m *Platform) Size() (n int) { - var l int - _ = l - l = len(m.Architecture) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.OS) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.Variant) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.OSVersion) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if len(m.OSFeatures) > 0 { - for _, s := range m.OSFeatures { - l = len(s) - n += 1 + l + sovOps(uint64(l)) - } - } - return n -} - -func (m *Input) Size() (n int) { - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Index != 0 { - n += 1 + sovOps(uint64(m.Index)) - } - return n -} - -func (m *ExecOp) Size() (n int) { - var l int - _ = l - if m.Meta != nil { - l = m.Meta.Size() - n += 1 + l + sovOps(uint64(l)) - } - if len(m.Mounts) > 0 { - for _, e := range m.Mounts { - l = e.Size() - n += 1 + l + sovOps(uint64(l)) - } - } - if m.Network != 0 { - n += 1 + sovOps(uint64(m.Network)) - } - return n -} - -func (m *Meta) Size() (n int) { - var l int - _ = l - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sovOps(uint64(l)) - } - } - if len(m.Env) > 0 { - for _, s := range m.Env { - l = len(s) - n += 1 + l + sovOps(uint64(l)) - } - } - l = len(m.Cwd) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.User) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.ProxyEnv != nil { - l = m.ProxyEnv.Size() - n += 1 + l + sovOps(uint64(l)) - } - if len(m.ExtraHosts) > 0 { - for _, e := range m.ExtraHosts { - l = e.Size() - n += 1 + l + sovOps(uint64(l)) - } - } - return n -} - -func (m *Mount) Size() (n int) { - var l int - _ = l - if m.Input != 0 { - n += 1 + sovOps(uint64(m.Input)) - } - l = len(m.Selector) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.Dest) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Output != 0 { - n += 1 + sovOps(uint64(m.Output)) - } - if m.Readonly { - n += 2 - } - if m.MountType != 0 { - n += 1 + sovOps(uint64(m.MountType)) - } - if m.CacheOpt != nil { - l = m.CacheOpt.Size() - n += 2 + l + sovOps(uint64(l)) - } - if m.SecretOpt != nil { - l = m.SecretOpt.Size() - n += 2 + l + sovOps(uint64(l)) - } - if m.SSHOpt != nil { - l = m.SSHOpt.Size() - n += 2 + l + sovOps(uint64(l)) - } - return n -} - -func (m *CacheOpt) Size() (n int) { - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Sharing != 0 { - n += 1 + sovOps(uint64(m.Sharing)) - } - return n -} - -func (m *SecretOpt) Size() (n int) { - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Uid != 0 { - n += 1 + sovOps(uint64(m.Uid)) - } - if m.Gid != 0 { - n += 1 + sovOps(uint64(m.Gid)) - } - if m.Mode != 0 { - n += 1 + sovOps(uint64(m.Mode)) - } - if m.Optional { - n += 2 - } - return n -} - -func (m *SSHOpt) Size() (n int) { - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Uid != 0 { - n += 1 + sovOps(uint64(m.Uid)) - } - if m.Gid != 0 { - n += 1 + sovOps(uint64(m.Gid)) - } - if m.Mode != 0 { - n += 1 + sovOps(uint64(m.Mode)) - } - if m.Optional { - n += 2 - } - return n -} - -func (m *CopyOp) Size() (n int) { - var l int - _ = l - if len(m.Src) > 0 { - for _, e := range m.Src { - l = e.Size() - n += 1 + l + sovOps(uint64(l)) - } - } - l = len(m.Dest) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - return n -} - -func (m *CopySource) Size() (n int) { - var l int - _ = l - if m.Input != 0 { - n += 1 + sovOps(uint64(m.Input)) - 
} - l = len(m.Selector) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - return n -} - -func (m *SourceOp) Size() (n int) { - var l int - _ = l - l = len(m.Identifier) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if len(m.Attrs) > 0 { - for k, v := range m.Attrs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) - } - } - return n -} - -func (m *BuildOp) Size() (n int) { - var l int - _ = l - if m.Builder != 0 { - n += 1 + sovOps(uint64(m.Builder)) - } - if len(m.Inputs) > 0 { - for k, v := range m.Inputs { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovOps(uint64(l)) - } - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + l - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) - } - } - if m.Def != nil { - l = m.Def.Size() - n += 1 + l + sovOps(uint64(l)) - } - if len(m.Attrs) > 0 { - for k, v := range m.Attrs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) - } - } - return n -} - -func (m *BuildInput) Size() (n int) { - var l int - _ = l - if m.Input != 0 { - n += 1 + sovOps(uint64(m.Input)) - } - return n -} - -func (m *OpMetadata) Size() (n int) { - var l int - _ = l - if m.IgnoreCache { - n += 2 - } - if len(m.Description) > 0 { - for k, v := range m.Description { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) - } - } - if m.ExportCache != nil { - l = m.ExportCache.Size() - n += 1 + l + sovOps(uint64(l)) - } - if len(m.Caps) > 0 { - for k, v := range m.Caps { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + 1 - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) - } - } - return n -} - -func (m *ExportCache) Size() (n int) { - var l int - _ = l - if m.Value { - n += 2 - } - return n -} - -func (m *ProxyEnv) Size() (n int) { - var l int - _ = l - l = len(m.HttpProxy) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.HttpsProxy) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.FtpProxy) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.NoProxy) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - return n -} - -func (m *WorkerConstraints) Size() (n int) { - var l int - _ = l - if len(m.Filter) > 0 { - for _, s := range m.Filter { - l = len(s) - n += 1 + l + sovOps(uint64(l)) - } - } - return n -} - -func (m *Definition) Size() (n int) { - var l int - _ = l - if len(m.Def) > 0 { - for _, b := range m.Def { - l = len(b) - n += 1 + l + sovOps(uint64(l)) - } - } - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + l + sovOps(uint64(l)) - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) - } - } - return n -} - -func (m *HostIP) Size() (n int) { - var l int - _ = l - l = len(m.Host) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.IP) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - return n -} - -func sovOps(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozOps(x uint64) (n int) { - return sovOps(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Op) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Op: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Op: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Inputs = append(m.Inputs, &Input{}) - if err := m.Inputs[len(m.Inputs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ExecOp{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Op = &Op_Exec{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &SourceOp{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Op = &Op_Source{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Copy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &CopyOp{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Op = &Op_Copy{v} - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Build", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex > l { - 
[elided: deletion of the remaining generated ops.proto decoding code — the tail of (*Op).Unmarshal (the Platform and Constraints fields) and the Unmarshal methods for Platform, Input, ExecOp, Meta, Mount, CacheOpt, SecretOpt, SSHOpt, CopyOp, CopySource, SourceOp, BuildOp, BuildInput, OpMetadata, ExportCache, ProxyEnv, WorkerConstraints, Definition, and HostIP, plus the skipOps helper for skipping unknown fields]
-
-var (
-	ErrInvalidLengthOps = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowOps   = fmt.Errorf("proto: integer overflow")
-)
-
-func init() { proto.RegisterFile("ops.proto", fileDescriptorOps) }
-
-var fileDescriptorOps = []byte{
-	// 1444 bytes of a gzipped FileDescriptorProto
[elided: the gzipped FileDescriptorProto byte values]
0x79, 0x32, 0xdb, 0xa1, 0x7e, 0x39, 0xf3, 0xd5, 0xce, 0x54, 0xf9, - 0x93, 0x1d, 0x37, 0xf4, 0x91, 0x0e, 0x37, 0xf6, 0x3a, 0x9c, 0xa8, 0x57, 0x1e, 0x42, 0xed, 0x75, - 0xbf, 0xdf, 0xa7, 0x06, 0x2b, 0xf7, 0xc1, 0x9e, 0xfb, 0xf1, 0x47, 0x1b, 0x2b, 0xd9, 0xe2, 0xdd, - 0x82, 0x6a, 0x26, 0x6e, 0x04, 0x1e, 0x13, 0x50, 0x67, 0x5f, 0x2f, 0x9c, 0xe7, 0xf8, 0x05, 0x94, - 0xcc, 0xc0, 0xff, 0x00, 0x9c, 0x2a, 0x15, 0x3e, 0xa1, 0xa1, 0x68, 0x0e, 0xb1, 0x51, 0x43, 0x08, - 0xb6, 0x06, 0x55, 0x5c, 0xc4, 0xc6, 0xae, 0x3d, 0xa5, 0x1d, 0xb1, 0x06, 0xfc, 0x1b, 0xec, 0xd1, - 0x7c, 0xbb, 0x1e, 0x66, 0x95, 0x51, 0xb2, 0xfb, 0x5f, 0x50, 0x09, 0xa4, 0xb1, 0xe9, 0x19, 0x5d, - 0x0e, 0x24, 0x99, 0x9c, 0x2d, 0xf8, 0xc7, 0x1b, 0x9f, 0x6b, 0xec, 0x06, 0x94, 0x46, 0xfe, 0x44, - 0xd1, 0x75, 0xc5, 0xb1, 0x6f, 0x56, 0xce, 0x8f, 0x39, 0x80, 0xf4, 0x6a, 0x61, 0x46, 0xf0, 0xde, - 0x21, 0x66, 0x51, 0xdf, 0xb3, 0x09, 0x54, 0xce, 0x4d, 0x05, 0x4d, 0x1f, 0xdd, 0x7c, 0xf5, 0x3a, - 0x36, 0x92, 0x02, 0xeb, 0xda, 0x36, 0x4d, 0x6d, 0xdf, 0xe7, 0x93, 0x6a, 0x7e, 0xc2, 0xca, 0x63, - 0x58, 0x7a, 0x85, 0xee, 0x1d, 0x6f, 0x6a, 0xda, 0x65, 0xd9, 0x92, 0xdd, 0x81, 0x92, 0xfe, 0xdc, - 0xc0, 0x77, 0x1b, 0x25, 0x43, 0x43, 0x32, 0xcd, 0x96, 0xc3, 0xe4, 0xe3, 0xb3, 0x77, 0xb8, 0xb9, - 0x01, 0x65, 0xf3, 0x19, 0xc5, 0x6c, 0x28, 0x7e, 0x7a, 0x30, 0xe8, 0x1e, 0xd5, 0x16, 0x58, 0x05, - 0x0a, 0x7b, 0xfd, 0xc1, 0x51, 0x2d, 0x87, 0xd2, 0x41, 0xff, 0xa0, 0x5b, 0xb3, 0x36, 0x3f, 0x02, - 0x7b, 0x3e, 0xee, 0x51, 0xdd, 0xea, 0x1d, 0x74, 0x6a, 0x0b, 0x0c, 0xa0, 0x34, 0xe8, 0xb6, 0x79, - 0x17, 0xc1, 0x65, 0xc8, 0x0f, 0x06, 0x7b, 0x35, 0x0b, 0xa9, 0xda, 0xbb, 0xed, 0xbd, 0x6e, 0x2d, - 0x8f, 0xe2, 0xd1, 0xfe, 0xe1, 0xa3, 0x41, 0xad, 0xb0, 0x79, 0x0f, 0xae, 0xbc, 0x36, 0x6e, 0x69, - 0xf7, 0xde, 0x2e, 0xef, 0x22, 0x53, 0x15, 0xca, 0x87, 0xbc, 0x77, 0xbc, 0x7b, 0xd4, 0xad, 0xe5, - 0xd0, 0xf0, 0x49, 0xbf, 0xfd, 0xb8, 0xdb, 0xa9, 0x59, 0xad, 0x6b, 0x2f, 0x2e, 0x57, 0x73, 0xdf, - 0x5d, 0xae, 0xe6, 0x7e, 0xb8, 0x5c, 0xcd, 0xfd, 0x72, 0xb9, 0x9a, 0xfb, 0xe2, 0xd7, 0xd5, 0x85, - 0x93, 0x12, 0xfd, 0x45, 0xb9, 0xfb, 0x5b, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0xfe, 0x08, 0x0c, - 0xe2, 0x0c, 0x00, 0x00, -} diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.proto b/vendor/github.com/moby/buildkit/solver/pb/ops.proto deleted file mode 100644 index 09442f60c4cb..000000000000 --- a/vendor/github.com/moby/buildkit/solver/pb/ops.proto +++ /dev/null @@ -1,214 +0,0 @@ -syntax = "proto3"; - -// Package pb provides the protobuf definition of LLB: low-level builder instruction. -// LLB is DAG-structured; Op represents a vertex, and Definition represents a graph. -package pb; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - -option (gogoproto.stable_marshaler_all) = true; - -// Op represents a vertex of the LLB DAG. -message Op { - // inputs is a set of input edges. - repeated Input inputs = 1; - oneof op { - ExecOp exec = 2; - SourceOp source = 3; - CopyOp copy = 4; - BuildOp build = 5; - } - Platform platform = 10; - WorkerConstraints constraints = 11; -} - -// Platform is github.com/opencontainers/image-spec/specs-go/v1.Platform -message Platform { - string Architecture = 1; - string OS = 2; - string Variant = 3; - string OSVersion = 4; // unused - repeated string OSFeatures = 5; // unused -} - -// Input represents an input edge for an Op. 
-message Input { - // digest of the marshaled input Op - string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; - // output index of the input Op - int64 index = 2 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; -} - -// ExecOp executes a command in a container. -message ExecOp { - Meta meta = 1; - repeated Mount mounts = 2; - NetMode network = 3; -} - -// Meta is a set of arguments for ExecOp. -// Meta is unrelated to LLB metadata. -// FIXME: rename (ExecContext? ExecArgs?) -message Meta { - repeated string args = 1; - repeated string env = 2; - string cwd = 3; - string user = 4; - ProxyEnv proxy_env = 5; - repeated HostIP extraHosts = 6; -} - -enum NetMode { - UNSET = 0; // sandbox - HOST = 1; - NONE = 2; -} - -// Mount specifies how to mount an input Op as a filesystem. -message Mount { - int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; - string selector = 2; - string dest = 3; - int64 output = 4 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; - bool readonly = 5; - MountType mountType = 6; - CacheOpt cacheOpt = 20; - SecretOpt secretOpt = 21; - SSHOpt SSHOpt = 22; -} - -// MountType defines a type of a mount from a supported set -enum MountType { - BIND = 0; - SECRET = 1; - SSH = 2; - CACHE = 3; - TMPFS = 4; -} - -// CacheOpt defines options specific to cache mounts -message CacheOpt { - // ID is an optional namespace for the mount - string ID = 1; - // Sharing is the sharing mode for the mount - CacheSharingOpt sharing = 2; -} - -// CacheSharingOpt defines different sharing modes for cache mount -enum CacheSharingOpt { - // SHARED cache mount can be used concurrently by multiple writers - SHARED = 0; - // PRIVATE creates a new mount if there are multiple writers - PRIVATE = 1; - // LOCKED pauses second writer until first one releases the mount - LOCKED = 2; -} - -// SecretOpt defines options describing secret mounts -message SecretOpt { - // ID of secret. Used for querying the value. - string ID = 1; - // UID of secret file - uint32 uid = 2; - // GID of secret file - uint32 gid = 3; - // Mode is the filesystem mode of secret file - uint32 mode = 4; - // Optional defines if secret value is required. Error is produced - // if value is not found and optional is false. - bool optional = 5; -} - -// SSHOpt defines options describing ssh mounts -message SSHOpt { - // ID of exposed ssh rule. Used for querying the value. - string ID = 1; - // UID of agent socket - uint32 uid = 2; - // GID of agent socket - uint32 gid = 3; - // Mode is the filesystem mode of agent socket - uint32 mode = 4; - // Optional defines if ssh socket is required. Error is produced - // if client does not expose ssh. - bool optional = 5; -} - -// CopyOp copies files across Ops. -message CopyOp { - repeated CopySource src = 1; - string dest = 2; -} - -// CopySource specifies a source for CopyOp. -message CopySource { - int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; - string selector = 2; -} - -// SourceOp specifies a source such as build contexts and images. -message SourceOp { - // TODO: use source type or any type instead of URL protocol. - // identifier e.g. local://, docker-image://, git://, https://... - string identifier = 1; - // attrs are defined in attr.go - map<string, string> attrs = 2; -} - -// BuildOp is used for nested build invocation. 
-// BuildOp is experimental and can break without backwards compatibility -message BuildOp { - int64 builder = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; - map<string, BuildInput> inputs = 2; - Definition def = 3; - map<string, string> attrs = 4; - // outputs -} - -// BuildInput is used for BuildOp. -message BuildInput { - int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; -} - -// OpMetadata is a per-vertex metadata entry, which can be defined for an arbitrary Op vertex and overridden at run time. -message OpMetadata { - // ignore_cache specifies to ignore the cache for this Op. - bool ignore_cache = 1; - // Description can be used for keeping any text fields that builder doesn't parse - map<string, string> description = 2; - // index 3 reserved for WorkerConstraint in previous versions - // WorkerConstraint worker_constraint = 3; - ExportCache export_cache = 4; - - map<string, bool> caps = 5 [(gogoproto.castkey) = "github.com/moby/buildkit/util/apicaps.CapID", (gogoproto.nullable) = false]; -} - -message ExportCache { - bool Value = 1; -} - -message ProxyEnv { - string http_proxy = 1; - string https_proxy = 2; - string ftp_proxy = 3; - string no_proxy = 4; -} - -// WorkerConstraints defines conditions for the worker -message WorkerConstraints { - repeated string filter = 1; // containerd-style filter -} - -// Definition is the LLB definition structure with per-vertex metadata entries -message Definition { - // def is a list of marshaled Op messages - repeated bytes def = 1; - // metadata contains metadata for each of the Op messages. - // A key must be an LLB op digest string. Currently, empty string is not expected as a key, but it may change in the future. - map<string, OpMetadata> metadata = 2 [(gogoproto.castkey) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; -} - -message HostIP { - string Host = 1; - string IP = 2; -} \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/solver/pb/platform.go b/vendor/github.com/moby/buildkit/solver/pb/platform.go deleted file mode 100644 index a434aa716882..000000000000 --- a/vendor/github.com/moby/buildkit/solver/pb/platform.go +++ /dev/null @@ -1,41 +0,0 @@ -package pb - -import ( - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -func (p *Platform) Spec() specs.Platform { - return specs.Platform{ - OS: p.OS, - Architecture: p.Architecture, - Variant: p.Variant, - OSVersion: p.OSVersion, - OSFeatures: p.OSFeatures, - } -} - -func PlatformFromSpec(p specs.Platform) Platform { - return Platform{ - OS: p.OS, - Architecture: p.Architecture, - Variant: p.Variant, - OSVersion: p.OSVersion, - OSFeatures: p.OSFeatures, - } -} - -func ToSpecPlatforms(p []Platform) []specs.Platform { - out := make([]specs.Platform, 0, len(p)) - for _, pp := range p { - out = append(out, pp.Spec()) - } - return out -} - -func PlatformsFromSpec(p []specs.Platform) []Platform { - out := make([]Platform, 0, len(p)) - for _, pp := range p { - out = append(out, PlatformFromSpec(pp)) - } - return out -} diff --git a/vendor/github.com/moby/buildkit/solver/progress.go b/vendor/github.com/moby/buildkit/solver/progress.go deleted file mode 100644 index 14f3f5e0199c..000000000000 --- a/vendor/github.com/moby/buildkit/solver/progress.go +++ /dev/null @@ -1,109 +0,0 @@ -package solver - -import ( - "context" - "io" - "time" - - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/util/progress" - digest "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -func (j *Job) Status(ctx context.Context, ch chan 
*client.SolveStatus) error { - vs := &vertexStream{cache: map[digest.Digest]*client.Vertex{}} - pr := j.pr.Reader(ctx) - defer func() { - if enc := vs.encore(); len(enc) > 0 { - ch <- &client.SolveStatus{Vertexes: enc} - } - close(ch) - }() - - for { - p, err := pr.Read(ctx) - if err != nil { - if err == io.EOF { - return nil - } - return err - } - ss := &client.SolveStatus{} - for _, p := range p { - switch v := p.Sys.(type) { - case client.Vertex: - ss.Vertexes = append(ss.Vertexes, vs.append(v)...) - - case progress.Status: - vtx, ok := p.Meta("vertex") - if !ok { - logrus.Warnf("progress %s status without vertex info", p.ID) - continue - } - vs := &client.VertexStatus{ - ID: p.ID, - Vertex: vtx.(digest.Digest), - Name: v.Action, - Total: int64(v.Total), - Current: int64(v.Current), - Timestamp: p.Timestamp, - Started: v.Started, - Completed: v.Completed, - } - ss.Statuses = append(ss.Statuses, vs) - case client.VertexLog: - vtx, ok := p.Meta("vertex") - if !ok { - logrus.Warnf("progress %s log without vertex info", p.ID) - continue - } - v.Vertex = vtx.(digest.Digest) - v.Timestamp = p.Timestamp - ss.Logs = append(ss.Logs, &v) - } - } - select { - case <-ctx.Done(): - return ctx.Err() - case ch <- ss: - } - } -} - -type vertexStream struct { - cache map[digest.Digest]*client.Vertex -} - -func (vs *vertexStream) append(v client.Vertex) []*client.Vertex { - var out []*client.Vertex - vs.cache[v.Digest] = &v - if v.Started != nil { - for _, inp := range v.Inputs { - if inpv, ok := vs.cache[inp]; ok { - if !inpv.Cached && inpv.Completed == nil { - inpv.Cached = true - inpv.Started = v.Started - inpv.Completed = v.Started - out = append(out, vs.append(*inpv)...) - delete(vs.cache, inp) - } - } - } - } - vcopy := v - return append(out, &vcopy) -} - -func (vs *vertexStream) encore() []*client.Vertex { - var out []*client.Vertex - for _, v := range vs.cache { - if v.Started != nil && v.Completed == nil { - now := time.Now() - v.Completed = &now - v.Error = context.Canceled.Error() - out = append(out, v) - } - } - return out -} diff --git a/vendor/github.com/moby/buildkit/solver/result.go b/vendor/github.com/moby/buildkit/solver/result.go deleted file mode 100644 index b217ae083e4c..000000000000 --- a/vendor/github.com/moby/buildkit/solver/result.go +++ /dev/null @@ -1,105 +0,0 @@ -package solver - -import ( - "context" - "sync" - "sync/atomic" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// SharedResult is a result that can be cloned -type SharedResult struct { - mu sync.Mutex - main Result -} - -func NewSharedResult(main Result) *SharedResult { - return &SharedResult{main: main} -} - -func (r *SharedResult) Clone() Result { - r.mu.Lock() - defer r.mu.Unlock() - - r1, r2 := dup(r.main) - r.main = r1 - return r2 -} - -func (r *SharedResult) Release(ctx context.Context) error { - r.mu.Lock() - defer r.mu.Unlock() - return r.main.Release(ctx) -} - -func dup(res Result) (Result, Result) { - sem := int64(0) - return &splitResult{Result: res, sem: &sem}, &splitResult{Result: res, sem: &sem} -} - -type splitResult struct { - Result - released int64 - sem *int64 -} - -func (r *splitResult) Release(ctx context.Context) error { - if atomic.AddInt64(&r.released, 1) > 1 { - err := errors.Errorf("releasing already released reference") - logrus.Error(err) - return err - } - if atomic.AddInt64(r.sem, 1) == 2 { - return r.Result.Release(ctx) - } - return nil -} - -// NewCachedResult combines a result and cache key into cached result -func NewCachedResult(res Result, k []ExportableCacheKey) 
CachedResult { - return &cachedResult{res, k} -} - -type cachedResult struct { - Result - k []ExportableCacheKey -} - -func (cr *cachedResult) CacheKeys() []ExportableCacheKey { - return cr.k -} - -func NewSharedCachedResult(res CachedResult) *SharedCachedResult { - return &SharedCachedResult{ - SharedResult: NewSharedResult(res), - CachedResult: res, - } -} - -func (r *SharedCachedResult) Clone() CachedResult { - return &clonedCachedResult{Result: r.SharedResult.Clone(), cr: r.CachedResult} -} - -func (r *SharedCachedResult) Release(ctx context.Context) error { - return r.SharedResult.Release(ctx) -} - -type clonedCachedResult struct { - Result - cr CachedResult -} - -func (r *clonedCachedResult) ID() string { - return r.Result.ID() -} - -func (cr *clonedCachedResult) CacheKeys() []ExportableCacheKey { - return cr.cr.CacheKeys() -} - -type SharedCachedResult struct { - *SharedResult - CachedResult -} diff --git a/vendor/github.com/moby/buildkit/solver/scheduler.go b/vendor/github.com/moby/buildkit/solver/scheduler.go deleted file mode 100644 index 7fe981eff454..000000000000 --- a/vendor/github.com/moby/buildkit/solver/scheduler.go +++ /dev/null @@ -1,410 +0,0 @@ -package solver - -import ( - "context" - "os" - "sync" - - "github.com/moby/buildkit/solver/internal/pipe" - "github.com/moby/buildkit/util/cond" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var debugScheduler = false // TODO: replace with logs in build trace - -func init() { - if os.Getenv("BUILDKIT_SCHEDULER_DEBUG") == "1" { - debugScheduler = true - } -} - -func newScheduler(ef edgeFactory) *scheduler { - s := &scheduler{ - waitq: map[*edge]struct{}{}, - incoming: map[*edge][]*edgePipe{}, - outgoing: map[*edge][]*edgePipe{}, - - stopped: make(chan struct{}), - closed: make(chan struct{}), - - ef: ef, - } - s.cond = cond.NewStatefulCond(&s.mu) - - go s.loop() - - return s -} - -type dispatcher struct { - next *dispatcher - e *edge -} - -type scheduler struct { - cond *cond.StatefulCond - mu sync.Mutex - muQ sync.Mutex - - ef edgeFactory - - waitq map[*edge]struct{} - next *dispatcher - last *dispatcher - stopped chan struct{} - stoppedOnce sync.Once - closed chan struct{} - - incoming map[*edge][]*edgePipe - outgoing map[*edge][]*edgePipe -} - -func (s *scheduler) Stop() { - s.stoppedOnce.Do(func() { - close(s.stopped) - }) - <-s.closed -} - -func (s *scheduler) loop() { - defer func() { - close(s.closed) - }() - - go func() { - <-s.stopped - s.mu.Lock() - s.cond.Signal() - s.mu.Unlock() - }() - - s.mu.Lock() - for { - select { - case <-s.stopped: - s.mu.Unlock() - return - default: - } - s.muQ.Lock() - l := s.next - if l != nil { - if l == s.last { - s.last = nil - } - s.next = l.next - delete(s.waitq, l.e) - } - s.muQ.Unlock() - if l == nil { - s.cond.Wait() - continue - } - s.dispatch(l.e) - } -} - -// dispatch schedules an edge to be processed -func (s *scheduler) dispatch(e *edge) { - inc := make([]pipe.Sender, len(s.incoming[e])) - for i, p := range s.incoming[e] { - inc[i] = p.Sender - } - out := make([]pipe.Receiver, len(s.outgoing[e])) - for i, p := range s.outgoing[e] { - out[i] = p.Receiver - } - - e.hasActiveOutgoing = false - updates := []pipe.Receiver{} - for _, p := range out { - if ok := p.Receive(); ok { - updates = append(updates, p) - } - if !p.Status().Completed { - e.hasActiveOutgoing = true - } - } - - pf := &pipeFactory{s: s, e: e} - - // unpark the edge - debugSchedulerPreUnpark(e, inc, updates, out) - e.unpark(inc, updates, out, pf) - debugSchedulerPostUnpark(e, inc) - -postUnpark: - 
// set up new requests that didn't complete/were added by this run - openIncoming := make([]*edgePipe, 0, len(inc)) - for _, r := range s.incoming[e] { - if !r.Sender.Status().Completed { - openIncoming = append(openIncoming, r) - } - } - if len(openIncoming) > 0 { - s.incoming[e] = openIncoming - } else { - delete(s.incoming, e) - } - - openOutgoing := make([]*edgePipe, 0, len(out)) - for _, r := range s.outgoing[e] { - if !r.Receiver.Status().Completed { - openOutgoing = append(openOutgoing, r) - } - } - if len(openOutgoing) > 0 { - s.outgoing[e] = openOutgoing - } else { - delete(s.outgoing, e) - } - - // if keys changed there might be a possibility for a merge with another edge - if e.keysDidChange { - if k := e.currentIndexKey(); k != nil { - // skip this if not at least 1 key per dep - origEdge := e.index.LoadOrStore(k, e) - if origEdge != nil { - logrus.Debugf("merging edge %s to %s\n", e.edge.Vertex.Name(), origEdge.edge.Vertex.Name()) - if s.mergeTo(origEdge, e) { - s.ef.setEdge(e.edge, origEdge) - } - } - } - e.keysDidChange = false - } - - // validation to avoid deadlocks/resource leaks: - // TODO: if these start showing up in error reports they can be changed - // to error the edge instead. They can only appear from algorithm bugs in - // unpark(), not for any external input. - if len(openIncoming) > 0 && len(openOutgoing) == 0 { - e.markFailed(pf, errors.New("buildkit scheduler error: return leaving incoming open. Please report this with BUILDKIT_SCHEDULER_DEBUG=1")) - goto postUnpark - } - if len(openIncoming) == 0 && len(openOutgoing) > 0 { - e.markFailed(pf, errors.New("buildkit scheduler error: return leaving outgoing open. Please report this with BUILDKIT_SCHEDULER_DEBUG=1")) - goto postUnpark - } -} - -// signal notifies that an edge needs to be processed again -func (s *scheduler) signal(e *edge) { - s.muQ.Lock() - if _, ok := s.waitq[e]; !ok { - d := &dispatcher{e: e} - if s.last == nil { - s.next = d - } else { - s.last.next = d - } - s.last = d - s.waitq[e] = struct{}{} - s.cond.Signal() - } - s.muQ.Unlock() -} - -// build evaluates an edge into a result -func (s *scheduler) build(ctx context.Context, edge Edge) (CachedResult, error) { - s.mu.Lock() - e := s.ef.getEdge(edge) - if e == nil { - s.mu.Unlock() - return nil, errors.Errorf("invalid request %v for build", edge) - } - - wait := make(chan struct{}) - - var p *pipe.Pipe - p = s.newPipe(e, nil, pipe.Request{Payload: &edgeRequest{desiredState: edgeStatusComplete}}) - p.OnSendCompletion = func() { - p.Receiver.Receive() - if p.Receiver.Status().Completed { - close(wait) - } - } - s.mu.Unlock() - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - go func() { - <-ctx.Done() - p.Receiver.Cancel() - }() - - <-wait - - if err := p.Receiver.Status().Err; err != nil { - return nil, err - } - return p.Receiver.Status().Value.(*edgeState).result.Clone(), nil -} - -// newPipe creates a new request pipe between two edges -func (s *scheduler) newPipe(target, from *edge, req pipe.Request) *pipe.Pipe { - p := &edgePipe{ - Pipe: pipe.New(req), - Target: target, - From: from, - } - - s.signal(target) - if from != nil { - p.OnSendCompletion = func() { - p.mu.Lock() - defer p.mu.Unlock() - s.signal(p.From) - } - s.outgoing[from] = append(s.outgoing[from], p) - } - s.incoming[target] = append(s.incoming[target], p) - p.OnReceiveCompletion = func() { - p.mu.Lock() - defer p.mu.Unlock() - s.signal(p.Target) - } - return p.Pipe -} - -// newRequestWithFunc creates a new request pipe that invokes an async function -func (s *scheduler) 
newRequestWithFunc(e *edge, f func(context.Context) (interface{}, error)) pipe.Receiver { - pp, start := pipe.NewWithFunction(f) - p := &edgePipe{ - Pipe: pp, - From: e, - } - p.OnSendCompletion = func() { - p.mu.Lock() - defer p.mu.Unlock() - s.signal(p.From) - } - s.outgoing[e] = append(s.outgoing[e], p) - go start() - return p.Receiver -} - -// mergeTo merges the state from one edge to another. source edge is discarded. -func (s *scheduler) mergeTo(target, src *edge) bool { - if !target.edge.Vertex.Options().IgnoreCache && src.edge.Vertex.Options().IgnoreCache { - return false - } - for _, inc := range s.incoming[src] { - inc.mu.Lock() - inc.Target = target - s.incoming[target] = append(s.incoming[target], inc) - inc.mu.Unlock() - } - - for _, out := range s.outgoing[src] { - out.mu.Lock() - out.From = target - s.outgoing[target] = append(s.outgoing[target], out) - out.mu.Unlock() - out.Receiver.Cancel() - } - - delete(s.incoming, src) - delete(s.outgoing, src) - s.signal(target) - - for i, d := range src.deps { - for _, k := range d.keys { - target.secondaryExporters = append(target.secondaryExporters, expDep{i, CacheKeyWithSelector{CacheKey: k, Selector: src.cacheMap.Deps[i].Selector}}) - } - if d.slowCacheKey != nil { - target.secondaryExporters = append(target.secondaryExporters, expDep{i, CacheKeyWithSelector{CacheKey: *d.slowCacheKey}}) - } - if d.result != nil { - for _, dk := range d.result.CacheKeys() { - target.secondaryExporters = append(target.secondaryExporters, expDep{i, CacheKeyWithSelector{CacheKey: dk, Selector: src.cacheMap.Deps[i].Selector}}) - } - } - } - - // TODO(tonistiigi): merge cache providers - - return true -} - -// edgeFactory allows access to the edges from a shared graph -type edgeFactory interface { - getEdge(Edge) *edge - setEdge(Edge, *edge) -} - -type pipeFactory struct { - e *edge - s *scheduler -} - -func (pf *pipeFactory) NewInputRequest(ee Edge, req *edgeRequest) pipe.Receiver { - target := pf.s.ef.getEdge(ee) - if target == nil { - panic("failed to get edge") // TODO: return errored pipe - } - p := pf.s.newPipe(target, pf.e, pipe.Request{Payload: req}) - if debugScheduler { - logrus.Debugf("> newPipe %s %p desiredState=%s", ee.Vertex.Name(), p, req.desiredState) - } - return p.Receiver -} - -func (pf *pipeFactory) NewFuncRequest(f func(context.Context) (interface{}, error)) pipe.Receiver { - p := pf.s.newRequestWithFunc(pf.e, f) - if debugScheduler { - logrus.Debugf("> newFunc %p", p) - } - return p -} - -func debugSchedulerPreUnpark(e *edge, inc []pipe.Sender, updates, allPipes []pipe.Receiver) { - if !debugScheduler { - return - } - logrus.Debugf(">> unpark %s req=%d upt=%d out=%d state=%s %s", e.edge.Vertex.Name(), len(inc), len(updates), len(allPipes), e.state, e.edge.Vertex.Digest()) - - for i, dep := range e.deps { - des := edgeStatusInitial - if dep.req != nil { - des = dep.req.Request().(*edgeRequest).desiredState - } - logrus.Debugf(":: dep%d %s state=%s des=%s keys=%d hasslowcache=%v", i, e.edge.Vertex.Inputs()[i].Vertex.Name(), dep.state, des, len(dep.keys), e.slowCacheFunc(dep) != nil) - } - - for i, in := range inc { - req := in.Request() - logrus.Debugf("> incoming-%d: %p dstate=%s canceled=%v", i, in, req.Payload.(*edgeRequest).desiredState, req.Canceled) - } - - for i, up := range updates { - if up == e.cacheMapReq { - logrus.Debugf("> update-%d: %p cacheMapReq complete=%v", i, up, up.Status().Completed) - } else if up == e.execReq { - logrus.Debugf("> update-%d: %p execReq complete=%v", i, up, up.Status().Completed) - } else { - 
st, ok := up.Status().Value.(*edgeState) - if ok { - index := -1 - if dep, ok := e.depRequests[up]; ok { - index = int(dep.index) - } - logrus.Debugf("> update-%d: %p input-%d keys=%d state=%s", i, up, index, len(st.keys), st.state) - } else { - logrus.Debugf("> update-%d: unknown", i) - } - } - } -} - -func debugSchedulerPostUnpark(e *edge, inc []pipe.Sender) { - if !debugScheduler { - return - } - for i, in := range inc { - logrus.Debugf("< incoming-%d: %p completed=%v", i, in, in.Status().Completed) - } - logrus.Debugf("<< unpark %s\n", e.edge.Vertex.Name()) -} diff --git a/vendor/github.com/moby/buildkit/solver/scheduler_test.go b/vendor/github.com/moby/buildkit/solver/scheduler_test.go deleted file mode 100644 index 4e3814686cf0..000000000000 --- a/vendor/github.com/moby/buildkit/solver/scheduler_test.go +++ /dev/null @@ -1,3679 +0,0 @@ -package solver - -import ( - "context" - _ "crypto/sha256" - "fmt" - "math" - "math/rand" - "os" - "sync/atomic" - "testing" - "time" - - "github.com/moby/buildkit/identity" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" -) - -func init() { - if debugScheduler { - logrus.SetOutput(os.Stdout) - logrus.SetLevel(logrus.DebugLevel) - } -} - -func TestSingleLevelActiveGraph(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - s := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - }) - defer s.Close() - - j0, err := s.NewJob("job0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - value: "result0", - }), - } - g0.Vertex.(*vertex).setupCallCounters() - - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.NotNil(t, res) - require.Equal(t, unwrap(res), "result0") - - require.Equal(t, *g0.Vertex.(*vertex).cacheCallCount, int64(1)) - require.Equal(t, *g0.Vertex.(*vertex).execCallCount, int64(1)) - - // calling again with same digest just uses the active queue - j1, err := s.NewJob("job1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g1 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - value: "result1", - }), - } - g1.Vertex.(*vertex).setupCallCounters() - - res, err = j1.Build(ctx, g1) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.Equal(t, *g0.Vertex.(*vertex).cacheCallCount, int64(1)) - require.Equal(t, *g0.Vertex.(*vertex).execCallCount, int64(1)) - require.Equal(t, *g1.Vertex.(*vertex).cacheCallCount, int64(0)) - require.Equal(t, *g1.Vertex.(*vertex).execCallCount, int64(0)) - - require.NoError(t, j0.Discard()) - j0 = nil - - // after discarding j0, j1 still holds the state - - j2, err := s.NewJob("job2") - require.NoError(t, err) - - defer func() { - if j2 != nil { - j2.Discard() - } - }() - - g2 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - value: "result2", - }), - } - g2.Vertex.(*vertex).setupCallCounters() - - res, err = j2.Build(ctx, g2) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.Equal(t, *g0.Vertex.(*vertex).cacheCallCount, int64(1)) - require.Equal(t, *g0.Vertex.(*vertex).execCallCount, int64(1)) - require.Equal(t, *g1.Vertex.(*vertex).cacheCallCount, int64(0)) - require.Equal(t, *g1.Vertex.(*vertex).execCallCount, int64(0)) - require.Equal(t, *g2.Vertex.(*vertex).cacheCallCount, int64(0)) - require.Equal(t, 
*g2.Vertex.(*vertex).execCallCount, int64(0)) - - require.NoError(t, j1.Discard()) - j1 = nil - require.NoError(t, j2.Discard()) - j2 = nil - - // everything should be released now - - j3, err := s.NewJob("job3") - require.NoError(t, err) - - defer func() { - if j3 != nil { - j3.Discard() - } - }() - - g3 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - value: "result3", - }), - } - g3.Vertex.(*vertex).setupCallCounters() - - res, err = j3.Build(ctx, g3) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result3") - - require.Equal(t, *g3.Vertex.(*vertex).cacheCallCount, int64(1)) - require.Equal(t, *g3.Vertex.(*vertex).execCallCount, int64(1)) - - require.NoError(t, j3.Discard()) - j3 = nil - - // repeat the same test but make sure the build run in parallel now - - j4, err := s.NewJob("job4") - require.NoError(t, err) - - defer func() { - if j4 != nil { - j4.Discard() - } - }() - - j5, err := s.NewJob("job5") - require.NoError(t, err) - - defer func() { - if j5 != nil { - j5.Discard() - } - }() - - g4 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheDelay: 100 * time.Millisecond, - value: "result4", - }), - } - g4.Vertex.(*vertex).setupCallCounters() - - eg, _ := errgroup.WithContext(ctx) - - eg.Go(func() error { - res, err := j4.Build(ctx, g4) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result4") - return err - }) - - eg.Go(func() error { - res, err := j5.Build(ctx, g4) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result4") - return err - }) - - require.NoError(t, eg.Wait()) - - require.Equal(t, *g4.Vertex.(*vertex).cacheCallCount, int64(1)) - require.Equal(t, *g4.Vertex.(*vertex).execCallCount, int64(1)) -} - -func TestSingleLevelCache(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - s := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - }) - defer s.Close() - - j0, err := s.NewJob("job0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0", - }), - } - g0.Vertex.(*vertex).setupCallCounters() - - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.NoError(t, j0.Discard()) - j0 = nil - - // first try that there is no match for different cache - j1, err := s.NewJob("job1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g1 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1", - }), - } - g1.Vertex.(*vertex).setupCallCounters() - - res, err = j1.Build(ctx, g1) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result1") - - require.Equal(t, *g1.Vertex.(*vertex).cacheCallCount, int64(1)) - require.Equal(t, *g1.Vertex.(*vertex).execCallCount, int64(1)) - - require.NoError(t, j1.Discard()) - j1 = nil - - // expect cache match for first build - - j2, err := s.NewJob("job2") - require.NoError(t, err) - - defer func() { - if j2 != nil { - j2.Discard() - } - }() - - g2 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v2", - cacheKeySeed: "seed0", // same as first build - value: "result2", - }), - } - g2.Vertex.(*vertex).setupCallCounters() - - res, err = j2.Build(ctx, g2) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.Equal(t, *g0.Vertex.(*vertex).cacheCallCount, int64(1)) - require.Equal(t, *g0.Vertex.(*vertex).execCallCount, int64(1)) - require.Equal(t, *g2.Vertex.(*vertex).cacheCallCount, int64(1)) - require.Equal(t, 
*g2.Vertex.(*vertex).execCallCount, int64(0)) - - require.NoError(t, j2.Discard()) - j2 = nil - -} - -func TestSingleLevelCacheParallel(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - s := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - }) - defer s.Close() - - // rebuild in parallel. only executed once. - - j0, err := s.NewJob("job0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - wait2Ready := blockingFuncion(2) - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - cachePreFunc: wait2Ready, - value: "result0", - }), - } - g0.Vertex.(*vertex).setupCallCounters() - - j1, err := s.NewJob("job1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g1 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed0", // same as g0 - cachePreFunc: wait2Ready, - value: "result0", - }), - } - g1.Vertex.(*vertex).setupCallCounters() - - eg, _ := errgroup.WithContext(ctx) - - eg.Go(func() error { - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - return err - }) - - eg.Go(func() error { - res, err := j1.Build(ctx, g1) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - return err - }) - - require.NoError(t, eg.Wait()) - - require.Equal(t, int64(1), *g0.Vertex.(*vertex).cacheCallCount) - require.Equal(t, int64(1), *g1.Vertex.(*vertex).cacheCallCount) - // only one execution ran - require.Equal(t, int64(1), *g0.Vertex.(*vertex).execCallCount+*g1.Vertex.(*vertex).execCallCount) - -} - -func TestMultiLevelCacheParallel(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - s := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - }) - defer s.Close() - - // rebuild in parallel. only executed once. 
- - j0, err := s.NewJob("job0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - wait2Ready := blockingFuncion(2) - wait2Ready2 := blockingFuncion(2) - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - cachePreFunc: wait2Ready, - value: "result0", - inputs: []Edge{{ - Vertex: vtx(vtxOpt{ - name: "v0-c0", - cacheKeySeed: "seed0-c0", - cachePreFunc: wait2Ready2, - value: "result0-c0", - })}, - }, - }), - } - g0.Vertex.(*vertex).setupCallCounters() - - j1, err := s.NewJob("job1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g1 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed0", // same as g0 - cachePreFunc: wait2Ready, - value: "result0", - inputs: []Edge{{ - Vertex: vtx(vtxOpt{ - name: "v1-c0", - cacheKeySeed: "seed0-c0", // same as g0 - cachePreFunc: wait2Ready2, - value: "result0-c", - })}, - }, - }), - } - g1.Vertex.(*vertex).setupCallCounters() - - eg, _ := errgroup.WithContext(ctx) - - eg.Go(func() error { - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - return err - }) - - eg.Go(func() error { - res, err := j1.Build(ctx, g1) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - return err - }) - - require.NoError(t, eg.Wait()) - - require.Equal(t, int64(2), *g0.Vertex.(*vertex).cacheCallCount) - require.Equal(t, int64(2), *g1.Vertex.(*vertex).cacheCallCount) - require.Equal(t, int64(2), *g0.Vertex.(*vertex).execCallCount+*g1.Vertex.(*vertex).execCallCount) -} - -func TestSingleCancelCache(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - s := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - }) - defer s.Close() - - j0, err := s.NewJob("job0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - ctx, cancel := context.WithCancel(ctx) - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cachePreFunc: func(ctx context.Context) error { - cancel() - <-ctx.Done() - return nil // error should still come from context - }, - }), - } - g0.Vertex.(*vertex).setupCallCounters() - - _, err = j0.Build(ctx, g0) - require.Error(t, err) - require.Equal(t, errors.Cause(err), context.Canceled) - - require.Equal(t, *g0.Vertex.(*vertex).cacheCallCount, int64(1)) - require.Equal(t, *g0.Vertex.(*vertex).execCallCount, int64(0)) - - require.NoError(t, j0.Discard()) - j0 = nil - -} -func TestSingleCancelExec(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - s := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - }) - defer s.Close() - - j1, err := s.NewJob("job1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - ctx, cancel := context.WithCancel(ctx) - - g1 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v2", - execPreFunc: func(ctx context.Context) error { - cancel() - <-ctx.Done() - return nil // error should still come from context - }, - }), - } - g1.Vertex.(*vertex).setupCallCounters() - - _, err = j1.Build(ctx, g1) - require.Error(t, err) - require.Equal(t, errors.Cause(err), context.Canceled) - - require.Equal(t, *g1.Vertex.(*vertex).cacheCallCount, int64(1)) - require.Equal(t, *g1.Vertex.(*vertex).execCallCount, int64(1)) - - require.NoError(t, j1.Discard()) - j1 = nil -} - -func TestSingleCancelParallel(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - s := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - }) - defer s.Close() - - // run 2 in parallel cancel first, second one continues 
without errors - eg, ctx := errgroup.WithContext(ctx) - - firstReady := make(chan struct{}) - firstErrored := make(chan struct{}) - - eg.Go(func() error { - j, err := s.NewJob("job2") - require.NoError(t, err) - - defer func() { - if j != nil { - j.Discard() - } - }() - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - g := Edge{ - Vertex: vtx(vtxOpt{ - name: "v2", - value: "result2", - cachePreFunc: func(ctx context.Context) error { - close(firstReady) - time.Sleep(200 * time.Millisecond) - cancel() - <-firstErrored - return nil - }, - }), - } - - _, err = j.Build(ctx, g) - close(firstErrored) - require.Error(t, err) - require.Equal(t, errors.Cause(err), context.Canceled) - return nil - }) - - eg.Go(func() error { - j, err := s.NewJob("job3") - require.NoError(t, err) - - defer func() { - if j != nil { - j.Discard() - } - }() - - g := Edge{ - Vertex: vtx(vtxOpt{ - name: "v2", - }), - } - <-firstReady - - res, err := j.Build(ctx, g) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result2") - return err - }) - - require.NoError(t, eg.Wait()) -} - -func TestMultiLevelCalculation(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g := Edge{ - Vertex: vtxSum(1, vtxOpt{ - inputs: []Edge{ - {Vertex: vtxSum(0, vtxOpt{ - inputs: []Edge{ - {Vertex: vtxConst(7, vtxOpt{})}, - {Vertex: vtxConst(2, vtxOpt{})}, - }, - })}, - {Vertex: vtxSum(0, vtxOpt{ - inputs: []Edge{ - {Vertex: vtxConst(7, vtxOpt{})}, - {Vertex: vtxConst(2, vtxOpt{})}, - }, - })}, - {Vertex: vtxConst(2, vtxOpt{})}, - {Vertex: vtxConst(2, vtxOpt{})}, - {Vertex: vtxConst(19, vtxOpt{})}, - }, - }), - } - - res, err := j0.Build(ctx, g) - require.NoError(t, err) - require.Equal(t, unwrapInt(res), 42) // 1 + 2*(7 + 2) + 2 + 2 + 19 - - require.NoError(t, j0.Discard()) - j0 = nil - - // repeating same build with cache should behave the same - j1, err := l.NewJob("j1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g2 := Edge{ - Vertex: vtxSum(1, vtxOpt{ - inputs: []Edge{ - {Vertex: vtxSum(0, vtxOpt{ - inputs: []Edge{ - {Vertex: vtxConst(7, vtxOpt{})}, - {Vertex: vtxConst(2, vtxOpt{})}, - }, - })}, - {Vertex: vtxSum(0, vtxOpt{ - inputs: []Edge{ - {Vertex: vtxConst(7, vtxOpt{})}, - {Vertex: vtxConst(2, vtxOpt{})}, - }, - })}, - {Vertex: vtxConst(2, vtxOpt{})}, - {Vertex: vtxConst(2, vtxOpt{})}, - {Vertex: vtxConst(19, vtxOpt{})}, - }, - }), - } - res, err = j1.Build(ctx, g2) - require.NoError(t, err) - require.Equal(t, unwrapInt(res), 42) - -} - -func TestHugeGraph(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - rand.Seed(time.Now().UnixNano()) - - cacheManager := newTrackingCacheManager(NewInMemoryCacheManager()) - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - DefaultCache: cacheManager, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - nodes := 1000 - - g, v := generateSubGraph(nodes) - // printGraph(g, "") - g.Vertex.(*vertexSum).setupCallCounters() - - res, err := j0.Build(ctx, g) - require.NoError(t, err) - require.Equal(t, unwrapInt(res), v) - require.Equal(t, int64(nodes), *g.Vertex.(*vertexSum).cacheCallCount) - // execCount := *g.Vertex.(*vertexSum).execCallCount - // require.True(t, execCount < 1000) - // require.True(t, execCount > 600) - 
require.Equal(t, int64(0), cacheManager.loadCounter) - - require.NoError(t, j0.Discard()) - j0 = nil - - j1, err := l.NewJob("j1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g.Vertex.(*vertexSum).setupCallCounters() - res, err = j1.Build(ctx, g) - require.NoError(t, err) - require.Equal(t, unwrapInt(res), v) - - require.Equal(t, int64(nodes), *g.Vertex.(*vertexSum).cacheCallCount) - require.Equal(t, int64(0), *g.Vertex.(*vertexSum).execCallCount) - require.Equal(t, int64(1), cacheManager.loadCounter) -} - -// TestOptimizedCacheAccess tests that inputs are not loaded from cache unless -// they are really needed -func TestOptimizedCacheAccess(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - cacheManager := newTrackingCacheManager(NewInMemoryCacheManager()) - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - DefaultCache: cacheManager, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1", - })}, - {Vertex: vtx(vtxOpt{ - name: "v2", - cacheKeySeed: "seed2", - value: "result2", - })}, - }, - slowCacheCompute: map[int]ResultBasedCacheFunc{ - 1: digestFromResult, - }, - }), - } - g0.Vertex.(*vertex).setupCallCounters() - - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.Equal(t, int64(3), *g0.Vertex.(*vertex).cacheCallCount) - require.Equal(t, int64(3), *g0.Vertex.(*vertex).execCallCount) - require.Equal(t, int64(0), cacheManager.loadCounter) - - require.NoError(t, j0.Discard()) - j0 = nil - - // changing cache seed for the input with slow cache should not pull result1 - - j1, err := l.NewJob("j1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g1 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0-nocache", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1-nocache", - })}, - {Vertex: vtx(vtxOpt{ - name: "v2-changed", - cacheKeySeed: "seed2-changed", - value: "result2", // produces same slow key as g0 - })}, - }, - slowCacheCompute: map[int]ResultBasedCacheFunc{ - 1: digestFromResult, - }, - }), - } - g1.Vertex.(*vertex).setupCallCounters() - - res, err = j1.Build(ctx, g1) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.Equal(t, int64(3), *g1.Vertex.(*vertex).cacheCallCount) - require.Equal(t, int64(1), *g1.Vertex.(*vertex).execCallCount) - require.Equal(t, int64(1), cacheManager.loadCounter) - - require.NoError(t, j1.Discard()) - j1 = nil -} - -// TestOptimizedCacheAccess2 is a more narrow case that tests that inputs are -// not loaded from cache unless they are really needed. Inputs that match by -// definition should be less prioritized for slow cache calculation than the -// inputs that didn't. 
-func TestOptimizedCacheAccess2(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - cacheManager := newTrackingCacheManager(NewInMemoryCacheManager()) - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - DefaultCache: cacheManager, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1", - })}, - {Vertex: vtx(vtxOpt{ - name: "v2", - cacheKeySeed: "seed2", - value: "result2", - })}, - }, - slowCacheCompute: map[int]ResultBasedCacheFunc{ - 0: digestFromResult, - 1: digestFromResult, - }, - }), - } - g0.Vertex.(*vertex).setupCallCounters() - - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.Equal(t, int64(3), *g0.Vertex.(*vertex).cacheCallCount) - require.Equal(t, int64(3), *g0.Vertex.(*vertex).execCallCount) - require.Equal(t, int64(0), cacheManager.loadCounter) - - require.NoError(t, j0.Discard()) - j0 = nil - - // changing cache seed for the input with slow cache should not pull result1 - - j1, err := l.NewJob("j1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g1 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0-nocache", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1", - })}, - {Vertex: vtx(vtxOpt{ - name: "v2-changed", - cacheKeySeed: "seed2-changed", - value: "result2", // produces same slow key as g0 - })}, - }, - slowCacheCompute: map[int]ResultBasedCacheFunc{ - 0: digestFromResult, - 1: digestFromResult, - }, - }), - } - g1.Vertex.(*vertex).setupCallCounters() - - res, err = j1.Build(ctx, g1) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.Equal(t, int64(3), *g1.Vertex.(*vertex).cacheCallCount) - require.Equal(t, int64(1), *g1.Vertex.(*vertex).execCallCount) - require.Equal(t, int64(1), cacheManager.loadCounter) // v1 is never loaded nor executed - - require.NoError(t, j1.Discard()) - j1 = nil - - // make sure that both inputs are still used for slow cache hit - j2, err := l.NewJob("j2") - require.NoError(t, err) - - defer func() { - if j2 != nil { - j2.Discard() - } - }() - - g2 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0-nocache", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1-changed2", - value: "result1", - })}, - {Vertex: vtx(vtxOpt{ - name: "v2-changed", - cacheKeySeed: "seed2-changed2", - value: "result2", // produces same slow key as g0 - })}, - }, - slowCacheCompute: map[int]ResultBasedCacheFunc{ - 0: digestFromResult, - 1: digestFromResult, - }, - }), - } - g2.Vertex.(*vertex).setupCallCounters() - - res, err = j2.Build(ctx, g2) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.Equal(t, int64(3), *g2.Vertex.(*vertex).cacheCallCount) - require.Equal(t, int64(2), *g2.Vertex.(*vertex).execCallCount) - require.Equal(t, int64(2), cacheManager.loadCounter) - - require.NoError(t, j2.Discard()) - j1 = nil -} - -func TestSlowCache(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - rand.Seed(time.Now().UnixNano()) - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, 
err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1", - })}, - }, - slowCacheCompute: map[int]ResultBasedCacheFunc{ - 0: digestFromResult, - }, - }), - } - - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.NoError(t, j0.Discard()) - j0 = nil - - j1, err := l.NewJob("j1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g1 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v2", - cacheKeySeed: "seed0", - value: "not-cached", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v3", - cacheKeySeed: "seed3", - value: "result1", // used for slow key - })}, - }, - slowCacheCompute: map[int]ResultBasedCacheFunc{ - 0: digestFromResult, - }, - }), - } - - res, err = j1.Build(ctx, g1) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.NoError(t, j1.Discard()) - j1 = nil - -} - -// TestParallelInputs validates that inputs are processed in parallel -func TestParallelInputs(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - rand.Seed(time.Now().UnixNano()) - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - wait2Ready := blockingFuncion(2) - wait2Ready2 := blockingFuncion(2) - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1", - cachePreFunc: wait2Ready, - execPreFunc: wait2Ready2, - })}, - {Vertex: vtx(vtxOpt{ - name: "v2", - cacheKeySeed: "seed2", - value: "result2", - cachePreFunc: wait2Ready, - execPreFunc: wait2Ready2, - })}, - }, - }), - } - g0.Vertex.(*vertex).setupCallCounters() - - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.NoError(t, j0.Discard()) - j0 = nil - - require.Equal(t, int64(3), *g0.Vertex.(*vertex).cacheCallCount) - require.Equal(t, int64(3), *g0.Vertex.(*vertex).execCallCount) -} - -func TestErrorReturns(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - rand.Seed(time.Now().UnixNano()) - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1", - cachePreFunc: func(ctx context.Context) error { - return errors.Errorf("error-from-test") - }, - })}, - {Vertex: vtx(vtxOpt{ - name: "v2", - cacheKeySeed: "seed2", - value: "result2", - })}, - }, - }), - } - - _, err = j0.Build(ctx, g0) - require.Error(t, err) - require.Contains(t, errors.Cause(err).Error(), "error-from-test") - - require.NoError(t, j0.Discard()) - j0 = nil - - // error with cancel error. to check that this isn't mixed up with regular build cancel. 
-
-	j1, err := l.NewJob("j1")
-	require.NoError(t, err)
-
-	defer func() {
-		if j1 != nil {
-			j1.Discard()
-		}
-	}()
-
-	g1 := Edge{
-		Vertex: vtx(vtxOpt{
-			name:         "v0",
-			cacheKeySeed: "seed0",
-			value:        "result0",
-			inputs: []Edge{
-				{Vertex: vtx(vtxOpt{
-					name:         "v1",
-					cacheKeySeed: "seed1",
-					value:        "result1",
-					cachePreFunc: func(ctx context.Context) error {
-						return context.Canceled
-					},
-				})},
-				{Vertex: vtx(vtxOpt{
-					name:         "v2",
-					cacheKeySeed: "seed2",
-					value:        "result2",
-				})},
-			},
-		}),
-	}
-
-	_, err = j1.Build(ctx, g1)
-	require.Error(t, err)
-	require.Equal(t, errors.Cause(err), context.Canceled)
-
-	require.NoError(t, j1.Discard())
-	j1 = nil
-
-	// error from exec
-
-	j2, err := l.NewJob("j2")
-	require.NoError(t, err)
-
-	defer func() {
-		if j2 != nil {
-			j2.Discard()
-		}
-	}()
-
-	g2 := Edge{
-		Vertex: vtx(vtxOpt{
-			name:         "v0",
-			cacheKeySeed: "seed0",
-			value:        "result0",
-			inputs: []Edge{
-				{Vertex: vtx(vtxOpt{
-					name:         "v1",
-					cacheKeySeed: "seed1",
-					value:        "result1",
-				})},
-				{Vertex: vtx(vtxOpt{
-					name:         "v2",
-					cacheKeySeed: "seed3",
-					value:        "result2",
-					execPreFunc: func(ctx context.Context) error {
-						return errors.Errorf("exec-error-from-test")
-					},
-				})},
-			},
-		}),
-	}
-
-	_, err = j2.Build(ctx, g2)
-	require.Error(t, err)
-	require.Contains(t, errors.Cause(err).Error(), "exec-error-from-test")
-
-	require.NoError(t, j2.Discard())
-	j2 = nil
-
-}
-
-func TestMultipleCacheSources(t *testing.T) {
-	t.Parallel()
-	ctx := context.TODO()
-
-	cacheManager := newTrackingCacheManager(NewInMemoryCacheManager())
-
-	l := NewSolver(SolverOpt{
-		ResolveOpFunc: testOpResolver,
-		DefaultCache:  cacheManager,
-	})
-	defer l.Close()
-
-	j0, err := l.NewJob("j0")
-	require.NoError(t, err)
-
-	defer func() {
-		if j0 != nil {
-			j0.Discard()
-		}
-	}()
-
-	g0 := Edge{
-		Vertex: vtx(vtxOpt{
-			name:         "v0",
-			cacheKeySeed: "seed0",
-			value:        "result0",
-			inputs: []Edge{
-				{Vertex: vtx(vtxOpt{
-					name:         "v1",
-					cacheKeySeed: "seed1",
-					value:        "result1",
-				})},
-			},
-		}),
-	}
-
-	res, err := j0.Build(ctx, g0)
-	require.NoError(t, err)
-	require.Equal(t, unwrap(res), "result0")
-	require.Equal(t, int64(0), cacheManager.loadCounter)
-
-	require.NoError(t, j0.Discard())
-	j0 = nil
-
-	cacheManager2 := newTrackingCacheManager(NewInMemoryCacheManager())
-
-	l2 := NewSolver(SolverOpt{
-		ResolveOpFunc: testOpResolver,
-		DefaultCache:  cacheManager2,
-	})
-	defer l2.Close()
-
-	j1, err := l2.NewJob("j1")
-	require.NoError(t, err)
-
-	defer func() {
-		if j1 != nil {
-			j1.Discard()
-		}
-	}()
-
-	g1 := Edge{
-		Vertex: vtx(vtxOpt{
-			name:         "v0",
-			cacheKeySeed: "seed0",
-			value:        "result0-no-cache",
-			cacheSource:  cacheManager,
-			inputs: []Edge{
-				{Vertex: vtx(vtxOpt{
-					name:         "v1",
-					cacheKeySeed: "seed1",
-					value:        "result1-no-cache",
-					cacheSource:  cacheManager,
-				})},
-			},
-		}),
-	}
-
-	res, err = j1.Build(ctx, g1)
-	require.NoError(t, err)
-	require.Equal(t, unwrap(res), "result0")
-	require.Equal(t, int64(1), cacheManager.loadCounter)
-	require.Equal(t, int64(0), cacheManager2.loadCounter)
-
-	require.NoError(t, j1.Discard())
-	j1 = nil
-
-	// build on top of old cache
-	j2, err := l.NewJob("j2")
-	require.NoError(t, err)
-
-	defer func() {
-		if j2 != nil {
-			j2.Discard()
-		}
-	}()
-
-	g2 := Edge{
-		Vertex: vtx(vtxOpt{
-			name:         "v2",
-			cacheKeySeed: "seed2",
-			value:        "result2",
-			inputs:       []Edge{g1},
-		}),
-	}
-
-	res, err = j2.Build(ctx, g2)
-	require.NoError(t, err)
-	require.Equal(t, unwrap(res), "result2")
-	require.Equal(t, int64(2), cacheManager.loadCounter)
-	require.Equal(t, int64(0), cacheManager2.loadCounter)
-
-	require.NoError(t, j2.Discard())
-	j2 = nil
-}
-
-func TestRepeatBuildWithIgnoreCache(t *testing.T) {
-	t.Parallel()
-	ctx := context.TODO()
-
-	l := NewSolver(SolverOpt{
-		ResolveOpFunc: testOpResolver,
-	})
-	defer l.Close()
-
-	j0, err := l.NewJob("j0")
-	require.NoError(t, err)
-
-	defer func() {
-		if j0 != nil {
-			j0.Discard()
-		}
-	}()
-
-	g0 := Edge{
-		Vertex: vtx(vtxOpt{
-			name:         "v0",
-			cacheKeySeed: "seed0",
-			value:        "result0",
-			inputs: []Edge{
-				{Vertex: vtx(vtxOpt{
-					name:         "v1",
-					cacheKeySeed: "seed1",
-					value:        "result1",
-				})},
-			},
-		}),
-	}
-	g0.Vertex.(*vertex).setupCallCounters()
-
-	res, err := j0.Build(ctx, g0)
-	require.NoError(t, err)
-	require.Equal(t, unwrap(res), "result0")
-	require.Equal(t, int64(2), *g0.Vertex.(*vertex).cacheCallCount)
-	require.Equal(t, int64(2), *g0.Vertex.(*vertex).execCallCount)
-
-	require.NoError(t, j0.Discard())
-	j0 = nil
-
-	// rebuild with ignore-cache reevaluates everything
-
-	j1, err := l.NewJob("j1")
-	require.NoError(t, err)
-
-	defer func() {
-		if j1 != nil {
-			j1.Discard()
-		}
-	}()
-
-	g1 := Edge{
-		Vertex: vtx(vtxOpt{
-			name:         "v0",
-			cacheKeySeed: "seed0",
-			value:        "result0-1",
-			ignoreCache:  true,
-			inputs: []Edge{
-				{Vertex: vtx(vtxOpt{
-					name:         "v1",
-					cacheKeySeed: "seed1",
-					value:        "result1-1",
-					ignoreCache:  true,
-				})},
-			},
-		}),
-	}
-	g1.Vertex.(*vertex).setupCallCounters()
-
-	res, err = j1.Build(ctx, g1)
-	require.NoError(t, err)
-	require.Equal(t, unwrap(res), "result0-1")
-	require.Equal(t, int64(2), *g1.Vertex.(*vertex).cacheCallCount)
-	require.Equal(t, int64(2), *g1.Vertex.(*vertex).execCallCount)
-
-	require.NoError(t, j1.Discard())
-	j1 = nil
-
-	// ignore-cache in child reevaluates parent
-
-	j2, err := l.NewJob("j2")
-	require.NoError(t, err)
-
-	defer func() {
-		if j2 != nil {
-			j2.Discard()
-		}
-	}()
-
-	g2 := Edge{
-		Vertex: vtx(vtxOpt{
-			name:         "v0",
-			cacheKeySeed: "seed0",
-			value:        "result0-2",
-			inputs: []Edge{
-				{Vertex: vtx(vtxOpt{
-					name:         "v1",
-					cacheKeySeed: "seed1",
-					value:        "result1-2",
-					ignoreCache:  true,
-				})},
-			},
-		}),
-	}
-	g2.Vertex.(*vertex).setupCallCounters()
-
-	res, err = j2.Build(ctx, g2)
-	require.NoError(t, err)
-	require.Equal(t, unwrap(res), "result0-2")
-	require.Equal(t, int64(2), *g2.Vertex.(*vertex).cacheCallCount)
-	require.Equal(t, int64(2), *g2.Vertex.(*vertex).execCallCount)
-
-	require.NoError(t, j2.Discard())
-	j2 = nil
-}
-
-// TestIgnoreCacheResumeFromSlowCache tests that parent cache resumes if child
-// with ignore-cache generates same slow cache key
-func TestIgnoreCacheResumeFromSlowCache(t *testing.T) {
-	t.Parallel()
-	ctx := context.TODO()
-
-	l := NewSolver(SolverOpt{
-		ResolveOpFunc: testOpResolver,
-	})
-	defer l.Close()
-
-	j0, err := l.NewJob("j0")
-	require.NoError(t, err)
-
-	defer func() {
-		if j0 != nil {
-			j0.Discard()
-		}
-	}()
-
-	g0 := Edge{
-		Vertex: vtx(vtxOpt{
-			name:         "v0",
-			cacheKeySeed: "seed0",
-			value:        "result0",
-			slowCacheCompute: map[int]ResultBasedCacheFunc{
-				0: digestFromResult,
-			},
-			inputs: []Edge{
-				{Vertex: vtx(vtxOpt{
-					name:         "v1",
-					cacheKeySeed: "seed1",
-					value:        "result1",
-				})},
-			},
-		}),
-	}
-	g0.Vertex.(*vertex).setupCallCounters()
-
-	res, err := j0.Build(ctx, g0)
-	require.NoError(t, err)
-	require.Equal(t, unwrap(res), "result0")
-	require.Equal(t, int64(2), *g0.Vertex.(*vertex).cacheCallCount)
-	require.Equal(t, int64(2), *g0.Vertex.(*vertex).execCallCount)
-
-	require.NoError(t, j0.Discard())
-	j0 = nil
-
-	// rebuild reevaluates child, but not parent
-
-	j1, err := l.NewJob("j1")
require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g1 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0-1", // doesn't matter but avoid match because another bug - cacheKeySeed: "seed0", - value: "result0-no-cache", - slowCacheCompute: map[int]ResultBasedCacheFunc{ - 0: digestFromResult, - }, - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v1-1", - cacheKeySeed: "seed1-1", // doesn't matter but avoid match because another bug - value: "result1", // same as g0 - ignoreCache: true, - })}, - }, - }), - } - g1.Vertex.(*vertex).setupCallCounters() - - res, err = j1.Build(ctx, g1) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - require.Equal(t, int64(2), *g1.Vertex.(*vertex).cacheCallCount) - require.Equal(t, int64(1), *g1.Vertex.(*vertex).execCallCount) - - require.NoError(t, j1.Discard()) - j1 = nil -} - -func TestParallelBuildsIgnoreCache(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0", - }), - } - g0.Vertex.(*vertex).setupCallCounters() - - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - - require.Equal(t, unwrap(res), "result0") - - // match by vertex digest - j1, err := l.NewJob("j1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g1 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed1", - value: "result1", - ignoreCache: true, - }), - } - g1.Vertex.(*vertex).setupCallCounters() - - res, err = j1.Build(ctx, g1) - require.NoError(t, err) - - require.Equal(t, unwrap(res), "result1") - - require.NoError(t, j0.Discard()) - j0 = nil - require.NoError(t, j1.Discard()) - j1 = nil - - // new base - j2, err := l.NewJob("j2") - require.NoError(t, err) - - defer func() { - if j2 != nil { - j2.Discard() - } - }() - - g2 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v2", - cacheKeySeed: "seed2", - value: "result2", - }), - } - g2.Vertex.(*vertex).setupCallCounters() - - res, err = j2.Build(ctx, g2) - require.NoError(t, err) - - require.Equal(t, unwrap(res), "result2") - - // match by cache key - j3, err := l.NewJob("j3") - require.NoError(t, err) - - defer func() { - if j3 != nil { - j3.Discard() - } - }() - - g3 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v3", - cacheKeySeed: "seed2", - value: "result3", - ignoreCache: true, - }), - } - g3.Vertex.(*vertex).setupCallCounters() - - res, err = j3.Build(ctx, g3) - require.NoError(t, err) - - require.Equal(t, unwrap(res), "result3") - - // add another ignorecache merges now - - j4, err := l.NewJob("j4") - require.NoError(t, err) - - defer func() { - if j4 != nil { - j4.Discard() - } - }() - - g4 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v4", - cacheKeySeed: "seed2", // same as g2/g3 - value: "result4", - ignoreCache: true, - }), - } - g4.Vertex.(*vertex).setupCallCounters() - - res, err = j4.Build(ctx, g4) - require.NoError(t, err) - - require.Equal(t, unwrap(res), "result3") - - // add another !ignorecache merges now - - j5, err := l.NewJob("j5") - require.NoError(t, err) - - defer func() { - if j5 != nil { - j5.Discard() - } - }() - - g5 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v5", - cacheKeySeed: "seed2", // same as g2/g3/g4 - value: "result5", - }), - } - g5.Vertex.(*vertex).setupCallCounters() - - res, err = j5.Build(ctx, g5) - 
require.NoError(t, err) - - require.Equal(t, unwrap(res), "result3") -} - -func TestSubbuild(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtxSum(1, vtxOpt{ - inputs: []Edge{ - {Vertex: vtxSubBuild(Edge{Vertex: vtxConst(7, vtxOpt{})}, vtxOpt{ - cacheKeySeed: "seed0", - })}, - }, - }), - } - g0.Vertex.(*vertexSum).setupCallCounters() - - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrapInt(res), 8) - - require.Equal(t, int64(2), *g0.Vertex.(*vertexSum).cacheCallCount) - require.Equal(t, int64(2), *g0.Vertex.(*vertexSum).execCallCount) - - require.NoError(t, j0.Discard()) - j0 = nil - - j1, err := l.NewJob("j1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g0.Vertex.(*vertexSum).setupCallCounters() - - res, err = j1.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrapInt(res), 8) - - require.Equal(t, int64(2), *g0.Vertex.(*vertexSum).cacheCallCount) - require.Equal(t, int64(0), *g0.Vertex.(*vertexSum).execCallCount) - - require.NoError(t, j1.Discard()) - j1 = nil - -} - -func TestCacheWithSelector(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - cacheManager := newTrackingCacheManager(NewInMemoryCacheManager()) - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - DefaultCache: cacheManager, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1", - })}, - }, - selectors: map[int]digest.Digest{ - 0: dgst("sel0"), - }, - }), - } - g0.Vertex.(*vertex).setupCallCounters() - - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.Equal(t, int64(2), *g0.Vertex.(*vertex).cacheCallCount) - require.Equal(t, int64(2), *g0.Vertex.(*vertex).execCallCount) - require.Equal(t, int64(0), cacheManager.loadCounter) - - require.NoError(t, j0.Discard()) - j0 = nil - - // repeat, cache is matched - - j1, err := l.NewJob("j1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g1 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0-no-cache", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1-no-cache", - })}, - }, - selectors: map[int]digest.Digest{ - 0: dgst("sel0"), - }, - }), - } - g1.Vertex.(*vertex).setupCallCounters() - - res, err = j1.Build(ctx, g1) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.Equal(t, int64(2), *g1.Vertex.(*vertex).cacheCallCount) - require.Equal(t, int64(0), *g1.Vertex.(*vertex).execCallCount) - require.Equal(t, int64(1), cacheManager.loadCounter) - - require.NoError(t, j1.Discard()) - j1 = nil - - // using different selector doesn't match - - j2, err := l.NewJob("j2") - require.NoError(t, err) - - defer func() { - if j2 != nil { - j2.Discard() - } - }() - - g2 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0-1", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1-1", - 
})}, - }, - selectors: map[int]digest.Digest{ - 0: dgst("sel1"), - }, - }), - } - g2.Vertex.(*vertex).setupCallCounters() - - res, err = j2.Build(ctx, g2) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0-1") - - require.Equal(t, int64(2), *g2.Vertex.(*vertex).cacheCallCount) - require.Equal(t, int64(1), *g2.Vertex.(*vertex).execCallCount) - require.Equal(t, int64(2), cacheManager.loadCounter) - - require.NoError(t, j2.Discard()) - j2 = nil -} - -func TestCacheSlowWithSelector(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - cacheManager := newTrackingCacheManager(NewInMemoryCacheManager()) - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - DefaultCache: cacheManager, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1", - })}, - }, - selectors: map[int]digest.Digest{ - 0: dgst("sel0"), - }, - slowCacheCompute: map[int]ResultBasedCacheFunc{ - 0: digestFromResult, - }, - }), - } - g0.Vertex.(*vertex).setupCallCounters() - - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.Equal(t, int64(2), *g0.Vertex.(*vertex).cacheCallCount) - require.Equal(t, int64(2), *g0.Vertex.(*vertex).execCallCount) - require.Equal(t, int64(0), cacheManager.loadCounter) - - require.NoError(t, j0.Discard()) - j0 = nil - - // repeat, cache is matched - - j1, err := l.NewJob("j1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g1 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0-no-cache", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1-no-cache", - })}, - }, - selectors: map[int]digest.Digest{ - 0: dgst("sel1"), - }, - slowCacheCompute: map[int]ResultBasedCacheFunc{ - 0: digestFromResult, - }, - }), - } - g1.Vertex.(*vertex).setupCallCounters() - - res, err = j1.Build(ctx, g1) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.Equal(t, int64(2), *g1.Vertex.(*vertex).cacheCallCount) - require.Equal(t, int64(0), *g1.Vertex.(*vertex).execCallCount) - require.Equal(t, int64(2), cacheManager.loadCounter) - - require.NoError(t, j1.Discard()) - j1 = nil -} - -func TestCacheExporting(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - cacheManager := newTrackingCacheManager(NewInMemoryCacheManager()) - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - DefaultCache: cacheManager, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtxSum(1, vtxOpt{ - inputs: []Edge{ - {Vertex: vtxConst(2, vtxOpt{})}, - {Vertex: vtxConst(3, vtxOpt{})}, - }, - }), - } - - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrapInt(res), 6) - - require.NoError(t, j0.Discard()) - j0 = nil - - expTarget := newTestExporterTarget() - - _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(true)) - require.NoError(t, err) - - expTarget.normalize() - - require.Equal(t, len(expTarget.records), 3) - require.Equal(t, expTarget.records[0].results, 1) - require.Equal(t, expTarget.records[1].results, 0) - require.Equal(t, 
expTarget.records[2].results, 0) - require.Equal(t, expTarget.records[0].links, 2) - require.Equal(t, expTarget.records[1].links, 0) - require.Equal(t, expTarget.records[2].links, 0) - - j1, err := l.NewJob("j1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - res, err = j1.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrapInt(res), 6) - - require.NoError(t, j1.Discard()) - j1 = nil - - expTarget = newTestExporterTarget() - - _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(true)) - require.NoError(t, err) - - expTarget.normalize() - // the order of the records isn't really significant - require.Equal(t, len(expTarget.records), 3) - require.Equal(t, expTarget.records[0].results, 1) - require.Equal(t, expTarget.records[1].results, 0) - require.Equal(t, expTarget.records[2].results, 0) - require.Equal(t, expTarget.records[0].links, 2) - require.Equal(t, expTarget.records[1].links, 0) - require.Equal(t, expTarget.records[2].links, 0) -} - -func TestCacheExportingModeMin(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - cacheManager := newTrackingCacheManager(NewInMemoryCacheManager()) - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - DefaultCache: cacheManager, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtxSum(1, vtxOpt{ - inputs: []Edge{ - {Vertex: vtxSum(2, vtxOpt{ - inputs: []Edge{ - {Vertex: vtxConst(3, vtxOpt{})}, - }, - })}, - {Vertex: vtxConst(5, vtxOpt{})}, - }, - }), - } - - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrapInt(res), 11) - - require.NoError(t, j0.Discard()) - j0 = nil - - expTarget := newTestExporterTarget() - - _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(false)) - require.NoError(t, err) - - expTarget.normalize() - - require.Equal(t, len(expTarget.records), 4) - require.Equal(t, expTarget.records[0].results, 1) - require.Equal(t, expTarget.records[1].results, 0) - require.Equal(t, expTarget.records[2].results, 0) - require.Equal(t, expTarget.records[3].results, 0) - require.Equal(t, expTarget.records[0].links, 2) - require.Equal(t, expTarget.records[1].links, 1) - require.Equal(t, expTarget.records[2].links, 0) - require.Equal(t, expTarget.records[3].links, 0) - - j1, err := l.NewJob("j1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - res, err = j1.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrapInt(res), 11) - - require.NoError(t, j1.Discard()) - j1 = nil - - expTarget = newTestExporterTarget() - - _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(false)) - require.NoError(t, err) - - expTarget.normalize() - // the order of the records isn't really significant - require.Equal(t, len(expTarget.records), 4) - require.Equal(t, expTarget.records[0].results, 1) - require.Equal(t, expTarget.records[1].results, 0) - require.Equal(t, expTarget.records[2].results, 0) - require.Equal(t, expTarget.records[3].results, 0) - require.Equal(t, expTarget.records[0].links, 2) - require.Equal(t, expTarget.records[1].links, 1) - require.Equal(t, expTarget.records[2].links, 0) - require.Equal(t, expTarget.records[3].links, 0) - - // one more check with all mode - j2, err := l.NewJob("j2") - require.NoError(t, err) - - defer func() { - if j2 != nil { - j2.Discard() - } - }() - - res, err = 
j2.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrapInt(res), 11) - - require.NoError(t, j2.Discard()) - j2 = nil - - expTarget = newTestExporterTarget() - - _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(true)) - require.NoError(t, err) - - expTarget.normalize() - // the order of the records isn't really significant - require.Equal(t, len(expTarget.records), 4) - require.Equal(t, expTarget.records[0].results, 1) - require.Equal(t, expTarget.records[1].results, 1) - require.Equal(t, expTarget.records[2].results, 0) - require.Equal(t, expTarget.records[3].results, 0) - require.Equal(t, expTarget.records[0].links, 2) - require.Equal(t, expTarget.records[1].links, 1) - require.Equal(t, expTarget.records[2].links, 0) - require.Equal(t, expTarget.records[3].links, 0) -} - -func TestSlowCacheAvoidAccess(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - cacheManager := newTrackingCacheManager(NewInMemoryCacheManager()) - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - DefaultCache: cacheManager, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - cachePreFunc: func(context.Context) error { - select { - case <-time.After(50 * time.Millisecond): - case <-ctx.Done(): - } - return nil - }, - value: "result0", - inputs: []Edge{{ - Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v2", - cacheKeySeed: "seed2", - value: "result2", - })}, - }, - selectors: map[int]digest.Digest{ - 0: dgst("sel0"), - }, - slowCacheCompute: map[int]ResultBasedCacheFunc{ - 0: digestFromResult, - }, - }), - }}, - }), - } - g0.Vertex.(*vertex).setupCallCounters() - - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - require.Equal(t, int64(0), cacheManager.loadCounter) - - require.NoError(t, j0.Discard()) - j0 = nil - - j1, err := l.NewJob("j1") - require.NoError(t, err) - - g0.Vertex.(*vertex).setupCallCounters() - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - res, err = j1.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.NoError(t, j1.Discard()) - j1 = nil - - require.Equal(t, int64(3), *g0.Vertex.(*vertex).cacheCallCount) - require.Equal(t, int64(0), *g0.Vertex.(*vertex).execCallCount) - require.Equal(t, int64(1), cacheManager.loadCounter) -} - -// TestSlowCacheAvoidExecOnCache tests a regression where an input with -// possible matches and a content based checksum should not try to checksum -// before other inputs with no keys have at least made into a slow state. 
-// moby/buildkit#648
-func TestSlowCacheAvoidLoadOnCache(t *testing.T) {
-	t.Parallel()
-	ctx := context.TODO()
-
-	cacheManager := newTrackingCacheManager(NewInMemoryCacheManager())
-
-	l := NewSolver(SolverOpt{
-		ResolveOpFunc: testOpResolver,
-		DefaultCache:  cacheManager,
-	})
-	defer l.Close()
-
-	j0, err := l.NewJob("j0")
-	require.NoError(t, err)
-
-	defer func() {
-		if j0 != nil {
-			j0.Discard()
-		}
-	}()
-
-	g0 := Edge{
-		Vertex: vtx(vtxOpt{
-			name:         "vmain",
-			cacheKeySeed: "seedmain",
-			value:        "resultmain",
-			inputs: []Edge{{
-				Vertex: vtx(vtxOpt{
-					name:         "v0",
-					cacheKeySeed: "seed0",
-					value:        "result0",
-					inputs: []Edge{
-						{
-							Vertex: vtx(vtxOpt{
-								name:         "v1",
-								cacheKeySeed: "seed1",
-								value:        "result1",
-								inputs: []Edge{{
-									Vertex: vtx(vtxOpt{
-										name:         "v3",
-										cacheKeySeed: "seed3",
-										value:        "result3",
-									}),
-								}},
-								slowCacheCompute: map[int]ResultBasedCacheFunc{
-									0: digestFromResult,
-								},
-							}),
-						},
-						{
-							Vertex: vtx(vtxOpt{
-								name:         "v2",
-								cacheKeySeed: "seed2",
-								value:        "result2",
-							}),
-						},
-					},
-					slowCacheCompute: map[int]ResultBasedCacheFunc{
-						1: digestFromResult,
-					},
-				}),
-			}},
-		}),
-	}
-	g0.Vertex.(*vertex).setupCallCounters()
-
-	res, err := j0.Build(ctx, g0)
-	require.NoError(t, err)
-	require.Equal(t, unwrap(res), "resultmain")
-	require.Equal(t, int64(0), cacheManager.loadCounter)
-
-	require.NoError(t, j0.Discard())
-	j0 = nil
-
-	j1, err := l.NewJob("j1")
-	require.NoError(t, err)
-
-	// the switch of the cache key for v3 forces it to be re-executed;
-	// testing that this does not cause v2 to be reloaded from cache for its
-	// checksum recalculation
-	g0 = Edge{
-		Vertex: vtx(vtxOpt{
-			name:         "vmain",
-			cacheKeySeed: "seedmain",
-			value:        "resultmain",
-			inputs: []Edge{{
-				Vertex: vtx(vtxOpt{
-					name:         "v0",
-					cacheKeySeed: "seed0",
-					value:        "result0",
-					inputs: []Edge{
-						{
-							Vertex: vtx(vtxOpt{
-								name:         "v1",
-								cacheKeySeed: "seed1",
-								value:        "result1",
-								inputs: []Edge{{
-									Vertex: vtx(vtxOpt{
-										name:         "v3",
-										cacheKeySeed: "seed3-new",
-										value:        "result3",
-									}),
-								}},
-								execPreFunc: func(context.Context) error {
-									select {
-									case <-time.After(50 * time.Millisecond):
-									case <-ctx.Done():
-									}
-									return nil
-								},
-								slowCacheCompute: map[int]ResultBasedCacheFunc{
-									0: digestFromResult,
-								},
-							}),
-						},
-						{
-							Vertex: vtx(vtxOpt{
-								name:         "v2",
-								cacheKeySeed: "seed2",
-								value:        "result2",
-							}),
-						},
-					},
-					slowCacheCompute: map[int]ResultBasedCacheFunc{
-						1: digestFromResult,
-					},
-				}),
-			}},
-		}),
-	}
-
-	g0.Vertex.(*vertex).setupCallCounters()
-
-	defer func() {
-		if j1 != nil {
-			j1.Discard()
-		}
-	}()
-
-	res, err = j1.Build(ctx, g0)
-	require.NoError(t, err)
-	require.Equal(t, unwrap(res), "resultmain")
-
-	require.NoError(t, j1.Discard())
-	j1 = nil
-
-	require.Equal(t, int64(5), *g0.Vertex.(*vertex).cacheCallCount)
-	require.Equal(t, int64(1), *g0.Vertex.(*vertex).execCallCount)
-	require.Equal(t, int64(1), cacheManager.loadCounter)
-}
-
-func TestCacheMultipleMaps(t *testing.T) {
-	t.Parallel()
-	ctx := context.TODO()
-
-	cacheManager := newTrackingCacheManager(NewInMemoryCacheManager())
-
-	l := NewSolver(SolverOpt{
-		ResolveOpFunc: testOpResolver,
-		DefaultCache:  cacheManager,
-	})
-	defer l.Close()
-
-	j0, err := l.NewJob("j0")
-	require.NoError(t, err)
-
-	defer func() {
-		if j0 != nil {
-			j0.Discard()
-		}
-	}()
-
-	g0 := Edge{
-		Vertex: vtx(vtxOpt{
-			name:         "v0",
-			cacheKeySeed: "seed0",
-			cacheKeySeeds: []func() string{
-				func() string { return "seed1" },
-				func() string { return "seed2" },
-			},
-			value: "result0",
-		}),
-	}
-	res, err := j0.Build(ctx, g0)
require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.NoError(t, j0.Discard()) - j0 = nil - - expTarget := newTestExporterTarget() - - _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(true)) - require.NoError(t, err) - - expTarget.normalize() - require.Equal(t, len(expTarget.records), 3) - - j1, err := l.NewJob("j1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - called := false - g1 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - cacheKeySeeds: []func() string{ - func() string { called = true; return "seed3" }, - }, - value: "result0-not-cached", - }), - } - - res, err = j1.Build(ctx, g1) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.NoError(t, j1.Discard()) - j1 = nil - - expTarget = newTestExporterTarget() - - _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(true)) - require.NoError(t, err) - - require.Equal(t, len(expTarget.records), 3) - require.Equal(t, called, false) - - j2, err := l.NewJob("j2") - require.NoError(t, err) - - defer func() { - if j2 != nil { - j2.Discard() - } - }() - - g2 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v2", - cacheKeySeed: "seed3", - cacheKeySeeds: []func() string{ - func() string { called = true; return "seed2" }, - }, - value: "result0-not-cached", - }), - } - - res, err = j2.Build(ctx, g2) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.NoError(t, j2.Discard()) - j2 = nil - - expTarget = newTestExporterTarget() - - _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(true)) - require.NoError(t, err) - - require.Equal(t, len(expTarget.records), 3) - require.Equal(t, called, true) -} - -func TestCacheInputMultipleMaps(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - cacheManager := newTrackingCacheManager(NewInMemoryCacheManager()) - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - DefaultCache: cacheManager, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0", - inputs: []Edge{{ - Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - cacheKeySeeds: []func() string{ - func() string { return "seed2" }, - }, - value: "result1", - }), - }}, - }), - } - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - expTarget := newTestExporterTarget() - - _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(true)) - require.NoError(t, err) - - expTarget.normalize() - require.Equal(t, len(expTarget.records), 3) - - require.NoError(t, j0.Discard()) - j0 = nil - - j1, err := l.NewJob("j1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g1 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0-no-cache", - inputs: []Edge{{ - Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1.changed", - cacheKeySeeds: []func() string{ - func() string { return "seed2" }, - }, - value: "result1-no-cache", - }), - }}, - }), - } - res, err = j1.Build(ctx, g1) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(true)) - require.NoError(t, err) - - expTarget.normalize() - require.Equal(t, 
len(expTarget.records), 3) - - require.NoError(t, j1.Discard()) - j1 = nil -} - -func TestCacheExportingPartialSelector(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - cacheManager := newTrackingCacheManager(NewInMemoryCacheManager()) - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - DefaultCache: cacheManager, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1", - })}, - }, - selectors: map[int]digest.Digest{ - 0: dgst("sel0"), - }, - slowCacheCompute: map[int]ResultBasedCacheFunc{ - 0: digestFromResult, - }, - }), - } - - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.NoError(t, j0.Discard()) - j0 = nil - - expTarget := newTestExporterTarget() - - _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(true)) - require.NoError(t, err) - - expTarget.normalize() - require.Equal(t, len(expTarget.records), 3) - require.Equal(t, expTarget.records[0].results, 1) - require.Equal(t, expTarget.records[1].results, 0) - require.Equal(t, expTarget.records[2].results, 0) - require.Equal(t, expTarget.records[0].links, 2) - require.Equal(t, expTarget.records[1].links, 0) - require.Equal(t, expTarget.records[2].links, 0) - - // repeat so that all coming from cache are retained - j1, err := l.NewJob("j1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g1 := g0 - - res, err = j1.Build(ctx, g1) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.NoError(t, j1.Discard()) - j1 = nil - - expTarget = newTestExporterTarget() - - _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(true)) - require.NoError(t, err) - - expTarget.normalize() - - // the order of the records isn't really significant - require.Equal(t, len(expTarget.records), 3) - require.Equal(t, expTarget.records[0].results, 1) - require.Equal(t, expTarget.records[1].results, 0) - require.Equal(t, expTarget.records[2].results, 0) - require.Equal(t, expTarget.records[0].links, 2) - require.Equal(t, expTarget.records[1].links, 0) - require.Equal(t, expTarget.records[2].links, 0) - - // repeat with forcing a slow key recomputation - j2, err := l.NewJob("j2") - require.NoError(t, err) - - defer func() { - if j2 != nil { - j2.Discard() - } - }() - - g2 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0", - inputs: []Edge{ - {Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1-net", - value: "result1", - })}, - }, - selectors: map[int]digest.Digest{ - 0: dgst("sel0"), - }, - slowCacheCompute: map[int]ResultBasedCacheFunc{ - 0: digestFromResult, - }, - }), - } - - res, err = j2.Build(ctx, g2) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.NoError(t, j2.Discard()) - j2 = nil - - expTarget = newTestExporterTarget() - - _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(true)) - require.NoError(t, err) - - expTarget.normalize() - - // the order of the records isn't really significant - // adds one - require.Equal(t, len(expTarget.records), 4) - require.Equal(t, expTarget.records[0].results, 1) - require.Equal(t, expTarget.records[1].results, 0) - require.Equal(t, 
expTarget.records[2].results, 0) - require.Equal(t, expTarget.records[3].results, 0) - require.Equal(t, expTarget.records[0].links, 3) - require.Equal(t, expTarget.records[1].links, 0) - require.Equal(t, expTarget.records[2].links, 0) - require.Equal(t, expTarget.records[3].links, 0) - - // repeat with a wrapper - j3, err := l.NewJob("j3") - require.NoError(t, err) - - defer func() { - if j3 != nil { - j3.Discard() - } - }() - - g3 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v2", - cacheKeySeed: "seed2", - value: "result2", - inputs: []Edge{g2}, - }, - ), - } - - res, err = j3.Build(ctx, g3) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result2") - - require.NoError(t, j3.Discard()) - j3 = nil - - expTarget = newTestExporterTarget() - - _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(true)) - require.NoError(t, err) - - expTarget.normalize() - - // adds one extra result - // the order of the records isn't really significant - require.Equal(t, len(expTarget.records), 5) - require.Equal(t, expTarget.records[0].results, 1) - require.Equal(t, expTarget.records[1].results, 1) - require.Equal(t, expTarget.records[2].results, 0) - require.Equal(t, expTarget.records[3].results, 0) - require.Equal(t, expTarget.records[4].results, 0) - require.Equal(t, expTarget.records[0].links, 1) - require.Equal(t, expTarget.records[1].links, 3) - require.Equal(t, expTarget.records[2].links, 0) - require.Equal(t, expTarget.records[3].links, 0) - require.Equal(t, expTarget.records[4].links, 0) -} - -func TestCacheExportingMergedKey(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - cacheManager := newTrackingCacheManager(NewInMemoryCacheManager()) - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - DefaultCache: cacheManager, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0", - inputs: []Edge{ - { - Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1", - inputs: []Edge{ - { - Vertex: vtx(vtxOpt{ - name: "v2", - cacheKeySeed: "seed2", - value: "result2", - }), - }, - }, - slowCacheCompute: map[int]ResultBasedCacheFunc{ - 0: digestFromResult, - }, - }), - }, - { - Vertex: vtx(vtxOpt{ - name: "v1-diff", - cacheKeySeed: "seed1", - value: "result1", - inputs: []Edge{ - { - Vertex: vtx(vtxOpt{ - name: "v3", - cacheKeySeed: "seed3", - value: "result2", - }), - }, - }, - slowCacheCompute: map[int]ResultBasedCacheFunc{ - 0: digestFromResult, - }, - }), - }, - }, - }), - } - - res, err := j0.Build(ctx, g0) - require.NoError(t, err) - require.Equal(t, unwrap(res), "result0") - - require.NoError(t, j0.Discard()) - j0 = nil - - expTarget := newTestExporterTarget() - - _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(true)) - require.NoError(t, err) - - expTarget.normalize() - - require.Equal(t, len(expTarget.records), 5) -} - -// moby/buildkit#434 -func TestMergedEdgesLookup(t *testing.T) { - t.Parallel() - - rand.Seed(time.Now().UnixNano()) - - // this test requires multiple runs to trigger the race - for i := 0; i < 20; i++ { - func() { - ctx := context.TODO() - - cacheManager := newTrackingCacheManager(NewInMemoryCacheManager()) - - l := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - DefaultCache: cacheManager, - }) - defer l.Close() - - j0, err := l.NewJob("j0") - require.NoError(t, err) - - defer func() { - if j0 != 
nil { - j0.Discard() - } - }() - - g := Edge{ - Vertex: vtxSum(3, vtxOpt{inputs: []Edge{ - {Vertex: vtxSum(0, vtxOpt{inputs: []Edge{ - {Vertex: vtxSum(2, vtxOpt{inputs: []Edge{ - {Vertex: vtxConst(2, vtxOpt{})}, - }})}, - {Vertex: vtxConst(0, vtxOpt{})}, - }})}, - {Vertex: vtxSum(2, vtxOpt{inputs: []Edge{ - {Vertex: vtxConst(2, vtxOpt{})}, - }})}, - }}), - } - g.Vertex.(*vertexSum).setupCallCounters() - - res, err := j0.Build(ctx, g) - require.NoError(t, err) - require.Equal(t, unwrapInt(res), 11) - require.Equal(t, int64(7), *g.Vertex.(*vertexSum).cacheCallCount) - require.Equal(t, int64(0), cacheManager.loadCounter) - - require.NoError(t, j0.Discard()) - j0 = nil - }() - } -} - -func TestInputRequestDeadlock(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - s := NewSolver(SolverOpt{ - ResolveOpFunc: testOpResolver, - }) - defer s.Close() - - j0, err := s.NewJob("job0") - require.NoError(t, err) - - defer func() { - if j0 != nil { - j0.Discard() - } - }() - - g0 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0", - value: "result0", - inputs: []Edge{ - { - Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1", - }), - }, - { - Vertex: vtx(vtxOpt{ - name: "v2", - cacheKeySeed: "seed2", - value: "result2", - }), - }, - }, - }), - } - - _, err = j0.Build(ctx, g0) - require.NoError(t, err) - require.NoError(t, j0.Discard()) - j0 = nil - - j1, err := s.NewJob("job1") - require.NoError(t, err) - - defer func() { - if j1 != nil { - j1.Discard() - } - }() - - g1 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0-1", - value: "result0", - inputs: []Edge{ - { - Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1-1", - value: "result1", - }), - }, - { - Vertex: vtx(vtxOpt{ - name: "v2", - cacheKeySeed: "seed2-1", - value: "result2", - }), - }, - }, - }), - } - - _, err = j1.Build(ctx, g1) - require.NoError(t, err) - require.NoError(t, j1.Discard()) - j1 = nil - - j2, err := s.NewJob("job2") - require.NoError(t, err) - - defer func() { - if j2 != nil { - j2.Discard() - } - }() - - g2 := Edge{ - Vertex: vtx(vtxOpt{ - name: "v0", - cacheKeySeed: "seed0-1", - value: "result0", - inputs: []Edge{ - { - Vertex: vtx(vtxOpt{ - name: "v1", - cacheKeySeed: "seed1", - value: "result1", - }), - }, - { - Vertex: vtx(vtxOpt{ - name: "v2", - cacheKeySeed: "seed2-1", - value: "result2", - }), - }, - }, - slowCacheCompute: map[int]ResultBasedCacheFunc{ - 1: digestFromResult, - }, - }), - } - - _, err = j2.Build(ctx, g2) - require.NoError(t, err) - require.NoError(t, j2.Discard()) - j2 = nil -} - -func generateSubGraph(nodes int) (Edge, int) { - if nodes == 1 { - value := rand.Int() % 500 - return Edge{Vertex: vtxConst(value, vtxOpt{})}, value - } - spread := rand.Int()%5 + 2 - inc := int(math.Ceil(float64(nodes) / float64(spread))) - if inc > nodes { - inc = nodes - } - added := 1 - value := 0 - inputs := []Edge{} - i := 0 - for { - i++ - if added >= nodes { - break - } - if added+inc > nodes { - inc = nodes - added - } - e, v := generateSubGraph(inc) - inputs = append(inputs, e) - value += v - added += inc - } - extra := rand.Int() % 500 - value += extra - return Edge{Vertex: vtxSum(extra, vtxOpt{inputs: inputs})}, value -} - -type vtxOpt struct { - name string - cacheKeySeed string - cacheKeySeeds []func() string - execDelay time.Duration - cacheDelay time.Duration - cachePreFunc func(context.Context) error - execPreFunc func(context.Context) error - inputs []Edge - value string - slowCacheCompute map[int]ResultBasedCacheFunc - selectors 
map[int]digest.Digest - cacheSource CacheManager - ignoreCache bool -} - -func vtx(opt vtxOpt) *vertex { - if opt.name == "" { - opt.name = identity.NewID() - } - if opt.cacheKeySeed == "" { - opt.cacheKeySeed = identity.NewID() - } - return &vertex{opt: opt} -} - -type vertex struct { - opt vtxOpt - - cacheCallCount *int64 - execCallCount *int64 -} - -func (v *vertex) Digest() digest.Digest { - return digest.FromBytes([]byte(v.opt.name)) -} -func (v *vertex) Sys() interface{} { - return v -} -func (v *vertex) Inputs() []Edge { - return v.opt.inputs -} -func (v *vertex) Name() string { - return v.opt.name -} -func (v *vertex) Options() VertexOptions { - var cache []CacheManager - if v.opt.cacheSource != nil { - cache = append(cache, v.opt.cacheSource) - } - return VertexOptions{ - CacheSources: cache, - IgnoreCache: v.opt.ignoreCache, - } -} - -func (v *vertex) setupCallCounters() { - var cacheCount int64 - var execCount int64 - - v.setCallCounters(&cacheCount, &execCount) -} - -func (v *vertex) setCallCounters(cacheCount, execCount *int64) { - v.cacheCallCount = cacheCount - v.execCallCount = execCount - - for _, inp := range v.opt.inputs { - var v *vertex - switch vv := inp.Vertex.(type) { - case *vertex: - v = vv - case *vertexSum: - v = vv.vertex - case *vertexConst: - v = vv.vertex - case *vertexSubBuild: - v = vv.vertex - } - v.setCallCounters(cacheCount, execCount) - } -} - -func (v *vertex) cacheMap(ctx context.Context) error { - if f := v.opt.cachePreFunc; f != nil { - if err := f(ctx); err != nil { - return err - } - } - if v.cacheCallCount != nil { - atomic.AddInt64(v.cacheCallCount, 1) - } - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - select { - case <-time.After(v.opt.cacheDelay): - case <-ctx.Done(): - return ctx.Err() - } - return nil -} - -func (v *vertex) CacheMap(ctx context.Context, index int) (*CacheMap, bool, error) { - if index == 0 { - if err := v.cacheMap(ctx); err != nil { - return nil, false, err - } - return v.makeCacheMap(), len(v.opt.cacheKeySeeds) == index, nil - } - return &CacheMap{ - Digest: digest.FromBytes([]byte(fmt.Sprintf("seed:%s", v.opt.cacheKeySeeds[index-1]()))), - }, len(v.opt.cacheKeySeeds) == index, nil -} - -func (v *vertex) exec(ctx context.Context, inputs []Result) error { - if len(inputs) != len(v.Inputs()) { - return errors.Errorf("invalid number of inputs") - } - if f := v.opt.execPreFunc; f != nil { - if err := f(ctx); err != nil { - return err - } - } - if v.execCallCount != nil { - atomic.AddInt64(v.execCallCount, 1) - } - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - select { - case <-time.After(v.opt.execDelay): - case <-ctx.Done(): - return ctx.Err() - } - return nil -} - -func (v *vertex) Exec(ctx context.Context, inputs []Result) (outputs []Result, err error) { - if err := v.exec(ctx, inputs); err != nil { - return nil, err - } - return []Result{&dummyResult{id: identity.NewID(), value: v.opt.value}}, nil -} - -func (v *vertex) makeCacheMap() *CacheMap { - m := &CacheMap{ - Digest: digest.FromBytes([]byte(fmt.Sprintf("seed:%s", v.opt.cacheKeySeed))), - Deps: make([]struct { - Selector digest.Digest - ComputeDigestFunc ResultBasedCacheFunc - }, len(v.Inputs())), - } - for i, f := range v.opt.slowCacheCompute { - m.Deps[i].ComputeDigestFunc = f - } - for i, dgst := range v.opt.selectors { - m.Deps[i].Selector = dgst - } - return m -} - -// vtxConst returns a vertex that outputs a constant integer -func vtxConst(v int, opt vtxOpt) *vertexConst { - if opt.cacheKeySeed == "" { - opt.cacheKeySeed 
= fmt.Sprintf("const-%d", v) - } - if opt.name == "" { - opt.name = opt.cacheKeySeed + "-" + identity.NewID() - } - return &vertexConst{vertex: vtx(opt), value: v} -} - -type vertexConst struct { - *vertex - value int -} - -func (v *vertexConst) Sys() interface{} { - return v -} - -func (v *vertexConst) Exec(ctx context.Context, inputs []Result) (outputs []Result, err error) { - if err := v.exec(ctx, inputs); err != nil { - return nil, err - } - return []Result{&dummyResult{id: identity.NewID(), intValue: v.value}}, nil -} - -// vtxSum returns a vertex that ourputs sum of its inputs plus a constant -func vtxSum(v int, opt vtxOpt) *vertexSum { - if opt.cacheKeySeed == "" { - opt.cacheKeySeed = fmt.Sprintf("sum-%d-%d", v, len(opt.inputs)) - } - if opt.name == "" { - opt.name = opt.cacheKeySeed + "-" + identity.NewID() - } - return &vertexSum{vertex: vtx(opt), value: v} -} - -type vertexSum struct { - *vertex - value int -} - -func (v *vertexSum) Sys() interface{} { - return v -} - -func (v *vertexSum) Exec(ctx context.Context, inputs []Result) (outputs []Result, err error) { - if err := v.exec(ctx, inputs); err != nil { - return nil, err - } - s := v.value - for _, inp := range inputs { - r, ok := inp.Sys().(*dummyResult) - if !ok { - return nil, errors.Errorf("invalid input type: %T", inp.Sys()) - } - s += r.intValue - } - return []Result{&dummyResult{id: identity.NewID(), intValue: s}}, nil -} - -func vtxSubBuild(g Edge, opt vtxOpt) *vertexSubBuild { - if opt.cacheKeySeed == "" { - opt.cacheKeySeed = fmt.Sprintf("sum-%s", identity.NewID()) - } - if opt.name == "" { - opt.name = opt.cacheKeySeed + "-" + identity.NewID() - } - return &vertexSubBuild{vertex: vtx(opt), g: g} -} - -type vertexSubBuild struct { - *vertex - g Edge - b Builder -} - -func (v *vertexSubBuild) Sys() interface{} { - return v -} - -func (v *vertexSubBuild) Exec(ctx context.Context, inputs []Result) (outputs []Result, err error) { - if err := v.exec(ctx, inputs); err != nil { - return nil, err - } - res, err := v.b.Build(ctx, v.g) - if err != nil { - return nil, err - } - return []Result{res}, nil -} - -func printGraph(e Edge, pfx string) { - name := e.Vertex.Name() - fmt.Printf("%s %d %s\n", pfx, e.Index, name) - for _, inp := range e.Vertex.Inputs() { - printGraph(inp, pfx+"-->") - } -} - -type dummyResult struct { - id string - value string - intValue int -} - -func (r *dummyResult) ID() string { return r.id } -func (r *dummyResult) Release(context.Context) error { return nil } -func (r *dummyResult) Sys() interface{} { return r } - -func testOpResolver(v Vertex, b Builder) (Op, error) { - if op, ok := v.Sys().(Op); ok { - if vtx, ok := op.(*vertexSubBuild); ok { - vtx.b = b - } - return op, nil - } - - return nil, errors.Errorf("invalid vertex") -} - -func unwrap(res Result) string { - r, ok := res.Sys().(*dummyResult) - if !ok { - return "unwrap-error" - } - return r.value -} - -func unwrapInt(res Result) int { - r, ok := res.Sys().(*dummyResult) - if !ok { - return -1e6 - } - return r.intValue -} - -func blockingFuncion(i int) func(context.Context) error { - limit := int64(i) - block := make(chan struct{}) - return func(context.Context) error { - if atomic.AddInt64(&limit, -1) == 0 { - close(block) - } - <-block - return nil - } -} - -func newTrackingCacheManager(cm CacheManager) *trackingCacheManager { - return &trackingCacheManager{CacheManager: cm} -} - -type trackingCacheManager struct { - CacheManager - loadCounter int64 -} - -func (cm *trackingCacheManager) Load(ctx context.Context, rec *CacheRecord) 
(Result, error) { - atomic.AddInt64(&cm.loadCounter, 1) - return cm.CacheManager.Load(ctx, rec) -} - -func digestFromResult(ctx context.Context, res Result) (digest.Digest, error) { - return digest.FromBytes([]byte(unwrap(res))), nil -} - -func testExporterOpts(all bool) CacheExportOpt { - mode := CacheExportModeMin - if all { - mode = CacheExportModeMax - } - return CacheExportOpt{ - Convert: func(ctx context.Context, res Result) (*Remote, error) { - if dr, ok := res.Sys().(*dummyResult); ok { - return &Remote{Descriptors: []ocispec.Descriptor{{ - Annotations: map[string]string{"value": fmt.Sprintf("%d", dr.intValue)}, - }}}, nil - } - return nil, nil - }, - Mode: mode, - } -} - -func newTestExporterTarget() *testExporterTarget { - return &testExporterTarget{ - visited: map[interface{}]struct{}{}, - } -} - -type testExporterTarget struct { - visited map[interface{}]struct{} - records []*testExporterRecord -} - -func (t *testExporterTarget) Add(dgst digest.Digest) CacheExporterRecord { - r := &testExporterRecord{dgst: dgst} - t.records = append(t.records, r) - return r -} -func (t *testExporterTarget) Visit(v interface{}) { - t.visited[v] = struct{}{} - -} -func (t *testExporterTarget) Visited(v interface{}) bool { - _, ok := t.visited[v] - return ok -} - -func (t *testExporterTarget) normalize() { - m := map[digest.Digest]struct{}{} - rec := make([]*testExporterRecord, 0, len(t.records)) - for _, r := range t.records { - if _, ok := m[r.dgst]; ok { - for _, r2 := range t.records { - delete(r2.linkMap, r.dgst) - r2.links = len(r2.linkMap) - } - continue - } - m[r.dgst] = struct{}{} - rec = append(rec, r) - } - t.records = rec -} - -type testExporterRecord struct { - dgst digest.Digest - results int - links int - linkMap map[digest.Digest]struct{} -} - -func (r *testExporterRecord) AddResult(createdAt time.Time, result *Remote) { - r.results++ -} - -func (r *testExporterRecord) LinkFrom(src CacheExporterRecord, index int, selector string) { - if s, ok := src.(*testExporterRecord); ok { - if r.linkMap == nil { - r.linkMap = map[digest.Digest]struct{}{} - } - if _, ok := r.linkMap[s.dgst]; !ok { - r.linkMap[s.dgst] = struct{}{} - r.links++ - } - } -} diff --git a/vendor/github.com/moby/buildkit/solver/testutil/cachestorage_testsuite.go b/vendor/github.com/moby/buildkit/solver/testutil/cachestorage_testsuite.go deleted file mode 100644 index e87fd979bfc8..000000000000 --- a/vendor/github.com/moby/buildkit/solver/testutil/cachestorage_testsuite.go +++ /dev/null @@ -1,387 +0,0 @@ -package testutil - -import ( - "fmt" - "reflect" - "runtime" - "strings" - "testing" - "time" - - "github.com/moby/buildkit/solver" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -func RunCacheStorageTests(t *testing.T, st func() (solver.CacheKeyStorage, func())) { - for _, tc := range []func(*testing.T, solver.CacheKeyStorage){ - testResults, - testLinks, - testResultReleaseSingleLevel, - testResultReleaseMultiLevel, - testBacklinks, - testWalkIDsByResult, - } { - runStorageTest(t, tc, st) - } -} - -func runStorageTest(t *testing.T, fn func(t *testing.T, st solver.CacheKeyStorage), st func() (solver.CacheKeyStorage, func())) { - require.True(t, t.Run(getFunctionName(fn), func(t *testing.T) { - s, cleanup := st() - defer cleanup() - fn(t, s) - })) -} - -func testResults(t *testing.T, st solver.CacheKeyStorage) { - t.Parallel() - err := st.AddResult("foo", solver.CacheResult{ - ID: "foo0", - CreatedAt: time.Now(), - }) - require.NoError(t, err) - - 
err = st.AddResult("foo", solver.CacheResult{ - ID: "foo1", - CreatedAt: time.Now(), - }) - require.NoError(t, err) - - err = st.AddResult("bar", solver.CacheResult{ - ID: "bar0", - CreatedAt: time.Now(), - }) - require.NoError(t, err) - - m := map[string]solver.CacheResult{} - err = st.WalkResults("foo", func(r solver.CacheResult) error { - m[r.ID] = r - return nil - }) - require.NoError(t, err) - - require.Equal(t, len(m), 2) - f0, ok := m["foo0"] - require.True(t, ok) - f1, ok := m["foo1"] - require.True(t, ok) - require.True(t, f0.CreatedAt.Before(f1.CreatedAt)) - - m = map[string]solver.CacheResult{} - err = st.WalkResults("bar", func(r solver.CacheResult) error { - m[r.ID] = r - return nil - }) - require.NoError(t, err) - - require.Equal(t, len(m), 1) - _, ok = m["bar0"] - require.True(t, ok) - - // empty result - err = st.WalkResults("baz", func(r solver.CacheResult) error { - require.Fail(t, "unreachable") - return nil - }) - require.NoError(t, err) - - res, err := st.Load("foo", "foo1") - require.NoError(t, err) - - require.Equal(t, res.ID, "foo1") - - _, err = st.Load("foo1", "foo1") - require.Error(t, err) - require.Equal(t, errors.Cause(err), solver.ErrNotFound) - - _, err = st.Load("foo", "foo2") - require.Error(t, err) - require.Equal(t, errors.Cause(err), solver.ErrNotFound) -} - -func testLinks(t *testing.T, st solver.CacheKeyStorage) { - t.Parallel() - - l0 := solver.CacheInfoLink{ - Input: 0, Output: 1, Digest: digest.FromBytes([]byte(">target0")), - } - err := st.AddLink("foo", l0, "target0") - require.NoError(t, err) - - err = st.AddLink("bar", l0, "target0-second") - require.NoError(t, err) - - m := map[string]struct{}{} - err = st.WalkLinks("foo", l0, func(id string) error { - m[id] = struct{}{} - return nil - }) - require.NoError(t, err) - - require.Equal(t, len(m), 1) - _, ok := m["target0"] - require.True(t, ok) - - l1 := solver.CacheInfoLink{ - Input: 0, Output: 1, Digest: digest.FromBytes([]byte(">target1")), - } - m = map[string]struct{}{} - err = st.WalkLinks("foo", l1, func(id string) error { - m[id] = struct{}{} - return nil - }) - require.NoError(t, err) - require.Equal(t, len(m), 0) - - err = st.AddLink("foo", l1, "target1") - require.NoError(t, err) - - m = map[string]struct{}{} - err = st.WalkLinks("foo", l1, func(id string) error { - m[id] = struct{}{} - return nil - }) - require.NoError(t, err) - require.Equal(t, len(m), 1) - - _, ok = m["target1"] - require.True(t, ok) - - err = st.AddLink("foo", l1, "target1-second") - require.NoError(t, err) - - m = map[string]struct{}{} - err = st.WalkLinks("foo", l1, func(id string) error { - m[id] = struct{}{} - return nil - }) - require.NoError(t, err) - require.Equal(t, len(m), 2) - _, ok = m["target1"] - require.True(t, ok) - _, ok = m["target1-second"] - require.True(t, ok) -} - -func testResultReleaseSingleLevel(t *testing.T, st solver.CacheKeyStorage) { - t.Parallel() - - err := st.AddResult("foo", solver.CacheResult{ - ID: "foo0", - CreatedAt: time.Now(), - }) - require.NoError(t, err) - - err = st.AddResult("foo", solver.CacheResult{ - ID: "foo1", - CreatedAt: time.Now(), - }) - require.NoError(t, err) - - err = st.Release("foo0") - require.NoError(t, err) - - m := map[string]struct{}{} - st.WalkResults("foo", func(res solver.CacheResult) error { - m[res.ID] = struct{}{} - return nil - }) - - require.Equal(t, len(m), 1) - _, ok := m["foo1"] - require.True(t, ok) - - err = st.Release("foo1") - require.NoError(t, err) - - m = map[string]struct{}{} - st.WalkResults("foo", func(res solver.CacheResult) error { 
- m[res.ID] = struct{}{} - return nil - }) - - require.Equal(t, len(m), 0) - - st.Walk(func(id string) error { - require.False(t, true, fmt.Sprintf("id %s should have been released", id)) - return nil - }) -} - -func testBacklinks(t *testing.T, st solver.CacheKeyStorage) { - t.Parallel() - - err := st.AddResult("foo", solver.CacheResult{ - ID: "foo-result", - CreatedAt: time.Now(), - }) - require.NoError(t, err) - - err = st.AddResult("sub0", solver.CacheResult{ - ID: "sub0-result", - CreatedAt: time.Now(), - }) - require.NoError(t, err) - - l0 := solver.CacheInfoLink{ - Input: 0, Output: 1, Digest: digest.FromBytes([]byte("to-sub0")), - } - err = st.AddLink("foo", l0, "sub0") - require.NoError(t, err) - - backlinks := 0 - st.WalkBacklinks("sub0", func(id string, link solver.CacheInfoLink) error { - require.Equal(t, id, "foo") - require.Equal(t, link.Input, solver.Index(0)) - require.Equal(t, link.Digest, rootKey(digest.FromBytes([]byte("to-sub0")), 1)) - backlinks++ - return nil - }) - require.Equal(t, backlinks, 1) -} - -func testResultReleaseMultiLevel(t *testing.T, st solver.CacheKeyStorage) { - t.Parallel() - - err := st.AddResult("foo", solver.CacheResult{ - ID: "foo-result", - CreatedAt: time.Now(), - }) - require.NoError(t, err) - - err = st.AddResult("sub0", solver.CacheResult{ - ID: "sub0-result", - CreatedAt: time.Now(), - }) - require.NoError(t, err) - - l0 := solver.CacheInfoLink{ - Input: 0, Output: 1, Digest: digest.FromBytes([]byte("to-sub0")), - } - err = st.AddLink("foo", l0, "sub0") - require.NoError(t, err) - - err = st.AddResult("sub1", solver.CacheResult{ - ID: "sub1-result", - CreatedAt: time.Now(), - }) - require.NoError(t, err) - - err = st.AddLink("foo", l0, "sub1") - require.NoError(t, err) - - // delete one sub doesn't delete parent - - err = st.Release("sub0-result") - require.NoError(t, err) - - m := map[string]struct{}{} - err = st.WalkResults("foo", func(res solver.CacheResult) error { - m[res.ID] = struct{}{} - return nil - }) - require.NoError(t, err) - - require.Equal(t, len(m), 1) - _, ok := m["foo-result"] - require.True(t, ok) - - require.False(t, st.Exists("sub0")) - - m = map[string]struct{}{} - err = st.WalkLinks("foo", l0, func(id string) error { - m[id] = struct{}{} - return nil - }) - require.NoError(t, err) - require.Equal(t, len(m), 1) - - _, ok = m["sub1"] - require.True(t, ok) - - // release foo removes the result but doesn't break the chain - - err = st.Release("foo-result") - require.NoError(t, err) - - require.True(t, st.Exists("foo")) - - m = map[string]struct{}{} - err = st.WalkResults("foo", func(res solver.CacheResult) error { - m[res.ID] = struct{}{} - return nil - }) - require.NoError(t, err) - - require.Equal(t, len(m), 0) - - m = map[string]struct{}{} - err = st.WalkLinks("foo", l0, func(id string) error { - m[id] = struct{}{} - return nil - }) - require.NoError(t, err) - require.Equal(t, len(m), 1) - - // release sub1 now releases foo as well - err = st.Release("sub1-result") - require.NoError(t, err) - - require.False(t, st.Exists("sub1")) - require.False(t, st.Exists("foo")) - - st.Walk(func(id string) error { - require.False(t, true, fmt.Sprintf("id %s should have been released", id)) - return nil - }) -} - -func testWalkIDsByResult(t *testing.T, st solver.CacheKeyStorage) { - t.Parallel() - - err := st.AddResult("foo", solver.CacheResult{ - ID: "foo-result", - CreatedAt: time.Now(), - }) - require.NoError(t, err) - - err = st.AddResult("foo2", solver.CacheResult{ - ID: "foo-result", - CreatedAt: time.Now(), - }) - 
require.NoError(t, err) - - err = st.AddResult("bar", solver.CacheResult{ - ID: "bar-result", - CreatedAt: time.Now(), - }) - require.NoError(t, err) - - m := map[string]struct{}{} - err = st.WalkIDsByResult("foo-result", func(id string) error { - m[id] = struct{}{} - return nil - }) - require.NoError(t, err) - - _, ok := m["foo"] - require.True(t, ok) - - _, ok = m["foo2"] - require.True(t, ok) - - _, ok = m["bar"] - require.False(t, ok) -} - -func getFunctionName(i interface{}) string { - fullname := runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() - dot := strings.LastIndex(fullname, ".") + 1 - return strings.Title(fullname[dot:]) -} - -func rootKey(dgst digest.Digest, output solver.Index) digest.Digest { - return digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, output))) -} diff --git a/vendor/github.com/moby/buildkit/solver/testutil/memorycachestorage_test.go b/vendor/github.com/moby/buildkit/solver/testutil/memorycachestorage_test.go deleted file mode 100644 index ea1f6be464fe..000000000000 --- a/vendor/github.com/moby/buildkit/solver/testutil/memorycachestorage_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package testutil - -import ( - "testing" - - "github.com/moby/buildkit/solver" -) - -func TestMemoryCacheStorage(t *testing.T) { - RunCacheStorageTests(t, func() (solver.CacheKeyStorage, func()) { - return solver.NewInMemoryCacheStorage(), func() {} - }) -} diff --git a/vendor/github.com/moby/buildkit/solver/types.go b/vendor/github.com/moby/buildkit/solver/types.go deleted file mode 100644 index ccf4cad012c6..000000000000 --- a/vendor/github.com/moby/buildkit/solver/types.go +++ /dev/null @@ -1,168 +0,0 @@ -package solver - -import ( - "context" - "time" - - "github.com/containerd/containerd/content" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// Vertex is one node in the build graph -type Vertex interface { - // Digest is a content-addressable vertex identifier - Digest() digest.Digest - // Sys returns an internal value that is used to execute the vertex. Usually - // this is captured by the operation resolver method during solve. - Sys() interface{} - Options() VertexOptions - // Array of edges the current vertex depends on.
- Inputs() []Edge - Name() string -} - -// Index is an index value for an output edge -type Index int - -// Edge is a path to a specific output of the vertex -type Edge struct { - Index Index - Vertex Vertex -} - -// VertexOptions has optional metadata for the vertex that is not contained in digest -type VertexOptions struct { - IgnoreCache bool - CacheSources []CacheManager - Description map[string]string // text values with no special meaning for solver - ExportCache *bool - // WorkerConstraint -} - -// Result is an abstract return value for a solve -type Result interface { - ID() string - Release(context.Context) error - Sys() interface{} -} - -// CachedResult is a result connected with its cache key -type CachedResult interface { - Result - CacheKeys() []ExportableCacheKey -} - -// CacheExportMode is the type for setting cache exporting modes -type CacheExportMode int - -const ( - // CacheExportModeMin exports a topmost allowed vertex and its dependencies - // that already have transferable layers - CacheExportModeMin CacheExportMode = iota - // CacheExportModeMax exports all possible non-root vertexes - CacheExportModeMax - // CacheExportModeRemoteOnly only exports vertexes that already have - // transferable layers - CacheExportModeRemoteOnly -) - -// CacheExportOpt defines options for exporting build cache -type CacheExportOpt struct { - // Convert can convert a build result to transferable object - Convert func(context.Context, Result) (*Remote, error) - // Mode defines a cache export algorithm - Mode CacheExportMode -} - -// CacheExporter can export the artifacts of the build chain -type CacheExporter interface { - ExportTo(ctx context.Context, t CacheExporterTarget, opt CacheExportOpt) ([]CacheExporterRecord, error) -} - -// CacheExporterTarget defines object capable of receiving exports -type CacheExporterTarget interface { - Add(dgst digest.Digest) CacheExporterRecord - Visit(interface{}) - Visited(interface{}) bool -} - -// CacheExporterRecord is a single object being exported -type CacheExporterRecord interface { - AddResult(createdAt time.Time, result *Remote) - LinkFrom(src CacheExporterRecord, index int, selector string) -} - -// Remote is a descriptor or a list of stacked descriptors that can be pulled -// from a content provider -// TODO: add closer to keep referenced data from getting deleted -type Remote struct { - Descriptors []ocispec.Descriptor - Provider content.Provider -} - -// CacheLink is a link between two cache records -type CacheLink struct { - Source digest.Digest `json:",omitempty"` - Input Index `json:",omitempty"` - Output Index `json:",omitempty"` - Base digest.Digest `json:",omitempty"` - Selector digest.Digest `json:",omitempty"` -} - -// Op is an implementation for running a vertex -type Op interface { - // CacheMap returns structure describing how the operation is cached. - // Currently only roots are allowed to return multiple cache maps per op. - CacheMap(context.Context, int) (*CacheMap, bool, error) - // Exec runs an operation given results from previous operations. - Exec(ctx context.Context, inputs []Result) (outputs []Result, err error) -} - -type ResultBasedCacheFunc func(context.Context, Result) (digest.Digest, error) - -type CacheMap struct { - // Digest is a base digest for operation that needs to be combined with - // inputs cache or selectors for dependencies.
- Digest digest.Digest - Deps []struct { - // Optional digest that is merged with the cache key of the input - Selector digest.Digest - // Optional function that returns a digest for the input based on its - // return value - ComputeDigestFunc ResultBasedCacheFunc - } -} - -// ExportableCacheKey is a cache key connected with an exporter that can export -// a chain of cache records pointing to that key -type ExportableCacheKey struct { - *CacheKey - Exporter CacheExporter -} - -// CacheRecord is an identifier for loading in cache -type CacheRecord struct { - ID string - Size int - CreatedAt time.Time - Priority int - - cacheManager *cacheManager - key *CacheKey -} - -// CacheManager implements build cache backend -type CacheManager interface { - // ID is used to identify cache providers that are backed by the same source - // to avoid duplicate calls to the same provider - ID() string - // Query searches for cache paths from one cache key to the output of a - // possible match. - Query(inp []CacheKeyWithSelector, inputIndex Index, dgst digest.Digest, outputIndex Index) ([]*CacheKey, error) - Records(ck *CacheKey) ([]*CacheRecord, error) - // Load pulls and returns the cached result - Load(ctx context.Context, rec *CacheRecord) (Result, error) - // Save saves a result based on a cache key - Save(key *CacheKey, s Result) (*ExportableCacheKey, error) -} diff --git a/vendor/github.com/moby/buildkit/source/containerimage/pull.go b/vendor/github.com/moby/buildkit/source/containerimage/pull.go deleted file mode 100644 index 19325d619e4e..000000000000 --- a/vendor/github.com/moby/buildkit/source/containerimage/pull.go +++ /dev/null @@ -1,233 +0,0 @@ -package containerimage - -import ( - "context" - "encoding/json" - "runtime" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/diff" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/platforms" - "github.com/docker/distribution/reference" - "github.com/moby/buildkit/cache" - gw "github.com/moby/buildkit/frontend/gateway/client" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/source" - "github.com/moby/buildkit/util/flightcontrol" - "github.com/moby/buildkit/util/imageutil" - "github.com/moby/buildkit/util/pull" - "github.com/moby/buildkit/util/resolver" - "github.com/moby/buildkit/util/winlayers" - digest "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/identity" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// TODO: break apart containerd specifics like contentstore so the resolver -// code can be used with any implementation - -type SourceOpt struct { - SessionManager *session.Manager - Snapshotter snapshot.Snapshotter - ContentStore content.Store - Applier diff.Applier - CacheAccessor cache.Accessor - ImageStore images.Store // optional - ResolverOpt resolver.ResolveOptionsFunc -} - -type imageSource struct { - SourceOpt - g flightcontrol.Group -} - -func NewSource(opt SourceOpt) (source.Source, error) { - is := &imageSource{ - SourceOpt: opt, - } - - return is, nil -} - -func (is *imageSource) ID() string { - return source.DockerImageScheme -} - -func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error) { - type t struct { - dgst digest.Digest - dt []byte - } - key := ref - if platform := opt.Platform; platform != nil { - key += platforms.Format(*platform) - } - - rm, err :=
source.ParseImageResolveMode(opt.ResolveMode) - if err != nil { - return "", nil, err - } - - res, err := is.g.Do(ctx, key, func(ctx context.Context) (interface{}, error) { - dgst, dt, err := imageutil.Config(ctx, ref, pull.NewResolver(ctx, is.ResolverOpt, is.SessionManager, is.ImageStore, rm, ref), is.ContentStore, opt.Platform) - if err != nil { - return nil, err - } - return &t{dgst: dgst, dt: dt}, nil - }) - if err != nil { - return "", nil, err - } - typed := res.(*t) - return typed.dgst, typed.dt, nil -} - -func (is *imageSource) Resolve(ctx context.Context, id source.Identifier) (source.SourceInstance, error) { - imageIdentifier, ok := id.(*source.ImageIdentifier) - if !ok { - return nil, errors.Errorf("invalid image identifier %v", id) - } - - platform := platforms.DefaultSpec() - if imageIdentifier.Platform != nil { - platform = *imageIdentifier.Platform - } - - pullerUtil := &pull.Puller{ - Snapshotter: is.Snapshotter, - ContentStore: is.ContentStore, - Applier: is.Applier, - Src: imageIdentifier.Reference, - Resolver: pull.NewResolver(ctx, is.ResolverOpt, is.SessionManager, is.ImageStore, imageIdentifier.ResolveMode, imageIdentifier.Reference.String()), - Platform: &platform, - } - p := &puller{ - CacheAccessor: is.CacheAccessor, - Puller: pullerUtil, - Platform: platform, - id: imageIdentifier, - } - return p, nil -} - -type puller struct { - CacheAccessor cache.Accessor - Platform specs.Platform - id *source.ImageIdentifier - *pull.Puller -} - -func mainManifestKey(ctx context.Context, desc specs.Descriptor, platform specs.Platform) (digest.Digest, error) { - dt, err := json.Marshal(struct { - Digest digest.Digest - OS string - Arch string - Variant string `json:",omitempty"` - }{ - Digest: desc.Digest, - OS: platform.OS, - Arch: platform.Architecture, - Variant: platform.Variant, - }) - if err != nil { - return "", err - } - return digest.FromBytes(dt), nil -} - -func (p *puller) CacheKey(ctx context.Context, index int) (string, bool, error) { - _, desc, err := p.Puller.Resolve(ctx) - if err != nil { - return "", false, err - } - if index == 0 || desc.Digest == "" { - k, err := mainManifestKey(ctx, desc, p.Platform) - if err != nil { - return "", false, err - } - return k.String(), false, nil - } - ref, err := reference.ParseNormalizedNamed(p.Src.String()) - if err != nil { - return "", false, err - } - ref, err = reference.WithDigest(ref, desc.Digest) - if err != nil { - return "", false, nil - } - _, dt, err := imageutil.Config(ctx, ref.String(), p.Resolver, p.ContentStore, &p.Platform) - if err != nil { - // this happens on schema1 images - k, err := mainManifestKey(ctx, desc, p.Platform) - if err != nil { - return "", false, err - } - return k.String(), true, nil - } - return cacheKeyFromConfig(dt).String(), true, nil -} - -func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) { - layerNeedsTypeWindows := false - if platform := p.Puller.Platform; platform != nil { - if platform.OS == "windows" && runtime.GOOS != "windows" { - ctx = winlayers.UseWindowsLayerMode(ctx) - layerNeedsTypeWindows = true - } - } - - pulled, err := p.Puller.Pull(ctx) - if err != nil { - return nil, err - } - if pulled.ChainID == "" { - return nil, nil - } - ref, err := p.CacheAccessor.GetFromSnapshotter(ctx, string(pulled.ChainID), cache.WithDescription("pulled from "+pulled.Ref)) - if err != nil { - return nil, err - } - - if layerNeedsTypeWindows && ref != nil { - if err := markRefLayerTypeWindows(ref); err != nil { - ref.Release(context.TODO()) - return nil, err - } - } - - 
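cacheKeyFromConfig, defined at the end of this file, keys an image by the chain ID of its RootFS.DiffIDs rather than by the raw config bytes, so the same filesystem pulled under different names caches identically. A standalone sketch of what identity.ChainID computes is below; the layer digests are hypothetical, and the real values come from the image config's RootFS.DiffIDs.

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
)

func main() {
	// Hypothetical diff IDs standing in for decompressed layer digests.
	diffIDs := []digest.Digest{
		digest.FromString("layer-0"),
		digest.FromString("layer-1"),
	}

	// Per the OCI image spec: ChainID(L0) == L0, and
	// ChainID(L0,L1) == Digest(ChainID(L0) + " " + L1), folded left to right.
	fmt.Println(identity.ChainID(diffIDs))
}
```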
if p.id.RecordType != "" && cache.GetRecordType(ref) == "" { - if err := cache.SetRecordType(ref, p.id.RecordType); err != nil { - ref.Release(context.TODO()) - return nil, err - } - } - - return ref, nil -} - -func markRefLayerTypeWindows(ref cache.ImmutableRef) error { - if parent := ref.Parent(); parent != nil { - defer parent.Release(context.TODO()) - if err := markRefLayerTypeWindows(parent); err != nil { - return err - } - } - return cache.SetLayerType(ref, "windows") -} - -// cacheKeyFromConfig returns a stable digest from image config. If the image config -// is a known OCI image, we will use the chainID of its layers. -func cacheKeyFromConfig(dt []byte) digest.Digest { - var img specs.Image - err := json.Unmarshal(dt, &img) - if err != nil { - return digest.FromBytes(dt) - } - if img.RootFS.Type != "layers" { - return digest.FromBytes(dt) - } - return identity.ChainID(img.RootFS.DiffIDs) -} diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource.go b/vendor/github.com/moby/buildkit/source/git/gitsource.go deleted file mode 100644 index 47d304b79235..000000000000 --- a/vendor/github.com/moby/buildkit/source/git/gitsource.go +++ /dev/null @@ -1,407 +0,0 @@ -package git - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "regexp" - "strings" - - "github.com/docker/docker/pkg/locker" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/source" - "github.com/moby/buildkit/util/progress/logs" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - bolt "go.etcd.io/bbolt" -) - -var validHex = regexp.MustCompile(`^[a-f0-9]{40}$`) - -type Opt struct { - CacheAccessor cache.Accessor - MetadataStore *metadata.Store -} - -type gitSource struct { - md *metadata.Store - cache cache.Accessor - locker *locker.Locker -} - -func NewSource(opt Opt) (source.Source, error) { - gs := &gitSource{ - md: opt.MetadataStore, - cache: opt.CacheAccessor, - locker: locker.New(), - } - - if err := exec.Command("git", "version").Run(); err != nil { - return nil, errors.Wrap(err, "failed to find git binary") - } - - return gs, nil -} - -func (gs *gitSource) ID() string { - return source.GitScheme -} - -// needs to be called with repo lock -func (gs *gitSource) mountRemote(ctx context.Context, remote string) (target string, release func(), retErr error) { - remoteKey := "git-remote::" + remote - - sis, err := gs.md.Search(remoteKey) - if err != nil { - return "", nil, errors.Wrapf(err, "failed to search metadata for %s", remote) - } - - var remoteRef cache.MutableRef - for _, si := range sis { - remoteRef, err = gs.cache.GetMutable(ctx, si.ID()) - if err != nil { - if cache.IsLocked(err) { - // should never really happen as no other function should access this metadata, but let's be graceful - logrus.Warnf("mutable ref for %s %s was locked: %v", remote, si.ID(), err) - continue - } - return "", nil, errors.Wrapf(err, "failed to get mutable ref for %s", remote) - } - break - } - - initializeRepo := false - if remoteRef == nil { - remoteRef, err = gs.cache.New(ctx, nil, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("shared git repo for %s", remote))) - if err != nil { - return "", nil, errors.Wrapf(err, "failed to create new mutable for %s", remote) - } - initializeRepo = true - } - - releaseRemoteRef := func() { - remoteRef.Release(context.TODO()) - } - - defer func() { - if retErr != nil && remoteRef !=
nil { - releaseRemoteRef() - } - }() - - mount, err := remoteRef.Mount(ctx, false) - if err != nil { - return "", nil, err - } - - lm := snapshot.LocalMounter(mount) - dir, err := lm.Mount() - if err != nil { - return "", nil, err - } - - defer func() { - if retErr != nil { - lm.Unmount() - } - }() - - if initializeRepo { - if _, err := gitWithinDir(ctx, dir, "", "init", "--bare"); err != nil { - return "", nil, errors.Wrapf(err, "failed to init repo at %s", dir) - } - - if _, err := gitWithinDir(ctx, dir, "", "remote", "add", "origin", remote); err != nil { - return "", nil, errors.Wrapf(err, "failed to add origin repo at %s", dir) - } - - // save new remote metadata - si, _ := gs.md.Get(remoteRef.ID()) - v, err := metadata.NewValue(remoteKey) - v.Index = remoteKey - if err != nil { - return "", nil, err - } - - if err := si.Update(func(b *bolt.Bucket) error { - return si.SetValue(b, "git-remote", v) - }); err != nil { - return "", nil, err - } - } - return dir, func() { - lm.Unmount() - releaseRemoteRef() - }, nil -} - -type gitSourceHandler struct { - *gitSource - src source.GitIdentifier - cacheKey string -} - -func (gs *gitSource) Resolve(ctx context.Context, id source.Identifier) (source.SourceInstance, error) { - gitIdentifier, ok := id.(*source.GitIdentifier) - if !ok { - return nil, errors.Errorf("invalid git identifier %v", id) - } - - return &gitSourceHandler{ - src: *gitIdentifier, - gitSource: gs, - }, nil -} - -func (gs *gitSourceHandler) CacheKey(ctx context.Context, index int) (string, bool, error) { - remote := gs.src.Remote - ref := gs.src.Ref - if ref == "" { - ref = "master" - } - gs.locker.Lock(remote) - defer gs.locker.Unlock(remote) - - if isCommitSHA(ref) { - gs.cacheKey = ref - return ref, true, nil - } - - gitDir, unmountGitDir, err := gs.mountRemote(ctx, remote) - if err != nil { - return "", false, err - } - defer unmountGitDir() - - // TODO: should we assume that remote tag is immutable? add a timer?
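The code that follows resolves a mutable ref (branch or tag) to a commit SHA with `git ls-remote`, whose output is one `<sha>\t<refname>` pair per line, so everything before the first tab is the commit. A self-contained sketch of just that step, with a hypothetical remote URL:

```go
package main

import (
	"fmt"
	"os/exec"
	"regexp"
	"strings"
)

var validHex = regexp.MustCompile(`^[a-f0-9]{40}$`)

// resolveRef asks the remote which commit a ref points at, without fetching.
func resolveRef(remote, ref string) (string, error) {
	out, err := exec.Command("git", "ls-remote", remote, ref).Output()
	if err != nil {
		return "", err
	}
	idx := strings.Index(string(out), "\t")
	if idx < 0 {
		return "", fmt.Errorf("no match for ref %q", ref)
	}
	sha := string(out)[:idx]
	if !validHex.MatchString(sha) {
		return "", fmt.Errorf("invalid commit sha %q", sha)
	}
	return sha, nil
}

func main() {
	// Hypothetical remote; any reachable repository works.
	sha, err := resolveRef("https://github.com/moby/buildkit.git", "master")
	fmt.Println(sha, err)
}
```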
- - buf, err := gitWithinDir(ctx, gitDir, "", "ls-remote", "origin", ref) - if err != nil { - return "", false, errors.Wrapf(err, "failed to fetch remote %s", remote) - } - out := buf.String() - idx := strings.Index(out, "\t") - if idx == -1 { - return "", false, errors.Errorf("failed to find commit SHA from output: %s", string(out)) - } - - sha := string(out[:idx]) - if !isCommitSHA(sha) { - return "", false, errors.Errorf("invalid commit sha %q", sha) - } - gs.cacheKey = sha - return sha, true, nil -} - -func (gs *gitSourceHandler) Snapshot(ctx context.Context) (out cache.ImmutableRef, retErr error) { - ref := gs.src.Ref - if ref == "" { - ref = "master" - } - - cacheKey := gs.cacheKey - if cacheKey == "" { - var err error - cacheKey, _, err = gs.CacheKey(ctx, 0) - if err != nil { - return nil, err - } - } - - snapshotKey := "git-snapshot::" + cacheKey + ":" + gs.src.Subdir - gs.locker.Lock(snapshotKey) - defer gs.locker.Unlock(snapshotKey) - - sis, err := gs.md.Search(snapshotKey) - if err != nil { - return nil, errors.Wrapf(err, "failed to search metadata for %s", snapshotKey) - } - if len(sis) > 0 { - return gs.cache.Get(ctx, sis[0].ID()) - } - - gs.locker.Lock(gs.src.Remote) - defer gs.locker.Unlock(gs.src.Remote) - gitDir, unmountGitDir, err := gs.mountRemote(ctx, gs.src.Remote) - if err != nil { - return nil, err - } - defer unmountGitDir() - - doFetch := true - if isCommitSHA(ref) { - // skip fetch if commit already exists - if _, err := gitWithinDir(ctx, gitDir, "", "cat-file", "-e", ref+"^{commit}"); err == nil { - doFetch = false - } - } - - if doFetch { - args := []string{"fetch"} - if !isCommitSHA(ref) { // TODO: find a branch from ls-remote? - args = append(args, "--depth=1", "--no-tags") - } else { - if _, err := os.Lstat(filepath.Join(gitDir, "shallow")); err == nil { - args = append(args, "--unshallow") - } - } - args = append(args, "origin") - if !isCommitSHA(ref) { - args = append(args, ref+":tags/"+ref) - // local refs are needed so they would be advertised on next fetches - // TODO: is there a better way to do this? 
- } - if _, err := gitWithinDir(ctx, gitDir, "", args...); err != nil { - return nil, errors.Wrapf(err, "failed to fetch remote %s", gs.src.Remote) - } - } - - checkoutRef, err := gs.cache.New(ctx, nil, cache.WithRecordType(client.UsageRecordTypeGitCheckout), cache.WithDescription(fmt.Sprintf("git snapshot for %s#%s", gs.src.Remote, ref))) - if err != nil { - return nil, errors.Wrapf(err, "failed to create new mutable for %s", gs.src.Remote) - } - - defer func() { - if retErr != nil && checkoutRef != nil { - checkoutRef.Release(context.TODO()) - } - }() - - mount, err := checkoutRef.Mount(ctx, false) - if err != nil { - return nil, err - } - lm := snapshot.LocalMounter(mount) - checkoutDir, err := lm.Mount() - if err != nil { - return nil, err - } - defer func() { - if retErr != nil && lm != nil { - lm.Unmount() - } - }() - - if gs.src.KeepGitDir { - _, err = gitWithinDir(ctx, checkoutDir, "", "init") - if err != nil { - return nil, err - } - _, err = gitWithinDir(ctx, checkoutDir, "", "remote", "add", "origin", gitDir) - if err != nil { - return nil, err - } - pullref := ref - if isCommitSHA(ref) { - pullref = "refs/buildkit/" + identity.NewID() - _, err = gitWithinDir(ctx, gitDir, "", "update-ref", pullref, ref) - if err != nil { - return nil, err - } - } - _, err = gitWithinDir(ctx, checkoutDir, "", "fetch", "--depth=1", "origin", pullref) - if err != nil { - return nil, err - } - _, err = gitWithinDir(ctx, checkoutDir, checkoutDir, "checkout", "FETCH_HEAD") - if err != nil { - return nil, errors.Wrapf(err, "failed to checkout remote %s", gs.src.Remote) - } - gitDir = checkoutDir - } else { - _, err = gitWithinDir(ctx, gitDir, checkoutDir, "checkout", ref, "--", ".") - if err != nil { - return nil, errors.Wrapf(err, "failed to checkout remote %s", gs.src.Remote) - } - } - - _, err = gitWithinDir(ctx, gitDir, checkoutDir, "submodule", "update", "--init", "--recursive", "--depth=1") - if err != nil { - return nil, errors.Wrapf(err, "failed to update submodules for %s", gs.src.Remote) - } - - lm.Unmount() - lm = nil - - snap, err := checkoutRef.Commit(ctx) - if err != nil { - return nil, err - } - checkoutRef = nil - - defer func() { - if retErr != nil { - snap.Release(context.TODO()) - } - }() - - si, _ := gs.md.Get(snap.ID()) - v, err := metadata.NewValue(snapshotKey) - v.Index = snapshotKey - if err != nil { - return nil, err - } - if err := si.Update(func(b *bolt.Bucket) error { - return si.SetValue(b, "git-snapshot", v) - }); err != nil { - return nil, err - } - - return snap, nil -} - -func isCommitSHA(str string) bool { - return validHex.MatchString(str) -} - -func gitWithinDir(ctx context.Context, gitDir, workDir string, args ...string) (*bytes.Buffer, error) { - a := []string{"--git-dir", gitDir} - if workDir != "" { - a = append(a, "--work-tree", workDir) - } - return git(ctx, workDir, append(a, args...)...) -} - -func git(ctx context.Context, dir string, args ...string) (*bytes.Buffer, error) { - for { - stdout, stderr := logs.NewLogStreams(ctx, false) - defer stdout.Close() - defer stderr.Close() - cmd := exec.Command("git", args...) 
- cmd.Dir = dir // some commands like submodule require this - buf := bytes.NewBuffer(nil) - errbuf := bytes.NewBuffer(nil) - cmd.Stdout = io.MultiWriter(stdout, buf) - cmd.Stderr = io.MultiWriter(stderr, errbuf) - // remote git commands spawn helper processes that inherit FDs and don't - // handle parent death signal so exec.CommandContext can't be used - err := runProcessGroup(ctx, cmd) - if err != nil { - if strings.Contains(errbuf.String(), "--depth") || strings.Contains(errbuf.String(), "shallow") { - if newArgs := argsNoDepth(args); len(args) > len(newArgs) { - args = newArgs - continue - } - } - } - return buf, err - } -} - -func argsNoDepth(args []string) []string { - out := make([]string, 0, len(args)) - for _, a := range args { - if a != "--depth=1" { - out = append(out, a) - } - } - return out -} diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource_test.go b/vendor/github.com/moby/buildkit/source/git/gitsource_test.go deleted file mode 100644 index 636721f838fc..000000000000 --- a/vendor/github.com/moby/buildkit/source/git/gitsource_test.go +++ /dev/null @@ -1,368 +0,0 @@ -package git - -import ( - "context" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" - "testing" - - "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/snapshots/native" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/source" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestRepeatedFetch(t *testing.T) { - testRepeatedFetch(t, false) -} -func TestRepeatedFetchKeepGitDir(t *testing.T) { - testRepeatedFetch(t, true) -} - -func testRepeatedFetch(t *testing.T, keepGitDir bool) { - t.Parallel() - ctx := context.TODO() - - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - gs := setupGitSource(t, tmpdir) - - repodir, err := ioutil.TempDir("", "buildkit-gitsource") - require.NoError(t, err) - defer os.RemoveAll(repodir) - - repodir, err = setupGitRepo(repodir) - require.NoError(t, err) - - id := &source.GitIdentifier{Remote: repodir, KeepGitDir: keepGitDir} - - g, err := gs.Resolve(ctx, id) - require.NoError(t, err) - - key1, done, err := g.CacheKey(ctx, 0) - require.NoError(t, err) - require.True(t, done) - - require.Equal(t, 40, len(key1)) - - ref1, err := g.Snapshot(ctx) - require.NoError(t, err) - defer ref1.Release(context.TODO()) - - mount, err := ref1.Mount(ctx, false) - require.NoError(t, err) - - lm := snapshot.LocalMounter(mount) - dir, err := lm.Mount() - require.NoError(t, err) - defer lm.Unmount() - - dt, err := ioutil.ReadFile(filepath.Join(dir, "def")) - require.NoError(t, err) - - require.Equal(t, "bar\n", string(dt)) - - _, err = os.Lstat(filepath.Join(dir, "ghi")) - require.Error(t, err) - require.True(t, os.IsNotExist(err)) - - _, err = os.Lstat(filepath.Join(dir, "sub/subfile")) - require.Error(t, err) - require.True(t, os.IsNotExist(err)) - - // second fetch returns same dir - id = &source.GitIdentifier{Remote: repodir, Ref: "master", KeepGitDir: keepGitDir} - - g, err = gs.Resolve(ctx, id) - require.NoError(t, err) - - key2, _, err := g.CacheKey(ctx, 0) - require.NoError(t, err) - - require.Equal(t, key1, key2) - - ref2, err := g.Snapshot(ctx) - require.NoError(t, err) - defer ref2.Release(context.TODO()) - - require.Equal(t, ref1.ID(), ref2.ID()) - - id = &source.GitIdentifier{Remote: repodir, Ref: "feature", 
KeepGitDir: keepGitDir} - - g, err = gs.Resolve(ctx, id) - require.NoError(t, err) - - key3, _, err := g.CacheKey(ctx, 0) - require.NoError(t, err) - require.NotEqual(t, key1, key3) - - ref3, err := g.Snapshot(ctx) - require.NoError(t, err) - defer ref3.Release(context.TODO()) - - mount, err = ref3.Mount(ctx, false) - require.NoError(t, err) - - lm = snapshot.LocalMounter(mount) - dir, err = lm.Mount() - require.NoError(t, err) - defer lm.Unmount() - - dt, err = ioutil.ReadFile(filepath.Join(dir, "ghi")) - require.NoError(t, err) - - require.Equal(t, "baz\n", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(dir, "sub/subfile")) - require.NoError(t, err) - - require.Equal(t, "subcontents\n", string(dt)) -} - -func TestFetchBySHA(t *testing.T) { - testFetchBySHA(t, false) -} -func TestFetchBySHAKeepGitDir(t *testing.T) { - testFetchBySHA(t, true) -} - -func testFetchBySHA(t *testing.T, keepGitDir bool) { - t.Parallel() - ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - gs := setupGitSource(t, tmpdir) - - repodir, err := ioutil.TempDir("", "buildkit-gitsource") - require.NoError(t, err) - defer os.RemoveAll(repodir) - - repodir, err = setupGitRepo(repodir) - require.NoError(t, err) - - cmd := exec.Command("git", "rev-parse", "feature") - cmd.Dir = repodir - - out, err := cmd.Output() - require.NoError(t, err) - - sha := strings.TrimSpace(string(out)) - require.Equal(t, 40, len(sha)) - - id := &source.GitIdentifier{Remote: repodir, Ref: sha, KeepGitDir: keepGitDir} - - g, err := gs.Resolve(ctx, id) - require.NoError(t, err) - - key1, done, err := g.CacheKey(ctx, 0) - require.NoError(t, err) - require.True(t, done) - - require.Equal(t, 40, len(key1)) - - ref1, err := g.Snapshot(ctx) - require.NoError(t, err) - defer ref1.Release(context.TODO()) - - mount, err := ref1.Mount(ctx, false) - require.NoError(t, err) - - lm := snapshot.LocalMounter(mount) - dir, err := lm.Mount() - require.NoError(t, err) - defer lm.Unmount() - - dt, err := ioutil.ReadFile(filepath.Join(dir, "ghi")) - require.NoError(t, err) - - require.Equal(t, "baz\n", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(dir, "sub/subfile")) - require.NoError(t, err) - - require.Equal(t, "subcontents\n", string(dt)) -} - -func TestMultipleRepos(t *testing.T) { - testMultipleRepos(t, false) -} - -func TestMultipleReposKeepGitDir(t *testing.T) { - testMultipleRepos(t, true) -} - -func testMultipleRepos(t *testing.T, keepGitDir bool) { - t.Parallel() - ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - gs := setupGitSource(t, tmpdir) - - repodir, err := ioutil.TempDir("", "buildkit-gitsource") - require.NoError(t, err) - defer os.RemoveAll(repodir) - - repodir, err = setupGitRepo(repodir) - require.NoError(t, err) - - repodir2, err := ioutil.TempDir("", "buildkit-gitsource") - require.NoError(t, err) - defer os.RemoveAll(repodir2) - - err = runShell(repodir2, - "git init", - "git config --local user.email test", - "git config --local user.name test", - "echo xyz > xyz", - "git add xyz", - "git commit -m initial", - ) - require.NoError(t, err) - - id := &source.GitIdentifier{Remote: repodir, KeepGitDir: keepGitDir} - id2 := &source.GitIdentifier{Remote: repodir2, KeepGitDir: keepGitDir} - - g, err := gs.Resolve(ctx, id) - require.NoError(t, err) - - g2, err := 
gs.Resolve(ctx, id2) - require.NoError(t, err) - - key1, _, err := g.CacheKey(ctx, 0) - require.NoError(t, err) - require.Equal(t, 40, len(key1)) - - key2, _, err := g2.CacheKey(ctx, 0) - require.NoError(t, err) - require.Equal(t, 40, len(key2)) - - require.NotEqual(t, key1, key2) - - ref1, err := g.Snapshot(ctx) - require.NoError(t, err) - defer ref1.Release(context.TODO()) - - mount, err := ref1.Mount(ctx, false) - require.NoError(t, err) - - lm := snapshot.LocalMounter(mount) - dir, err := lm.Mount() - require.NoError(t, err) - defer lm.Unmount() - - ref2, err := g2.Snapshot(ctx) - require.NoError(t, err) - defer ref2.Release(context.TODO()) - - mount, err = ref2.Mount(ctx, false) - require.NoError(t, err) - - lm = snapshot.LocalMounter(mount) - dir2, err := lm.Mount() - require.NoError(t, err) - defer lm.Unmount() - - dt, err := ioutil.ReadFile(filepath.Join(dir, "def")) - require.NoError(t, err) - - require.Equal(t, "bar\n", string(dt)) - - dt, err = ioutil.ReadFile(filepath.Join(dir2, "xyz")) - require.NoError(t, err) - - require.Equal(t, "xyz\n", string(dt)) -} - -func setupGitSource(t *testing.T, tmpdir string) source.Source { - snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) - assert.NoError(t, err) - - md, err := metadata.NewStore(filepath.Join(tmpdir, "metadata.db")) - assert.NoError(t, err) - - cm, err := cache.NewManager(cache.ManagerOpt{ - Snapshotter: snapshot.FromContainerdSnapshotter(snapshotter), - MetadataStore: md, - }) - assert.NoError(t, err) - - gs, err := NewSource(Opt{ - CacheAccessor: cm, - MetadataStore: md, - }) - require.NoError(t, err) - - return gs -} - -func setupGitRepo(dir string) (string, error) { - subPath := filepath.Join(dir, "sub") - mainPath := filepath.Join(dir, "main") - - if err := os.MkdirAll(subPath, 0700); err != nil { - return "", err - } - - if err := os.MkdirAll(mainPath, 0700); err != nil { - return "", err - } - - if err := runShell(filepath.Join(dir, "sub"), - "git init", - "git config --local user.email test", - "git config --local user.name test", - "echo subcontents > subfile", - "git add subfile", - "git commit -m initial", - ); err != nil { - return "", err - } - if err := runShell(filepath.Join(dir, "main"), - "git init", - "git config --local user.email test", - "git config --local user.name test", - "echo foo > abc", - "git add abc", - "git commit -m initial", - "echo bar > def", - "git add def", - "git commit -m second", - "git checkout -B feature", - "echo baz > ghi", - "git add ghi", - "git commit -m feature", - "git submodule add "+subPath+" sub", - "git add -A", - "git commit -m withsub", - ); err != nil { - return "", err - } - return mainPath, nil -} - -func runShell(dir string, cmds ...string) error { - for _, args := range cmds { - cmd := exec.Command("sh", "-c", args) - cmd.Dir = dir - if err := cmd.Run(); err != nil { - return errors.Wrapf(err, "error running %v", args) - } - } - return nil -} diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go b/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go deleted file mode 100644 index 4d0e9d89d20a..000000000000 --- a/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !windows - -package git - -import ( - "context" - "os/exec" - "syscall" -) - -func runProcessGroup(ctx context.Context, cmd *exec.Cmd) error { - cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} - if err := cmd.Start(); err != nil { - return err - } - waitDone := make(chan struct{}) - go func() { - 
select { - case <-ctx.Done(): - syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL) - case <-waitDone: - } - }() - err := cmd.Wait() - close(waitDone) - return err -} diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go b/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go deleted file mode 100644 index 3435c8f9eef4..000000000000 --- a/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build windows - -package git - -import ( - "context" - "os/exec" -) - -func runProcessGroup(ctx context.Context, cmd *exec.Cmd) error { - if err := cmd.Start(); err != nil { - return err - } - waitDone := make(chan struct{}) - go func() { - select { - case <-ctx.Done(): - cmd.Process.Kill() - case <-waitDone: - } - }() - return cmd.Wait() -} diff --git a/vendor/github.com/moby/buildkit/source/gitidentifier.go b/vendor/github.com/moby/buildkit/source/gitidentifier.go deleted file mode 100644 index 9f338343bf9a..000000000000 --- a/vendor/github.com/moby/buildkit/source/gitidentifier.go +++ /dev/null @@ -1,70 +0,0 @@ -package source - -import ( - "net/url" - "strings" - - "github.com/pkg/errors" -) - -type GitIdentifier struct { - Remote string - Ref string - Subdir string - KeepGitDir bool -} - -func NewGitIdentifier(remoteURL string) (*GitIdentifier, error) { - repo := GitIdentifier{} - - if !isGitTransport(remoteURL) { - remoteURL = "https://" + remoteURL - } - - var fragment string - if strings.HasPrefix(remoteURL, "git@") { - // git@.. is not a URL, so it cannot be parsed as one - parts := strings.SplitN(remoteURL, "#", 2) - - repo.Remote = parts[0] - if len(parts) == 2 { - fragment = parts[1] - } - repo.Ref, repo.Subdir = getRefAndSubdir(fragment) - } else { - u, err := url.Parse(remoteURL) - if err != nil { - return nil, err - } - - repo.Ref, repo.Subdir = getRefAndSubdir(u.Fragment) - u.Fragment = "" - repo.Remote = u.String() - } - if repo.Subdir != "" { - return nil, errors.Errorf("subdir not supported yet") - } - return &repo, nil -} - -func (i *GitIdentifier) ID() string { - return "git" -} - -// isGitTransport returns true if the provided str is a git transport by inspecting -// the prefix of the string for known protocols used in git.
-func isGitTransport(str string) bool { - return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://") || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") -} - -func getRefAndSubdir(fragment string) (ref string, subdir string) { - refAndDir := strings.SplitN(fragment, ":", 2) - ref = "master" - if len(refAndDir[0]) != 0 { - ref = refAndDir[0] - } - if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { - subdir = refAndDir[1] - } - return -} diff --git a/vendor/github.com/moby/buildkit/source/http/httpsource.go b/vendor/github.com/moby/buildkit/source/http/httpsource.go deleted file mode 100644 index 39939128cd9e..000000000000 --- a/vendor/github.com/moby/buildkit/source/http/httpsource.go +++ /dev/null @@ -1,429 +0,0 @@ -package http - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "io" - "mime" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "strings" - "time" - - "github.com/docker/docker/pkg/locker" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/source" - "github.com/moby/buildkit/util/tracing" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - bolt "go.etcd.io/bbolt" -) - -type Opt struct { - CacheAccessor cache.Accessor - MetadataStore *metadata.Store - Transport http.RoundTripper -} - -type httpSource struct { - md *metadata.Store - cache cache.Accessor - locker *locker.Locker - client *http.Client -} - -func NewSource(opt Opt) (source.Source, error) { - transport := opt.Transport - if transport == nil { - transport = tracing.DefaultTransport - } - hs := &httpSource{ - md: opt.MetadataStore, - cache: opt.CacheAccessor, - locker: locker.New(), - client: &http.Client{ - Transport: transport, - }, - } - return hs, nil -} - -func (hs *httpSource) ID() string { - return source.HttpsScheme -} - -type httpSourceHandler struct { - *httpSource - src source.HttpIdentifier - refID string - cacheKey digest.Digest -} - -func (hs *httpSource) Resolve(ctx context.Context, id source.Identifier) (source.SourceInstance, error) { - httpIdentifier, ok := id.(*source.HttpIdentifier) - if !ok { - return nil, errors.Errorf("invalid http identifier %v", id) - } - - return &httpSourceHandler{ - src: *httpIdentifier, - httpSource: hs, - }, nil -} - -// urlHash is the internal hash that the etag is stored by; it doesn't leak outside -// this package.
-func (hs *httpSourceHandler) urlHash() (digest.Digest, error) { - dt, err := json.Marshal(struct { - Filename string - Perm, UID, GID int - }{ - Filename: getFileName(hs.src.URL, hs.src.Filename, nil), - Perm: hs.src.Perm, - UID: hs.src.UID, - GID: hs.src.GID, - }) - if err != nil { - return "", err - } - return digest.FromBytes(dt), nil -} - -func (hs *httpSourceHandler) formatCacheKey(filename string, dgst digest.Digest, lastModTime string) digest.Digest { - dt, err := json.Marshal(struct { - Filename string - Perm, UID, GID int - Checksum digest.Digest - LastModTime string `json:",omitempty"` - }{ - Filename: filename, - Perm: hs.src.Perm, - UID: hs.src.UID, - GID: hs.src.GID, - Checksum: dgst, - LastModTime: lastModTime, - }) - if err != nil { - return dgst - } - return digest.FromBytes(dt) -} - -func (hs *httpSourceHandler) CacheKey(ctx context.Context, index int) (string, bool, error) { - if hs.src.Checksum != "" { - hs.cacheKey = hs.src.Checksum - return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, nil), hs.src.Checksum, "").String(), true, nil - } - - uh, err := hs.urlHash() - if err != nil { - return "", false, nil - } - - // look up metadata(previously stored headers) for that URL - sis, err := hs.md.Search(uh.String()) - if err != nil { - return "", false, errors.Wrapf(err, "failed to search metadata for %s", uh) - } - - req, err := http.NewRequest("GET", hs.src.URL, nil) - if err != nil { - return "", false, err - } - req = req.WithContext(ctx) - m := map[string]*metadata.StorageItem{} - - if len(sis) > 0 { - for _, si := range sis { - // if metaDigest := getMetaDigest(si); metaDigest == hs.formatCacheKey("") { - if etag := getETag(si); etag != "" { - if dgst := getChecksum(si); dgst != "" { - m[etag] = si - req.Header.Add("If-None-Match", etag) - } - } - // } - } - } - - resp, err := hs.client.Do(req) - if err != nil { - return "", false, err - } - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return "", false, errors.Errorf("invalid response status %d", resp.StatusCode) - } - if resp.StatusCode == http.StatusNotModified { - respETag := resp.Header.Get("ETag") - si, ok := m[respETag] - if !ok { - return "", false, errors.Errorf("invalid not-modified ETag: %v", respETag) - } - hs.refID = si.ID() - dgst := getChecksum(si) - if dgst == "" { - return "", false, errors.Errorf("invalid metadata change") - } - modTime := getModTime(si) - resp.Body.Close() - return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, modTime).String(), true, nil - } - - ref, dgst, err := hs.save(ctx, resp) - if err != nil { - return "", false, err - } - ref.Release(context.TODO()) - - hs.cacheKey = dgst - - return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, resp.Header.Get("Last-Modified")).String(), true, nil -} - -func (hs *httpSourceHandler) save(ctx context.Context, resp *http.Response) (ref cache.ImmutableRef, dgst digest.Digest, retErr error) { - newRef, err := hs.cache.New(ctx, nil, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("http url %s", hs.src.URL))) - if err != nil { - return nil, "", err - } - - releaseRef := func() { - newRef.Release(context.TODO()) - } - - defer func() { - if retErr != nil && newRef != nil { - releaseRef() - } - }() - - mount, err := newRef.Mount(ctx, false) - if err != nil { - return nil, "", err - } - - lm := snapshot.LocalMounter(mount) - dir, err := lm.Mount() - if err != nil { - return nil, "", err - } - - defer func() { - if retErr != nil && lm != nil { - lm.Unmount() - } - }() - 
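CacheKey above is, at its core, an HTTP revalidation loop: previously stored ETags are replayed as If-None-Match, and a 304 Not Modified response lets the stored checksum stand in for a fresh download. A minimal sketch of that handshake using only net/http follows; the helper name and URL are hypothetical, and the real code additionally persists the checksum and Last-Modified value alongside the ETag.

```go
package main

import (
	"fmt"
	"net/http"
)

// check replays a stored ETag; a 304 means the cached copy is still valid.
func check(url, etag string) error {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	if etag != "" {
		req.Header.Add("If-None-Match", etag)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotModified {
		fmt.Println("not modified; reuse cached snapshot for", etag)
		return nil
	}
	fmt.Println("changed; re-download, new ETag:", resp.Header.Get("ETag"))
	return nil
}

func main() {
	// Hypothetical URL and ETag, for illustration only.
	_ = check("https://example.com/artifact.tar.gz", "\"hypothetical-etag\"")
}
```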
perm := 0600 - if hs.src.Perm != 0 { - perm = hs.src.Perm - } - fp := filepath.Join(dir, getFileName(hs.src.URL, hs.src.Filename, resp)) - - f, err := os.OpenFile(fp, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(perm)) - if err != nil { - return nil, "", err - } - defer func() { - if f != nil { - f.Close() - } - }() - - h := sha256.New() - - if _, err := io.Copy(io.MultiWriter(f, h), resp.Body); err != nil { - return nil, "", err - } - - if err := f.Close(); err != nil { - return nil, "", err - } - f = nil - - if hs.src.UID != 0 || hs.src.GID != 0 { - if err := os.Chown(fp, hs.src.UID, hs.src.GID); err != nil { - return nil, "", err - } - } - - mTime := time.Unix(0, 0) - lastMod := resp.Header.Get("Last-Modified") - if lastMod != "" { - if parsedMTime, err := http.ParseTime(lastMod); err == nil { - mTime = parsedMTime - } - } - - if err := os.Chtimes(fp, mTime, mTime); err != nil { - return nil, "", err - } - - lm.Unmount() - lm = nil - - ref, err = newRef.Commit(ctx) - if err != nil { - return nil, "", err - } - newRef = nil - - hs.refID = ref.ID() - dgst = digest.NewDigest(digest.SHA256, h) - - if respETag := resp.Header.Get("ETag"); respETag != "" { - setETag(ref.Metadata(), respETag) - uh, err := hs.urlHash() - if err != nil { - return nil, "", err - } - setChecksum(ref.Metadata(), uh.String(), dgst) - if err := ref.Metadata().Commit(); err != nil { - return nil, "", err - } - } - - if modTime := resp.Header.Get("Last-Modified"); modTime != "" { - setModTime(ref.Metadata(), modTime) - } - - return ref, dgst, nil -} - -func (hs *httpSourceHandler) Snapshot(ctx context.Context) (cache.ImmutableRef, error) { - if hs.refID != "" { - ref, err := hs.cache.Get(ctx, hs.refID) - if err == nil { - return ref, nil - } - } - - req, err := http.NewRequest("GET", hs.src.URL, nil) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - - resp, err := hs.client.Do(req) - if err != nil { - return nil, err - } - - ref, dgst, err := hs.save(ctx, resp) - if err != nil { - return nil, err - } - if dgst != hs.cacheKey { - ref.Release(context.TODO()) - return nil, errors.Errorf("digest mismatch %s: %s", dgst, hs.cacheKey) - } - - return ref, nil -} - -const keyETag = "etag" -const keyChecksum = "http.checksum" -const keyModTime = "http.modtime" - -func setETag(si *metadata.StorageItem, s string) error { - v, err := metadata.NewValue(s) - if err != nil { - return errors.Wrap(err, "failed to create etag value") - } - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyETag, v) - }) - return nil -} - -func getETag(si *metadata.StorageItem) string { - v := si.Get(keyETag) - if v == nil { - return "" - } - var etag string - if err := v.Unmarshal(&etag); err != nil { - return "" - } - return etag -} - -func setModTime(si *metadata.StorageItem, s string) error { - v, err := metadata.NewValue(s) - if err != nil { - return errors.Wrap(err, "failed to create modtime value") - } - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyModTime, v) - }) - return nil -} - -func getModTime(si *metadata.StorageItem) string { - v := si.Get(keyModTime) - if v == nil { - return "" - } - var modTime string - if err := v.Unmarshal(&modTime); err != nil { - return "" - } - return modTime -} - -func setChecksum(si *metadata.StorageItem, url string, d digest.Digest) error { - v, err := metadata.NewValue(d) - if err != nil { - return errors.Wrap(err, "failed to create checksum value") - } - v.Index = url - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyChecksum, v) - }) - 
return nil -} - -func getChecksum(si *metadata.StorageItem) digest.Digest { - v := si.Get(keyChecksum) - if v == nil { - return "" - } - var dgstStr string - if err := v.Unmarshal(&dgstStr); err != nil { - return "" - } - dgst, err := digest.Parse(dgstStr) - if err != nil { - return "" - } - return dgst -} - -func getFileName(urlStr, manualFilename string, resp *http.Response) string { - if manualFilename != "" { - return manualFilename - } - if resp != nil { - if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" { - if _, params, err := mime.ParseMediaType(contentDisposition); err == nil { - if params["filename"] != "" && !strings.HasSuffix(params["filename"], "/") { - if filename := filepath.Base(filepath.FromSlash(params["filename"])); filename != "" { - return filename - } - } - } - } - } - u, err := url.Parse(urlStr) - if err == nil { - if base := path.Base(u.Path); base != "." && base != "/" { - return base - } - } - return "download" -} diff --git a/vendor/github.com/moby/buildkit/source/http/httpsource_test.go b/vendor/github.com/moby/buildkit/source/http/httpsource_test.go deleted file mode 100644 index b9f14936eee4..000000000000 --- a/vendor/github.com/moby/buildkit/source/http/httpsource_test.go +++ /dev/null @@ -1,329 +0,0 @@ -package http - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/containerd/containerd/snapshots/native" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/source" - "github.com/moby/buildkit/util/testutil/httpserver" - digest "github.com/opencontainers/go-digest" - "github.com/stretchr/testify/require" -) - -func TestHTTPSource(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - hs, err := newHTTPSource(tmpdir) - require.NoError(t, err) - - resp := httpserver.Response{ - Etag: identity.NewID(), - Content: []byte("content1"), - } - server := httpserver.NewTestServer(map[string]httpserver.Response{ - "/foo": resp, - }) - defer server.Close() - - id := &source.HttpIdentifier{URL: server.URL + "/foo"} - - h, err := hs.Resolve(ctx, id) - require.NoError(t, err) - - k, _, err := h.CacheKey(ctx, 0) - require.NoError(t, err) - - expectedContent1 := "sha256:0b1a154faa3003c1fbe7fda9c8a42d55fde2df2a2c405c32038f8ac7ed6b044a" - - require.Equal(t, expectedContent1, k) - require.Equal(t, server.Stats("/foo").AllRequests, 1) - require.Equal(t, server.Stats("/foo").CachedRequests, 0) - - ref, err := h.Snapshot(ctx) - require.NoError(t, err) - defer func() { - if ref != nil { - ref.Release(context.TODO()) - ref = nil - } - }() - - dt, err := readFile(ctx, ref, "foo") - require.NoError(t, err) - require.Equal(t, dt, []byte("content1")) - - ref.Release(context.TODO()) - ref = nil - - // repeat, should use the etag - h, err = hs.Resolve(ctx, id) - require.NoError(t, err) - - k, _, err = h.CacheKey(ctx, 0) - require.NoError(t, err) - - require.Equal(t, expectedContent1, k) - require.Equal(t, server.Stats("/foo").AllRequests, 2) - require.Equal(t, server.Stats("/foo").CachedRequests, 1) - - ref, err = h.Snapshot(ctx) - require.NoError(t, err) - defer func() { - if ref != nil { - ref.Release(context.TODO()) - ref = nil - } - }() - - dt, err = readFile(ctx, ref, "foo") - require.NoError(t, err) - require.Equal(t, dt, []byte("content1")) - - 
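A note for reading the tests that follow: the pinned `sha256:...` constants are not digests of the response bodies. They come from formatCacheKey above, which digests a small JSON document describing the download. The sketch below mirrors that derivation with the field set copied from the code above; the printed digest is illustrative, since the exact value depends on the JSON layout and zero values matching precisely.

```go
package main

import (
	"encoding/json"
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// cacheKey digests a JSON description of the download, not the body itself.
func cacheKey(filename string, checksum digest.Digest, lastModTime string) digest.Digest {
	dt, err := json.Marshal(struct {
		Filename       string
		Perm, UID, GID int
		Checksum       digest.Digest
		LastModTime    string `json:",omitempty"`
	}{
		Filename:    filename,
		Checksum:    checksum,
		LastModTime: lastModTime,
	})
	if err != nil {
		return checksum
	}
	return digest.FromBytes(dt)
}

func main() {
	fmt.Println(cacheKey("foo", digest.FromBytes([]byte("content1")), ""))
}
```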
ref.Release(context.TODO()) - ref = nil - - resp2 := httpserver.Response{ - Etag: identity.NewID(), - Content: []byte("content2"), - } - - expectedContent2 := "sha256:888722f299c02bfae173a747a0345bb2291cf6a076c36d8eb6fab442a8adddfa" - - // update etag, downloads again - server.SetRoute("/foo", resp2) - - h, err = hs.Resolve(ctx, id) - require.NoError(t, err) - - k, _, err = h.CacheKey(ctx, 0) - require.NoError(t, err) - - require.Equal(t, expectedContent2, k) - require.Equal(t, server.Stats("/foo").AllRequests, 3) - require.Equal(t, server.Stats("/foo").CachedRequests, 1) - - ref, err = h.Snapshot(ctx) - require.NoError(t, err) - defer func() { - if ref != nil { - ref.Release(context.TODO()) - ref = nil - } - }() - - dt, err = readFile(ctx, ref, "foo") - require.NoError(t, err) - require.Equal(t, dt, []byte("content2")) - - ref.Release(context.TODO()) - ref = nil -} - -func TestHTTPDefaultName(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - hs, err := newHTTPSource(tmpdir) - require.NoError(t, err) - - resp := httpserver.Response{ - Etag: identity.NewID(), - Content: []byte("content1"), - } - server := httpserver.NewTestServer(map[string]httpserver.Response{ - "/": resp, - }) - defer server.Close() - - id := &source.HttpIdentifier{URL: server.URL} - - h, err := hs.Resolve(ctx, id) - require.NoError(t, err) - - k, _, err := h.CacheKey(ctx, 0) - require.NoError(t, err) - - require.Equal(t, "sha256:146f16ec8810a62a57ce314aba391f95f7eaaf41b8b1ebaf2ab65fd63b1ad437", k) - require.Equal(t, server.Stats("/").AllRequests, 1) - require.Equal(t, server.Stats("/").CachedRequests, 0) - - ref, err := h.Snapshot(ctx) - require.NoError(t, err) - defer func() { - if ref != nil { - ref.Release(context.TODO()) - ref = nil - } - }() - - dt, err := readFile(ctx, ref, "download") - require.NoError(t, err) - require.Equal(t, dt, []byte("content1")) - - ref.Release(context.TODO()) - ref = nil -} - -func TestHTTPInvalidURL(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - hs, err := newHTTPSource(tmpdir) - require.NoError(t, err) - - server := httpserver.NewTestServer(map[string]httpserver.Response{}) - defer server.Close() - - id := &source.HttpIdentifier{URL: server.URL + "/foo"} - - h, err := hs.Resolve(ctx, id) - require.NoError(t, err) - - _, _, err = h.CacheKey(ctx, 0) - require.Error(t, err) - require.Contains(t, err.Error(), "invalid response") -} - -func TestHTTPChecksum(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - hs, err := newHTTPSource(tmpdir) - require.NoError(t, err) - - resp := httpserver.Response{ - Etag: identity.NewID(), - Content: []byte("content-correct"), - } - server := httpserver.NewTestServer(map[string]httpserver.Response{ - "/foo": resp, - }) - defer server.Close() - - id := &source.HttpIdentifier{URL: server.URL + "/foo", Checksum: digest.FromBytes([]byte("content-different"))} - - h, err := hs.Resolve(ctx, id) - require.NoError(t, err) - - k, _, err := h.CacheKey(ctx, 0) - require.NoError(t, err) - - expectedContentDifferent := "sha256:f25996f463dca69cffb580f8273ffacdda43332b5f0a8bea2ead33900616d44b" - expectedContentCorrect := "sha256:c6a440110a7757b9e1e47b52e413cba96c62377c37a474714b6b3c4f8b74e536" - - require.Equal(t, 
expectedContentDifferent, k) - require.Equal(t, server.Stats("/foo").AllRequests, 0) - require.Equal(t, server.Stats("/foo").CachedRequests, 0) - - _, err = h.Snapshot(ctx) - require.Error(t, err) - - require.Equal(t, expectedContentDifferent, k) - require.Equal(t, server.Stats("/foo").AllRequests, 1) - require.Equal(t, server.Stats("/foo").CachedRequests, 0) - - id = &source.HttpIdentifier{URL: server.URL + "/foo", Checksum: digest.FromBytes([]byte("content-correct"))} - - h, err = hs.Resolve(ctx, id) - require.NoError(t, err) - - k, _, err = h.CacheKey(ctx, 0) - require.NoError(t, err) - - require.Equal(t, expectedContentCorrect, k) - require.Equal(t, server.Stats("/foo").AllRequests, 1) - require.Equal(t, server.Stats("/foo").CachedRequests, 0) - - ref, err := h.Snapshot(ctx) - require.NoError(t, err) - defer func() { - if ref != nil { - ref.Release(context.TODO()) - ref = nil - } - }() - - dt, err := readFile(ctx, ref, "foo") - require.NoError(t, err) - require.Equal(t, dt, []byte("content-correct")) - - require.Equal(t, expectedContentCorrect, k) - require.Equal(t, server.Stats("/foo").AllRequests, 2) - require.Equal(t, server.Stats("/foo").CachedRequests, 0) - - ref.Release(context.TODO()) - ref = nil - -} - -func readFile(ctx context.Context, ref cache.ImmutableRef, fp string) ([]byte, error) { - mount, err := ref.Mount(ctx, false) - if err != nil { - return nil, err - } - - lm := snapshot.LocalMounter(mount) - dir, err := lm.Mount() - if err != nil { - return nil, err - } - - defer lm.Unmount() - - dt, err := ioutil.ReadFile(filepath.Join(dir, fp)) - if err != nil { - return nil, err - } - - return dt, nil -} - -func newHTTPSource(tmpdir string) (source.Source, error) { - snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) - if err != nil { - return nil, err - } - - md, err := metadata.NewStore(filepath.Join(tmpdir, "metadata.db")) - if err != nil { - return nil, err - } - - cm, err := cache.NewManager(cache.ManagerOpt{ - Snapshotter: snapshot.FromContainerdSnapshotter(snapshotter), - MetadataStore: md, - }) - if err != nil { - return nil, err - } - - return NewSource(Opt{ - CacheAccessor: cm, - MetadataStore: md, - }) -} diff --git a/vendor/github.com/moby/buildkit/source/identifier.go b/vendor/github.com/moby/buildkit/source/identifier.go deleted file mode 100644 index 2a8611098ecf..000000000000 --- a/vendor/github.com/moby/buildkit/source/identifier.go +++ /dev/null @@ -1,275 +0,0 @@ -package source - -import ( - "encoding/json" - "strconv" - "strings" - - "github.com/containerd/containerd/reference" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/solver/pb" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -var ( - errInvalid = errors.New("invalid") - errNotFound = errors.New("not found") -) - -type ResolveMode int - -const ( - ResolveModeDefault ResolveMode = iota - ResolveModeForcePull - ResolveModePreferLocal -) - -const ( - DockerImageScheme = "docker-image" - GitScheme = "git" - LocalScheme = "local" - HttpScheme = "http" - HttpsScheme = "https" -) - -type Identifier interface { - ID() string // until sources are in process this string comparison could be avoided -} - -func FromString(s string) (Identifier, error) { - // TODO: improve this - parts := strings.SplitN(s, "://", 2) - if len(parts) != 2 { - return nil, errors.Wrapf(errInvalid, "failed to parse %s", s) - } - - switch parts[0] { - case DockerImageScheme: - return 
NewImageIdentifier(parts[1]) - case GitScheme: - return NewGitIdentifier(parts[1]) - case LocalScheme: - return NewLocalIdentifier(parts[1]) - case HttpsScheme: - return NewHttpIdentifier(parts[1], true) - case HttpScheme: - return NewHttpIdentifier(parts[1], false) - default: - return nil, errors.Wrapf(errNotFound, "unknown schema %s", parts[0]) - } -} - -func FromLLB(op *pb.Op_Source, platform *pb.Platform) (Identifier, error) { - id, err := FromString(op.Source.Identifier) - if err != nil { - return nil, err - } - - if id, ok := id.(*ImageIdentifier); ok { - if platform != nil { - id.Platform = &specs.Platform{ - OS: platform.OS, - Architecture: platform.Architecture, - Variant: platform.Variant, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - } - } - for k, v := range op.Source.Attrs { - switch k { - case pb.AttrImageResolveMode: - rm, err := ParseImageResolveMode(v) - if err != nil { - return nil, err - } - id.ResolveMode = rm - case pb.AttrImageRecordType: - rt, err := parseImageRecordType(v) - if err != nil { - return nil, err - } - id.RecordType = rt - } - } - } - if id, ok := id.(*GitIdentifier); ok { - for k, v := range op.Source.Attrs { - switch k { - case pb.AttrKeepGitDir: - if v == "true" { - id.KeepGitDir = true - } - case pb.AttrFullRemoteURL: - id.Remote = v - } - } - } - if id, ok := id.(*LocalIdentifier); ok { - for k, v := range op.Source.Attrs { - switch k { - case pb.AttrLocalSessionID: - id.SessionID = v - if p := strings.SplitN(v, ":", 2); len(p) == 2 { - id.Name = p[0] + "-" + id.Name - id.SessionID = p[1] - } - case pb.AttrIncludePatterns: - var patterns []string - if err := json.Unmarshal([]byte(v), &patterns); err != nil { - return nil, err - } - id.IncludePatterns = patterns - case pb.AttrExcludePatterns: - var patterns []string - if err := json.Unmarshal([]byte(v), &patterns); err != nil { - return nil, err - } - id.ExcludePatterns = patterns - case pb.AttrFollowPaths: - var paths []string - if err := json.Unmarshal([]byte(v), &paths); err != nil { - return nil, err - } - id.FollowPaths = paths - case pb.AttrSharedKeyHint: - id.SharedKeyHint = v - } - } - } - if id, ok := id.(*HttpIdentifier); ok { - for k, v := range op.Source.Attrs { - switch k { - case pb.AttrHTTPChecksum: - dgst, err := digest.Parse(v) - if err != nil { - return nil, err - } - id.Checksum = dgst - case pb.AttrHTTPFilename: - id.Filename = v - case pb.AttrHTTPPerm: - i, err := strconv.ParseInt(v, 0, 64) - if err != nil { - return nil, err - } - id.Perm = int(i) - case pb.AttrHTTPUID: - i, err := strconv.ParseInt(v, 0, 64) - if err != nil { - return nil, err - } - id.UID = int(i) - case pb.AttrHTTPGID: - i, err := strconv.ParseInt(v, 0, 64) - if err != nil { - return nil, err - } - id.GID = int(i) - } - } - } - return id, nil -} - -type ImageIdentifier struct { - Reference reference.Spec - Platform *specs.Platform - ResolveMode ResolveMode - RecordType client.UsageRecordType -} - -func NewImageIdentifier(str string) (*ImageIdentifier, error) { - ref, err := reference.Parse(str) - if err != nil { - return nil, errors.WithStack(err) - } - - if ref.Object == "" { - return nil, errors.WithStack(reference.ErrObjectRequired) - } - return &ImageIdentifier{Reference: ref}, nil -} - -func (_ *ImageIdentifier) ID() string { - return DockerImageScheme -} - -type LocalIdentifier struct { - Name string - SessionID string - IncludePatterns []string - ExcludePatterns []string - FollowPaths []string - SharedKeyHint string -} - -func NewLocalIdentifier(str string) (*LocalIdentifier, 
error) { - return &LocalIdentifier{Name: str}, nil -} - -func (*LocalIdentifier) ID() string { - return LocalScheme -} - -func NewHttpIdentifier(str string, tls bool) (*HttpIdentifier, error) { - proto := "https://" - if !tls { - proto = "http://" - } - return &HttpIdentifier{TLS: tls, URL: proto + str}, nil -} - -type HttpIdentifier struct { - TLS bool - URL string - Checksum digest.Digest - Filename string - Perm int - UID int - GID int -} - -func (_ *HttpIdentifier) ID() string { - return HttpsScheme -} - -func (r ResolveMode) String() string { - switch r { - case ResolveModeDefault: - return pb.AttrImageResolveModeDefault - case ResolveModeForcePull: - return pb.AttrImageResolveModeForcePull - case ResolveModePreferLocal: - return pb.AttrImageResolveModePreferLocal - default: - return "" - } -} - -func ParseImageResolveMode(v string) (ResolveMode, error) { - switch v { - case pb.AttrImageResolveModeDefault, "": - return ResolveModeDefault, nil - case pb.AttrImageResolveModeForcePull: - return ResolveModeForcePull, nil - case pb.AttrImageResolveModePreferLocal: - return ResolveModePreferLocal, nil - default: - return 0, errors.Errorf("invalid resolvemode: %s", v) - } -} - -func parseImageRecordType(v string) (client.UsageRecordType, error) { - switch client.UsageRecordType(v) { - case "", client.UsageRecordTypeRegular: - return client.UsageRecordTypeRegular, nil - case client.UsageRecordTypeInternal: - return client.UsageRecordTypeInternal, nil - case client.UsageRecordTypeFrontend: - return client.UsageRecordTypeFrontend, nil - default: - return "", errors.Errorf("invalid record type %s", v) - } -} diff --git a/vendor/github.com/moby/buildkit/source/local/local.go b/vendor/github.com/moby/buildkit/source/local/local.go deleted file mode 100644 index 8ffe0c09ed36..000000000000 --- a/vendor/github.com/moby/buildkit/source/local/local.go +++ /dev/null @@ -1,256 +0,0 @@ -package local - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/contenthash" - "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/filesync" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/source" - "github.com/moby/buildkit/util/progress" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/tonistiigi/fsutil" - bolt "go.etcd.io/bbolt" - "golang.org/x/time/rate" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -const keySharedKey = "local.sharedKey" - -type Opt struct { - SessionManager *session.Manager - CacheAccessor cache.Accessor - MetadataStore *metadata.Store -} - -func NewSource(opt Opt) (source.Source, error) { - ls := &localSource{ - sm: opt.SessionManager, - cm: opt.CacheAccessor, - md: opt.MetadataStore, - } - return ls, nil -} - -type localSource struct { - sm *session.Manager - cm cache.Accessor - md *metadata.Store -} - -func (ls *localSource) ID() string { - return source.LocalScheme -} - -func (ls *localSource) Resolve(ctx context.Context, id source.Identifier) (source.SourceInstance, error) { - localIdentifier, ok := id.(*source.LocalIdentifier) - if !ok { - return nil, errors.Errorf("invalid local identifier %v", id) - } - - return &localSourceHandler{ - src: *localIdentifier, - localSource: ls, - }, nil -} - -type localSourceHandler struct { - src source.LocalIdentifier - *localSource -} - -func (ls *localSourceHandler) 
CacheKey(ctx context.Context, index int) (string, bool, error) { - sessionID := ls.src.SessionID - - if sessionID == "" { - id := session.FromContext(ctx) - if id == "" { - return "", false, errors.New("could not access local files without session") - } - sessionID = id - } - dt, err := json.Marshal(struct { - SessionID string - IncludePatterns []string - ExcludePatterns []string - FollowPaths []string - }{SessionID: sessionID, IncludePatterns: ls.src.IncludePatterns, ExcludePatterns: ls.src.ExcludePatterns, FollowPaths: ls.src.FollowPaths}) - if err != nil { - return "", false, err - } - return "session:" + ls.src.Name + ":" + digest.FromBytes(dt).String(), true, nil -} - -func (ls *localSourceHandler) Snapshot(ctx context.Context) (out cache.ImmutableRef, retErr error) { - - id := session.FromContext(ctx) - if id == "" { - return nil, errors.New("could not access local files without session") - } - - timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - caller, err := ls.sm.Get(timeoutCtx, id) - if err != nil { - return nil, err - } - - sharedKey := keySharedKey + ":" + ls.src.Name + ":" + ls.src.SharedKeyHint + ":" + caller.SharedKey() // TODO: replace caller.SharedKey() with source based hint from client(absolute-path+nodeid) - - var mutable cache.MutableRef - sis, err := ls.md.Search(sharedKey) - if err != nil { - return nil, err - } - for _, si := range sis { - if m, err := ls.cm.GetMutable(ctx, si.ID()); err == nil { - logrus.Debugf("reusing ref for local: %s", m.ID()) - mutable = m - break - } - } - - if mutable == nil { - m, err := ls.cm.New(ctx, nil, cache.CachePolicyRetain, cache.WithRecordType(client.UsageRecordTypeLocalSource), cache.WithDescription(fmt.Sprintf("local source for %s", ls.src.Name))) - if err != nil { - return nil, err - } - mutable = m - logrus.Debugf("new ref for local: %s", mutable.ID()) - } - - defer func() { - if retErr != nil && mutable != nil { - go mutable.Release(context.TODO()) - } - }() - - mount, err := mutable.Mount(ctx, false) - if err != nil { - return nil, err - } - - lm := snapshot.LocalMounter(mount) - - dest, err := lm.Mount() - if err != nil { - return nil, err - } - - defer func() { - if retErr != nil && lm != nil { - lm.Unmount() - } - }() - - cc, err := contenthash.GetCacheContext(ctx, mutable.Metadata()) - if err != nil { - return nil, err - } - - opt := filesync.FSSendRequestOpt{ - Name: ls.src.Name, - IncludePatterns: ls.src.IncludePatterns, - ExcludePatterns: ls.src.ExcludePatterns, - FollowPaths: ls.src.FollowPaths, - OverrideExcludes: false, - DestDir: dest, - CacheUpdater: &cacheUpdater{cc}, - ProgressCb: newProgressHandler(ctx, "transferring "+ls.src.Name+":"), - } - - if err := filesync.FSSync(ctx, caller, opt); err != nil { - if status.Code(err) == codes.NotFound { - return nil, errors.Errorf("local source %s not enabled from the client", ls.src.Name) - } - return nil, err - } - - if err := lm.Unmount(); err != nil { - return nil, err - } - lm = nil - - if err := contenthash.SetCacheContext(ctx, mutable.Metadata(), cc); err != nil { - return nil, err - } - - // skip storing snapshot by the shared key if it already exists - skipStoreSharedKey := false - si, _ := ls.md.Get(mutable.ID()) - if v := si.Get(keySharedKey); v != nil { - var str string - if err := v.Unmarshal(&str); err != nil { - return nil, err - } - skipStoreSharedKey = str == sharedKey - } - if !skipStoreSharedKey { - v, err := metadata.NewValue(sharedKey) - if err != nil { - return nil, err - } - v.Index = sharedKey - if err := 
si.Update(func(b *bolt.Bucket) error { - return si.SetValue(b, sharedKey, v) - }); err != nil { - return nil, err - } - logrus.Debugf("saved %s as %s", mutable.ID(), sharedKey) - } - - snap, err := mutable.Commit(ctx) - if err != nil { - return nil, err - } - - mutable = nil // avoid deferred cleanup - - return snap, nil -} - -func newProgressHandler(ctx context.Context, id string) func(int, bool) { - limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 1) - pw, _, _ := progress.FromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - Action: "transferring", - } - pw.Write(id, st) - return func(s int, last bool) { - if last || limiter.Allow() { - st.Current = s - if last { - now := time.Now() - st.Completed = &now - } - pw.Write(id, st) - if last { - pw.Close() - } - } - } -} - -type cacheUpdater struct { - contenthash.CacheContext -} - -func (cu *cacheUpdater) MarkSupported(bool) { -} - -func (cu *cacheUpdater) ContentHasher() fsutil.ContentHasher { - return contenthash.NewFromStat -} diff --git a/vendor/github.com/moby/buildkit/source/manager.go b/vendor/github.com/moby/buildkit/source/manager.go deleted file mode 100644 index e520b6c77c3d..000000000000 --- a/vendor/github.com/moby/buildkit/source/manager.go +++ /dev/null @@ -1,48 +0,0 @@ -package source - -import ( - "context" - "sync" - - "github.com/moby/buildkit/cache" - "github.com/pkg/errors" -) - -type Source interface { - ID() string - Resolve(ctx context.Context, id Identifier) (SourceInstance, error) -} - -type SourceInstance interface { - CacheKey(ctx context.Context, index int) (string, bool, error) - Snapshot(ctx context.Context) (cache.ImmutableRef, error) -} - -type Manager struct { - mu sync.Mutex - sources map[string]Source -} - -func NewManager() (*Manager, error) { - return &Manager{ - sources: make(map[string]Source), - }, nil -} - -func (sm *Manager) Register(src Source) { - sm.mu.Lock() - sm.sources[src.ID()] = src - sm.mu.Unlock() -} - -func (sm *Manager) Resolve(ctx context.Context, id Identifier) (SourceInstance, error) { - sm.mu.Lock() - src, ok := sm.sources[id.ID()] - sm.mu.Unlock() - - if !ok { - return nil, errors.Errorf("no handler for %s", id.ID()) - } - - return src.Resolve(ctx, id) -} diff --git a/vendor/github.com/moby/buildkit/util/apicaps/caps.go b/vendor/github.com/moby/buildkit/util/apicaps/caps.go deleted file mode 100644 index 9a661b50c96e..000000000000 --- a/vendor/github.com/moby/buildkit/util/apicaps/caps.go +++ /dev/null @@ -1,162 +0,0 @@ -package apicaps - -import ( - "fmt" - "sort" - "strings" - - pb "github.com/moby/buildkit/util/apicaps/pb" - "github.com/pkg/errors" -) - -type PBCap = pb.APICap - -// ExportedProduct is the name of the product using this package. -// Users vendoring this library may override it to provide better versioning hints -// for their users (or set it with a flag to buildkitd). -var ExportedProduct string - -// CapStatus defines the stability properties of a capability -type CapStatus int - -const ( - // CapStatusStable refers to a capability that should never be changed in - // backwards incompatible manner unless there is a serious security issue. - CapStatusStable CapStatus = iota - // CapStatusExperimental refers to a capability that may be removed in the future. - // If incompatible changes are made the previous ID is disabled and new is added. - CapStatusExperimental - // CapStatusPrerelease is same as CapStatusExperimental that can be used for new - // features before they move to stable. 
- CapStatusPrerelease -) - -// CapID is type for capability identifier -type CapID string - -// Cap describes an API feature -type Cap struct { - ID CapID - Name string // readable name, may contain spaces but keep in one sentence - Status CapStatus - Enabled bool - Deprecated bool - SupportedHint map[string]string - DisabledReason string - DisabledReasonMsg string - DisabledAlternative string -} - -// CapList is a collection of capability definitions -type CapList struct { - m map[CapID]Cap -} - -// Init initializes definition for a new capability. -// Not safe to be called concurrently with other methods. -func (l *CapList) Init(cc ...Cap) { - if l.m == nil { - l.m = make(map[CapID]Cap, len(cc)) - } - for _, c := range cc { - l.m[c.ID] = c - } -} - -// All reports the configuration of all known capabilities -func (l *CapList) All() []pb.APICap { - out := make([]pb.APICap, 0, len(l.m)) - for _, c := range l.m { - out = append(out, pb.APICap{ - ID: string(c.ID), - Enabled: c.Enabled, - Deprecated: c.Deprecated, - DisabledReason: c.DisabledReason, - DisabledReasonMsg: c.DisabledReasonMsg, - DisabledAlternative: c.DisabledAlternative, - }) - } - sort.Slice(out, func(i, j int) bool { - return out[i].ID < out[j].ID - }) - return out -} - -// CapSet returns a CapSet for an capability configuration -func (l *CapList) CapSet(caps []pb.APICap) CapSet { - m := make(map[string]*pb.APICap, len(caps)) - for _, c := range caps { - if c.ID != "" { - c := c // capture loop iterator - m[c.ID] = &c - } - } - return CapSet{ - list: l, - set: m, - } -} - -// CapSet is a configuration for detecting supported capabilities -type CapSet struct { - list *CapList - set map[string]*pb.APICap -} - -// Supports returns an error if capability is not supported -func (s *CapSet) Supports(id CapID) error { - err := &CapError{ID: id} - c, ok := s.list.m[id] - if !ok { - return errors.WithStack(err) - } - err.Definition = &c - state, ok := s.set[string(id)] - if !ok { - return errors.WithStack(err) - } - err.State = state - if !state.Enabled { - return errors.WithStack(err) - } - return nil -} - -// CapError is an error for unsupported capability -type CapError struct { - ID CapID - Definition *Cap - State *pb.APICap -} - -func (e CapError) Error() string { - if e.Definition == nil { - return fmt.Sprintf("unknown API capability %s", e.ID) - } - typ := "" - if e.Definition.Status == CapStatusExperimental { - typ = "experimental " - } - if e.Definition.Status == CapStatusPrerelease { - typ = "prerelease " - } - name := "" - if e.Definition.Name != "" { - name = "(" + e.Definition.Name + ")" - } - b := &strings.Builder{} - fmt.Fprintf(b, "requested %sfeature %s %s", typ, e.ID, name) - if e.State == nil { - fmt.Fprint(b, " is not supported by build server") - if hint, ok := e.Definition.SupportedHint[ExportedProduct]; ok { - fmt.Fprintf(b, " (added in %s)", hint) - } - fmt.Fprintf(b, ", please update %s", ExportedProduct) - } else { - fmt.Fprint(b, " has been disabled on the build server") - if e.State.DisabledReasonMsg != "" { - fmt.Fprintf(b, ": %s", e.State.DisabledReasonMsg) - } - } - return b.String() -} diff --git a/vendor/github.com/moby/buildkit/util/apicaps/caps_test.go b/vendor/github.com/moby/buildkit/util/apicaps/caps_test.go deleted file mode 100644 index 8a6b5becdeda..000000000000 --- a/vendor/github.com/moby/buildkit/util/apicaps/caps_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package apicaps - -import ( - "testing" - - pb "github.com/moby/buildkit/util/apicaps/pb" - "github.com/stretchr/testify/assert" -) - -func 
TestDisabledCap(t *testing.T) { - var cl CapList - cl.Init(Cap{ - ID: "cap1", - Name: "a test cap", - Enabled: true, - Status: CapStatusExperimental, - }) - cl.Init(Cap{ - ID: "cap2", - Name: "a second test cap", - Enabled: false, - Status: CapStatusExperimental, - }) - - cs := cl.CapSet([]pb.APICap{ - {ID: "cap1", Enabled: true}, - {ID: "cap2", Enabled: true}, - }) - err := cs.Supports("cap1") - assert.NoError(t, err) - err = cs.Supports("cap2") - assert.NoError(t, err) - - cs = cl.CapSet([]pb.APICap{ - {ID: "cap1", Enabled: true}, - {ID: "cap2", Enabled: false}, - }) - err = cs.Supports("cap1") - assert.NoError(t, err) - err = cs.Supports("cap2") - assert.EqualError(t, err, "requested experimental feature cap2 (a second test cap) has been disabled on the build server") - - cs = cl.CapSet([]pb.APICap{ - {ID: "cap1", Enabled: false}, - {ID: "cap2", Enabled: true}, - }) - err = cs.Supports("cap1") - assert.EqualError(t, err, "requested experimental feature cap1 (a test cap) has been disabled on the build server") - err = cs.Supports("cap2") - assert.NoError(t, err) - - cs = cl.CapSet([]pb.APICap{ - {ID: "cap1", Enabled: false}, - {ID: "cap2", Enabled: false}, - }) - err = cs.Supports("cap1") - assert.EqualError(t, err, "requested experimental feature cap1 (a test cap) has been disabled on the build server") - err = cs.Supports("cap2") - assert.EqualError(t, err, "requested experimental feature cap2 (a second test cap) has been disabled on the build server") -} diff --git a/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go b/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go deleted file mode 100644 index 9d4d48802427..000000000000 --- a/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go +++ /dev/null @@ -1,535 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: caps.proto - -/* - Package moby_buildkit_v1_apicaps is a generated protocol buffer package. - - It is generated from these files: - caps.proto - - It has these top-level messages: - APICap -*/ -package moby_buildkit_v1_apicaps - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -// APICap defines a capability supported by the service -type APICap struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Enabled bool `protobuf:"varint,2,opt,name=Enabled,proto3" json:"Enabled,omitempty"` - Deprecated bool `protobuf:"varint,3,opt,name=Deprecated,proto3" json:"Deprecated,omitempty"` - DisabledReason string `protobuf:"bytes,4,opt,name=DisabledReason,proto3" json:"DisabledReason,omitempty"` - DisabledReasonMsg string `protobuf:"bytes,5,opt,name=DisabledReasonMsg,proto3" json:"DisabledReasonMsg,omitempty"` - DisabledAlternative string `protobuf:"bytes,6,opt,name=DisabledAlternative,proto3" json:"DisabledAlternative,omitempty"` -} - -func (m *APICap) Reset() { *m = APICap{} } -func (m *APICap) String() string { return proto.CompactTextString(m) } -func (*APICap) ProtoMessage() {} -func (*APICap) Descriptor() ([]byte, []int) { return fileDescriptorCaps, []int{0} } - -func (m *APICap) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *APICap) GetEnabled() bool { - if m != nil { - return m.Enabled - } - return false -} - -func (m *APICap) GetDeprecated() bool { - if m != nil { - return m.Deprecated - } - return false -} - -func (m *APICap) GetDisabledReason() string { - if m != nil { - return m.DisabledReason - } - return "" -} - -func (m *APICap) GetDisabledReasonMsg() string { - if m != nil { - return m.DisabledReasonMsg - } - return "" -} - -func (m *APICap) GetDisabledAlternative() string { - if m != nil { - return m.DisabledAlternative - } - return "" -} - -func init() { - proto.RegisterType((*APICap)(nil), "moby.buildkit.v1.apicaps.APICap") -} -func (m *APICap) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *APICap) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ID) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintCaps(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - } - if m.Enabled { - dAtA[i] = 0x10 - i++ - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Deprecated { - dAtA[i] = 0x18 - i++ - if m.Deprecated { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.DisabledReason) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledReason))) - i += copy(dAtA[i:], m.DisabledReason) - } - if len(m.DisabledReasonMsg) > 0 { - dAtA[i] = 0x2a - i++ - i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledReasonMsg))) - i += copy(dAtA[i:], m.DisabledReasonMsg) - } - if len(m.DisabledAlternative) > 0 { - dAtA[i] = 0x32 - i++ - i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledAlternative))) - i += copy(dAtA[i:], m.DisabledAlternative) - } - return i, nil -} - -func encodeVarintCaps(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *APICap) Size() (n int) { - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovCaps(uint64(l)) - } - if m.Enabled { - n += 2 - } - if m.Deprecated { - n += 2 - } - l = len(m.DisabledReason) - if l > 0 { - n += 1 + l + sovCaps(uint64(l)) - } - l = len(m.DisabledReasonMsg) - if l > 0 { - n += 1 + l + sovCaps(uint64(l)) - } - l = len(m.DisabledAlternative) - if l > 0 { - n += 1 + l + sovCaps(uint64(l)) - } - return n -} - -func 
sovCaps(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozCaps(x uint64) (n int) { - return sovCaps(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *APICap) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCaps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: APICap: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: APICap: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCaps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCaps - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCaps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Deprecated", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCaps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Deprecated = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DisabledReason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCaps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCaps - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DisabledReason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DisabledReasonMsg", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCaps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCaps - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DisabledReasonMsg = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DisabledAlternative", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCaps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCaps - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DisabledAlternative = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCaps(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCaps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipCaps(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCaps - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCaps - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCaps - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthCaps - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCaps - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipCaps(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthCaps = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCaps = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("caps.proto", fileDescriptorCaps) } - -var fileDescriptorCaps = []byte{ - // 236 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x4e, 0x2c, 0x28, - 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, 0x4b, 0x2a, 0xcd, - 0xcc, 0x49, 0xc9, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0xd4, 0x4b, 0x2c, 0xc8, 0x04, 0xc9, 0x4b, 0xe9, - 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb, - 0x83, 0x35, 0x24, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0x31, 0x48, 0xe9, 0x16, 0x23, - 0x17, 0x9b, 0x63, 0x80, 0xa7, 0x73, 0x62, 0x81, 
0x10, 0x1f, 0x17, 0x93, 0xa7, 0x8b, 0x04, 0xa3, - 0x02, 0xa3, 0x06, 0x67, 0x10, 0x93, 0xa7, 0x8b, 0x90, 0x04, 0x17, 0xbb, 0x6b, 0x5e, 0x62, 0x52, - 0x4e, 0x6a, 0x8a, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x8c, 0x2b, 0x24, 0xc7, 0xc5, 0xe5, - 0x92, 0x5a, 0x50, 0x94, 0x9a, 0x9c, 0x58, 0x92, 0x9a, 0x22, 0xc1, 0x0c, 0x96, 0x44, 0x12, 0x11, - 0x52, 0xe3, 0xe2, 0x73, 0xc9, 0x2c, 0x06, 0xab, 0x0d, 0x4a, 0x4d, 0x2c, 0xce, 0xcf, 0x93, 0x60, - 0x01, 0x9b, 0x8a, 0x26, 0x2a, 0xa4, 0xc3, 0x25, 0x88, 0x2a, 0xe2, 0x5b, 0x9c, 0x2e, 0xc1, 0x0a, - 0x56, 0x8a, 0x29, 0x21, 0x64, 0xc0, 0x25, 0x0c, 0x13, 0x74, 0xcc, 0x29, 0x49, 0x2d, 0xca, 0x4b, - 0x2c, 0xc9, 0x2c, 0x4b, 0x95, 0x60, 0x03, 0xab, 0xc7, 0x26, 0xe5, 0xc4, 0x73, 0xe2, 0x91, 0x1c, - 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x26, 0xb1, 0x81, 0x7d, 0x6c, 0x0c, 0x08, - 0x00, 0x00, 0xff, 0xff, 0x02, 0x2d, 0x9e, 0x91, 0x48, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto b/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto deleted file mode 100644 index 1e8c06517c51..000000000000 --- a/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package moby.buildkit.v1.apicaps; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - -option (gogoproto.sizer_all) = true; -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -// APICap defines a capability supported by the service -message APICap { - string ID = 1; - bool Enabled = 2; - bool Deprecated = 3; // Unused. May be used for warnings in the future - string DisabledReason = 4; // Reason key for detection code - string DisabledReasonMsg = 5; // Message to the user - string DisabledAlternative = 6; // Identifier that updated client could catch. -} \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go b/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go deleted file mode 100644 index 281dfabd64a2..000000000000 --- a/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package moby_buildkit_v1_apicaps - -//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. caps.proto diff --git a/vendor/github.com/moby/buildkit/util/appcontext/appcontext.go b/vendor/github.com/moby/buildkit/util/appcontext/appcontext.go deleted file mode 100644 index e74da2cdb56f..000000000000 --- a/vendor/github.com/moby/buildkit/util/appcontext/appcontext.go +++ /dev/null @@ -1,41 +0,0 @@ -package appcontext - -import ( - "context" - "os" - "os/signal" - "sync" - - "github.com/sirupsen/logrus" -) - -var appContextCache context.Context -var appContextOnce sync.Once - -// Context returns a static context that reacts to termination signals of the -// running process. Useful in CLI tools. -func Context() context.Context { - appContextOnce.Do(func() { - signals := make(chan os.Signal, 2048) - signal.Notify(signals, terminationSignals...) 
- - const exitLimit = 3 - retries := 0 - - ctx, cancel := context.WithCancel(context.Background()) - appContextCache = ctx - - go func() { - for { - <-signals - cancel() - retries++ - if retries >= exitLimit { - logrus.Errorf("got %d SIGTERM/SIGINTs, forcing shutdown", retries) - os.Exit(1) - } - } - }() - }) - return appContextCache -} diff --git a/vendor/github.com/moby/buildkit/util/appcontext/appcontext_unix.go b/vendor/github.com/moby/buildkit/util/appcontext/appcontext_unix.go deleted file mode 100644 index b586e2f6131e..000000000000 --- a/vendor/github.com/moby/buildkit/util/appcontext/appcontext_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package appcontext - -import ( - "os" - - "golang.org/x/sys/unix" -) - -var terminationSignals = []os.Signal{unix.SIGTERM, unix.SIGINT} diff --git a/vendor/github.com/moby/buildkit/util/appcontext/appcontext_windows.go b/vendor/github.com/moby/buildkit/util/appcontext/appcontext_windows.go deleted file mode 100644 index 0a8bcbe7df2a..000000000000 --- a/vendor/github.com/moby/buildkit/util/appcontext/appcontext_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package appcontext - -import ( - "os" -) - -var terminationSignals = []os.Signal{os.Interrupt} diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go deleted file mode 100644 index 6252147e0d6c..000000000000 --- a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build !windows - -package appdefaults - -import ( - "os" - "path/filepath" - "strings" -) - -const ( - Address = "unix:///run/buildkit/buildkitd.sock" - Root = "/var/lib/buildkit" - ConfigDir = "/etc/buildkit" -) - -// UserAddress typically returns /run/user/$UID/buildkit/buildkitd.sock -func UserAddress() string { - // pam_systemd sets XDG_RUNTIME_DIR but not other dirs. - xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR") - if xdgRuntimeDir != "" { - dirs := strings.Split(xdgRuntimeDir, ":") - return "unix://" + filepath.Join(dirs[0], "buildkit", "buildkitd.sock") - } - return Address -} - -// EnsureUserAddressDir sets sticky bit on XDG_RUNTIME_DIR if XDG_RUNTIME_DIR is set. -// See https://github.com/opencontainers/runc/issues/1694 -func EnsureUserAddressDir() error { - xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR") - if xdgRuntimeDir != "" { - dirs := strings.Split(xdgRuntimeDir, ":") - dir := filepath.Join(dirs[0], "buildkit") - if err := os.MkdirAll(dir, 0700); err != nil { - return err - } - return os.Chmod(dir, 0700|os.ModeSticky) - } - return nil -} - -// UserRoot typically returns /home/$USER/.local/share/buildkit -func UserRoot() string { - // pam_systemd sets XDG_RUNTIME_DIR but not other dirs. - xdgDataHome := os.Getenv("XDG_DATA_HOME") - if xdgDataHome != "" { - dirs := strings.Split(xdgDataHome, ":") - return filepath.Join(dirs[0], "buildkit") - } - home := os.Getenv("HOME") - if home != "" { - return filepath.Join(home, ".local", "share", "buildkit") - } - return Root -} - -// UserConfigDir returns dir for storing config. 
/home/$USER/.config/buildkit/ -func UserConfigDir() string { - xdgConfigHome := os.Getenv("XDG_CONFIG_HOME") - if xdgConfigHome != "" { - return filepath.Join(xdgConfigHome, "buildkit") - } - home := os.Getenv("HOME") - if home != "" { - return filepath.Join(home, ".config", "buildkit") - } - return ConfigDir -} diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go deleted file mode 100644 index 74f8389da656..000000000000 --- a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -package appdefaults - -const ( - Address = "npipe:////./pipe/buildkitd" - Root = ".buildstate" - ConfigDir = "" -) - -func UserAddress() string { - return Address -} - -func EnsureUserAddressDir() error { - return nil -} - -func UserRoot() string { - return Root -} - -func UserConfigDir() string { - return ConfigDir -} diff --git a/vendor/github.com/moby/buildkit/util/cond/cond.go b/vendor/github.com/moby/buildkit/util/cond/cond.go deleted file mode 100644 index c5e07aec9e3d..000000000000 --- a/vendor/github.com/moby/buildkit/util/cond/cond.go +++ /dev/null @@ -1,40 +0,0 @@ -package cond - -import ( - "sync" -) - -// NewStatefulCond returns a stateful version of sync.Cond . This cond will -// never block on `Wait()` if `Signal()` has been called after the `Wait()` last -// returned. This is useful for avoiding to take a lock on `cond.Locker` for -// signalling. -func NewStatefulCond(l sync.Locker) *StatefulCond { - sc := &StatefulCond{main: l} - sc.c = sync.NewCond(&sc.mu) - return sc -} - -type StatefulCond struct { - main sync.Locker - mu sync.Mutex - c *sync.Cond - signalled bool -} - -func (s *StatefulCond) Wait() { - s.main.Unlock() - s.mu.Lock() - if !s.signalled { - s.c.Wait() - } - s.signalled = false - s.mu.Unlock() - s.main.Lock() -} - -func (s *StatefulCond) Signal() { - s.mu.Lock() - s.signalled = true - s.c.Signal() - s.mu.Unlock() -} diff --git a/vendor/github.com/moby/buildkit/util/cond/cond_test.go b/vendor/github.com/moby/buildkit/util/cond/cond_test.go deleted file mode 100644 index 3741fd771a3f..000000000000 --- a/vendor/github.com/moby/buildkit/util/cond/cond_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package cond - -import ( - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestCondInitialWaitBlocks(t *testing.T) { - t.Parallel() - - var mu sync.Mutex - - c := NewStatefulCond(&mu) - - waited := make(chan struct{}) - - mu.Lock() - - go func() { - c.Wait() - close(waited) - }() - - select { - case <-time.After(50 * time.Millisecond): - case <-waited: - require.Fail(t, "wait should have blocked") - } - - c.Signal() - - select { - case <-time.After(300 * time.Millisecond): - require.Fail(t, "wait should have resumed") - case <-waited: - } - - mu.Unlock() -} - -func TestInitialSignalDoesntBlock(t *testing.T) { - t.Parallel() - - var mu sync.Mutex - - c := NewStatefulCond(&mu) - - waited := make(chan struct{}) - - c.Signal() - - mu.Lock() - - go func() { - c.Wait() - close(waited) - }() - - select { - case <-time.After(300 * time.Millisecond): - require.Fail(t, "wait should have resumed") - case <-waited: - } - - waited = make(chan struct{}) - go func() { - c.Wait() - close(waited) - }() - - select { - case <-time.After(50 * time.Millisecond): - case <-waited: - require.Fail(t, "wait should have blocked") - } - - c.Signal() - - <-waited - - mu.Unlock() -} - -func TestSignalBetweenWaits(t *testing.T) { - t.Parallel() 
- - var mu sync.Mutex - - c := NewStatefulCond(&mu) - - mu.Lock() - - waited := make(chan struct{}) - - go func() { - c.Wait() - close(waited) - }() - - select { - case <-time.After(50 * time.Millisecond): - case <-waited: - require.Fail(t, "wait should have blocked") - } - - c.Signal() - - <-waited - - c.Signal() - - waited = make(chan struct{}) - go func() { - c.Wait() - close(waited) - }() - - <-waited - - mu.Unlock() -} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/buffer.go b/vendor/github.com/moby/buildkit/util/contentutil/buffer.go deleted file mode 100644 index ac8c8baff36a..000000000000 --- a/vendor/github.com/moby/buildkit/util/contentutil/buffer.go +++ /dev/null @@ -1,156 +0,0 @@ -package contentutil - -import ( - "bytes" - "context" - "io/ioutil" - "sync" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// Buffer is a content provider and ingester that keeps data in memory -type Buffer interface { - content.Provider - content.Ingester -} - -// NewBuffer returns a new buffer -func NewBuffer() Buffer { - return &buffer{ - buffers: map[digest.Digest][]byte{}, - refs: map[string]struct{}{}, - } -} - -type buffer struct { - mu sync.Mutex - buffers map[digest.Digest][]byte - refs map[string]struct{} -} - -func (b *buffer) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { - var wOpts content.WriterOpts - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil, err - } - } - b.mu.Lock() - if _, ok := b.refs[wOpts.Ref]; ok { - return nil, errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked", wOpts.Ref) - } - b.mu.Unlock() - return &bufferedWriter{ - main: b, - digester: digest.Canonical.Digester(), - buffer: bytes.NewBuffer(nil), - expected: wOpts.Desc.Digest, - releaseRef: func() { - b.mu.Lock() - delete(b.refs, wOpts.Ref) - b.mu.Unlock() - }, - }, nil -} - -func (b *buffer) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { - r, err := b.getBytesReader(ctx, desc.Digest) - if err != nil { - return nil, err - } - return &readerAt{Reader: r, Closer: ioutil.NopCloser(r), size: int64(r.Len())}, nil -} - -func (b *buffer) getBytesReader(ctx context.Context, dgst digest.Digest) (*bytes.Reader, error) { - b.mu.Lock() - defer b.mu.Unlock() - - if dt, ok := b.buffers[dgst]; ok { - return bytes.NewReader(dt), nil - } - - return nil, errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) -} - -func (b *buffer) addValue(k digest.Digest, dt []byte) { - b.mu.Lock() - defer b.mu.Unlock() - b.buffers[k] = dt -} - -type bufferedWriter struct { - main *buffer - ref string - offset int64 - total int64 - startedAt time.Time - updatedAt time.Time - buffer *bytes.Buffer - expected digest.Digest - digester digest.Digester - releaseRef func() -} - -func (w *bufferedWriter) Write(p []byte) (n int, err error) { - n, err = w.buffer.Write(p) - w.digester.Hash().Write(p[:n]) - w.offset += int64(len(p)) - w.updatedAt = time.Now() - return n, err -} - -func (w *bufferedWriter) Close() error { - if w.buffer != nil { - w.releaseRef() - w.buffer = nil - } - return nil -} - -func (w *bufferedWriter) Status() (content.Status, error) { - return content.Status{ - Ref: w.ref, - Offset: w.offset, - Total: w.total, - StartedAt: w.startedAt, - UpdatedAt: w.updatedAt, - }, nil -} - -func (w *bufferedWriter) Digest() digest.Digest { - return 
w.digester.Digest() -} - -func (w *bufferedWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opt ...content.Opt) error { - if w.buffer == nil { - return errors.Errorf("can't commit already committed or closed") - } - if s := int64(w.buffer.Len()); size > 0 && size != s { - return errors.Errorf("unexpected commit size %d, expected %d", s, size) - } - dgst := w.digester.Digest() - if expected != "" && expected != dgst { - return errors.Errorf("unexpected digest: %v != %v", dgst, expected) - } - if w.expected != "" && w.expected != dgst { - return errors.Errorf("unexpected digest: %v != %v", dgst, w.expected) - } - w.main.addValue(dgst, w.buffer.Bytes()) - return w.Close() -} - -func (w *bufferedWriter) Truncate(size int64) error { - if size != 0 { - return errors.New("Truncate: unsupported size") - } - w.offset = 0 - w.digester.Hash().Reset() - w.buffer.Reset() - return nil -} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/buffer_test.go b/vendor/github.com/moby/buildkit/util/contentutil/buffer_test.go deleted file mode 100644 index 1388bbeaa1cc..000000000000 --- a/vendor/github.com/moby/buildkit/util/contentutil/buffer_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package contentutil - -import ( - "bytes" - "context" - "io" - "testing" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -func TestReadWrite(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - b := NewBuffer() - - err := content.WriteBlob(ctx, b, "foo", bytes.NewBuffer([]byte("foo0")), ocispec.Descriptor{Size: -1}) - require.NoError(t, err) - - err = content.WriteBlob(ctx, b, "foo", bytes.NewBuffer([]byte("foo1")), ocispec.Descriptor{Size: 4}) - require.NoError(t, err) - - err = content.WriteBlob(ctx, b, "foo", bytes.NewBuffer([]byte("foo2")), ocispec.Descriptor{Size: 3}) - require.Error(t, err) - - err = content.WriteBlob(ctx, b, "foo", bytes.NewBuffer([]byte("foo3")), ocispec.Descriptor{Size: -1, Digest: digest.FromBytes([]byte("foo4"))}) - require.Error(t, err) - - err = content.WriteBlob(ctx, b, "foo", bytes.NewBuffer([]byte("foo4")), ocispec.Descriptor{Size: -1, Digest: digest.FromBytes([]byte("foo4"))}) - require.NoError(t, err) - - dt, err := content.ReadBlob(ctx, b, ocispec.Descriptor{Digest: digest.FromBytes([]byte("foo1"))}) - require.NoError(t, err) - require.Equal(t, string(dt), "foo1") - - _, err = content.ReadBlob(ctx, b, ocispec.Descriptor{Digest: digest.FromBytes([]byte("foo3"))}) - require.Error(t, err) - require.Equal(t, errors.Cause(err), errdefs.ErrNotFound) -} - -func TestReaderAt(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - b := NewBuffer() - - err := content.WriteBlob(ctx, b, "foo", bytes.NewBuffer([]byte("foobar")), ocispec.Descriptor{Size: -1}) - require.NoError(t, err) - - rdr, err := b.ReaderAt(ctx, ocispec.Descriptor{Digest: digest.FromBytes([]byte("foobar"))}) - require.NoError(t, err) - - require.Equal(t, int64(6), rdr.Size()) - - buf := make([]byte, 3) - - n, err := rdr.ReadAt(buf, 1) - require.NoError(t, err) - require.Equal(t, "oob", string(buf[:n])) - - buf = make([]byte, 7) - - n, err = rdr.ReadAt(buf, 3) - require.Error(t, err) - require.Equal(t, err, io.EOF) - require.Equal(t, "bar", string(buf[:n])) -} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/copy.go 
b/vendor/github.com/moby/buildkit/util/contentutil/copy.go deleted file mode 100644 index 04d46c4f36dc..000000000000 --- a/vendor/github.com/moby/buildkit/util/contentutil/copy.go +++ /dev/null @@ -1,81 +0,0 @@ -package contentutil - -import ( - "context" - "io" - "sync" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/remotes" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func Copy(ctx context.Context, ingester content.Ingester, provider content.Provider, desc ocispec.Descriptor) error { - if _, err := remotes.FetchHandler(ingester, &localFetcher{provider})(ctx, desc); err != nil { - return err - } - return nil -} - -type localFetcher struct { - content.Provider -} - -func (f *localFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { - r, err := f.Provider.ReaderAt(ctx, desc) - if err != nil { - return nil, err - } - return &rc{ReaderAt: r}, nil -} - -type rc struct { - content.ReaderAt - offset int -} - -func (r *rc) Read(b []byte) (int, error) { - n, err := r.ReadAt(b, int64(r.offset)) - r.offset += n - if n > 0 && err == io.EOF { - err = nil - } - return n, err -} - -func CopyChain(ctx context.Context, ingester content.Ingester, provider content.Provider, desc ocispec.Descriptor) error { - var m sync.Mutex - manifestStack := []ocispec.Descriptor{} - - filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, - images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - m.Lock() - manifestStack = append(manifestStack, desc) - m.Unlock() - return nil, images.ErrStopHandler - default: - return nil, nil - } - }) - handlers := []images.Handler{ - images.ChildrenHandler(provider), - filterHandler, - remotes.FetchHandler(ingester, &localFetcher{provider}), - } - - if err := images.Dispatch(ctx, images.Handlers(handlers...), desc); err != nil { - return errors.WithStack(err) - } - - for i := len(manifestStack) - 1; i >= 0; i-- { - if err := Copy(ctx, ingester, provider, manifestStack[i]); err != nil { - return errors.WithStack(err) - } - } - - return nil -} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/copy_test.go b/vendor/github.com/moby/buildkit/util/contentutil/copy_test.go deleted file mode 100644 index f3e8b0000bc3..000000000000 --- a/vendor/github.com/moby/buildkit/util/contentutil/copy_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package contentutil - -import ( - "bytes" - "context" - "testing" - - "github.com/containerd/containerd/content" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/stretchr/testify/require" -) - -func TestCopy(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - b0 := NewBuffer() - b1 := NewBuffer() - - err := content.WriteBlob(ctx, b0, "foo", bytes.NewBuffer([]byte("foobar")), ocispec.Descriptor{Size: -1}) - require.NoError(t, err) - - err = Copy(ctx, b1, b0, ocispec.Descriptor{Digest: digest.FromBytes([]byte("foobar")), Size: -1}) - require.NoError(t, err) - - dt, err := content.ReadBlob(ctx, b1, ocispec.Descriptor{Digest: digest.FromBytes([]byte("foobar"))}) - require.NoError(t, err) - require.Equal(t, string(dt), "foobar") -} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go 
b/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go deleted file mode 100644 index d55c10121984..000000000000 --- a/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go +++ /dev/null @@ -1,73 +0,0 @@ -package contentutil - -import ( - "context" - "io" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/remotes" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func FromFetcher(f remotes.Fetcher) content.Provider { - return &fetchedProvider{ - f: f, - } -} - -type fetchedProvider struct { - f remotes.Fetcher -} - -func (p *fetchedProvider) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { - rc, err := p.f.Fetch(ctx, desc) - if err != nil { - return nil, err - } - - return &readerAt{Reader: rc, Closer: rc, size: desc.Size}, nil -} - -type readerAt struct { - io.Reader - io.Closer - size int64 - offset int64 -} - -func (r *readerAt) ReadAt(b []byte, off int64) (int, error) { - if ra, ok := r.Reader.(io.ReaderAt); ok { - return ra.ReadAt(b, off) - } - - if r.offset != off { - if seeker, ok := r.Reader.(io.Seeker); ok { - if _, err := seeker.Seek(off, io.SeekStart); err != nil { - return 0, err - } - r.offset = off - } else { - return 0, errors.Errorf("unsupported offset") - } - } - - var totalN int - for len(b) > 0 { - n, err := r.Reader.Read(b) - if err == io.EOF && n == len(b) { - err = nil - } - r.offset += int64(n) - totalN += n - b = b[n:] - if err != nil { - return totalN, err - } - } - return totalN, nil -} - -func (r *readerAt) Size() int64 { - return r.size -} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/fetcher_test.go b/vendor/github.com/moby/buildkit/util/contentutil/fetcher_test.go deleted file mode 100644 index e2521d792937..000000000000 --- a/vendor/github.com/moby/buildkit/util/contentutil/fetcher_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package contentutil - -import ( - "bytes" - "context" - "io" - "testing" - "time" - - "github.com/containerd/containerd/content" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/stretchr/testify/require" -) - -func TestFetcher(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - b0 := NewBuffer() - - err := content.WriteBlob(ctx, b0, "foo", bytes.NewBuffer([]byte("foobar")), ocispec.Descriptor{Size: -1}) - require.NoError(t, err) - - f := &localFetcher{b0} - p := FromFetcher(f) - - b1 := NewBuffer() - err = Copy(ctx, b1, p, ocispec.Descriptor{Digest: digest.FromBytes([]byte("foobar")), Size: -1}) - require.NoError(t, err) - - dt, err := content.ReadBlob(ctx, b1, ocispec.Descriptor{Digest: digest.FromBytes([]byte("foobar"))}) - require.NoError(t, err) - require.Equal(t, string(dt), "foobar") - - rdr, err := p.ReaderAt(ctx, ocispec.Descriptor{Digest: digest.FromBytes([]byte("foobar"))}) - require.NoError(t, err) - - buf := make([]byte, 3) - - n, err := rdr.ReadAt(buf, 1) - require.NoError(t, err) - require.Equal(t, "oob", string(buf[:n])) - - n, err = rdr.ReadAt(buf, 5) - require.Error(t, err) - require.Equal(t, err, io.EOF) - require.Equal(t, "r", string(buf[:n])) -} - -func TestSlowFetch(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - f := &dummySlowFetcher{} - p := FromFetcher(f) - - rdr, err := p.ReaderAt(ctx, ocispec.Descriptor{Digest: digest.FromBytes([]byte("foobar"))}) - require.NoError(t, err) - - buf := make([]byte, 3) - - n, err := rdr.ReadAt(buf, 1) - require.NoError(t, err) - require.Equal(t, "oob", 
string(buf[:n])) - - n, err = rdr.ReadAt(buf, 5) - require.Error(t, err) - require.Equal(t, err, io.EOF) - require.Equal(t, "r", string(buf[:n])) -} - -type dummySlowFetcher struct{} - -func (f *dummySlowFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { - return newSlowBuffer([]byte("foobar")), nil -} - -func newSlowBuffer(dt []byte) io.ReadCloser { - return &slowBuffer{dt: dt} -} - -type slowBuffer struct { - dt []byte - off int -} - -func (sb *slowBuffer) Seek(offset int64, _ int) (int64, error) { - sb.off = int(offset) - return offset, nil -} - -func (sb *slowBuffer) Read(b []byte) (int, error) { - time.Sleep(5 * time.Millisecond) - if sb.off >= len(sb.dt) { - return 0, io.EOF - } - b[0] = sb.dt[sb.off] - sb.off++ - return 1, nil -} - -func (sb *slowBuffer) Close() error { - return nil -} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go b/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go deleted file mode 100644 index 3dafed7dc2ad..000000000000 --- a/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go +++ /dev/null @@ -1,48 +0,0 @@ -package contentutil - -import ( - "context" - "sync" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// NewMultiProvider creates a new mutable provider with a base provider -func NewMultiProvider(base content.Provider) *MultiProvider { - return &MultiProvider{ - base: base, - sub: map[digest.Digest]content.Provider{}, - } -} - -// MultiProvider is a provider backed by a mutable map of providers -type MultiProvider struct { - mu sync.RWMutex - base content.Provider - sub map[digest.Digest]content.Provider -} - -// ReaderAt returns a content.ReaderAt -func (mp *MultiProvider) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { - mp.mu.RLock() - if p, ok := mp.sub[desc.Digest]; ok { - mp.mu.RUnlock() - return p.ReaderAt(ctx, desc) - } - mp.mu.RUnlock() - if mp.base == nil { - return nil, errors.Wrapf(errdefs.ErrNotFound, "content %v", desc.Digest) - } - return mp.base.ReaderAt(ctx, desc) -} - -// Add adds a new child provider for a specific digest -func (mp *MultiProvider) Add(dgst digest.Digest, p content.Provider) { - mp.mu.Lock() - defer mp.mu.Unlock() - mp.sub[dgst] = p -} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/multiprovider_test.go b/vendor/github.com/moby/buildkit/util/contentutil/multiprovider_test.go deleted file mode 100644 index d80e5fd577d9..000000000000 --- a/vendor/github.com/moby/buildkit/util/contentutil/multiprovider_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package contentutil - -import ( - "bytes" - "context" - "testing" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -func TestMultiProvider(t *testing.T) { - t.Parallel() - ctx := context.TODO() - - b0 := NewBuffer() - b1 := NewBuffer() - - err := content.WriteBlob(ctx, b0, "foo", bytes.NewBuffer([]byte("foo0")), ocispec.Descriptor{Size: -1}) - require.NoError(t, err) - - err = content.WriteBlob(ctx, b1, "foo", bytes.NewBuffer([]byte("foo1")), ocispec.Descriptor{Size: -1}) - require.NoError(t, err) - - mp := NewMultiProvider(nil) - 
mp.Add(digest.FromBytes([]byte("foo0")), b0) - mp.Add(digest.FromBytes([]byte("foo1")), b1) - - dt, err := content.ReadBlob(ctx, mp, ocispec.Descriptor{Digest: digest.FromBytes([]byte("foo0"))}) - require.NoError(t, err) - require.Equal(t, string(dt), "foo0") - - dt, err = content.ReadBlob(ctx, mp, ocispec.Descriptor{Digest: digest.FromBytes([]byte("foo1"))}) - require.NoError(t, err) - require.Equal(t, string(dt), "foo1") - - _, err = content.ReadBlob(ctx, mp, ocispec.Descriptor{Digest: digest.FromBytes([]byte("foo2"))}) - require.Error(t, err) - require.Equal(t, errors.Cause(err), errdefs.ErrNotFound) -} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/pusher.go b/vendor/github.com/moby/buildkit/util/contentutil/pusher.go deleted file mode 100644 index ab88128aa2ad..000000000000 --- a/vendor/github.com/moby/buildkit/util/contentutil/pusher.go +++ /dev/null @@ -1,58 +0,0 @@ -package contentutil - -import ( - "context" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/remotes" - "github.com/pkg/errors" -) - -func FromPusher(p remotes.Pusher) content.Ingester { - return &pushingIngester{ - p: p, - } -} - -type pushingIngester struct { - p remotes.Pusher -} - -// Writer implements content.Ingester. desc.MediaType must be set for manifest blobs. -func (i *pushingIngester) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { - var wOpts content.WriterOpts - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil, err - } - } - if wOpts.Ref == "" { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty") - } - // pusher requires desc.MediaType to determine the PUT URL, especially for manifest blobs. 
- contentWriter, err := i.p.Push(ctx, wOpts.Desc) - if err != nil { - return nil, err - } - return &writer{ - Writer: contentWriter, - contentWriterRef: wOpts.Ref, - }, nil -} - -type writer struct { - content.Writer // returned from pusher.Push - contentWriterRef string // ref passed for Writer() -} - -func (w *writer) Status() (content.Status, error) { - st, err := w.Writer.Status() - if err != nil { - return st, err - } - if w.contentWriterRef != "" { - st.Ref = w.contentWriterRef - } - return st, nil -} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/refs.go b/vendor/github.com/moby/buildkit/util/contentutil/refs.go deleted file mode 100644 index e62d7987bd3f..000000000000 --- a/vendor/github.com/moby/buildkit/util/contentutil/refs.go +++ /dev/null @@ -1,98 +0,0 @@ -package contentutil - -import ( - "context" - "net/http" - "sync" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/remotes" - "github.com/containerd/containerd/remotes/docker" - "github.com/docker/docker/pkg/locker" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func ProviderFromRef(ref string) (ocispec.Descriptor, content.Provider, error) { - remote := docker.NewResolver(docker.ResolverOptions{ - Client: http.DefaultClient, - }) - - name, desc, err := remote.Resolve(context.TODO(), ref) - if err != nil { - return ocispec.Descriptor{}, nil, err - } - - fetcher, err := remote.Fetcher(context.TODO(), name) - if err != nil { - return ocispec.Descriptor{}, nil, err - } - return desc, FromFetcher(fetcher), nil -} - -func IngesterFromRef(ref string) (content.Ingester, error) { - remote := docker.NewResolver(docker.ResolverOptions{ - Client: http.DefaultClient, - }) - - pusher, err := remote.Pusher(context.TODO(), ref) - if err != nil { - return nil, err - } - - return &ingester{ - locker: locker.New(), - pusher: pusher, - }, nil -} - -type ingester struct { - locker *locker.Locker - pusher remotes.Pusher -} - -func (w *ingester) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { - var wo content.WriterOpts - for _, o := range opts { - if err := o(&wo); err != nil { - return nil, err - } - } - if wo.Ref == "" { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty") - } - w.locker.Lock(wo.Ref) - var once sync.Once - unlock := func() { - once.Do(func() { - w.locker.Unlock(wo.Ref) - }) - } - writer, err := w.pusher.Push(ctx, wo.Desc) - if err != nil { - unlock() - return nil, err - } - return &lockedWriter{unlock: unlock, Writer: writer}, nil -} - -type lockedWriter struct { - unlock func() - content.Writer -} - -func (w *lockedWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { - err := w.Writer.Commit(ctx, size, expected, opts...) 
- if err == nil { - w.unlock() - } - return err -} - -func (w *lockedWriter) Close() error { - err := w.Writer.Close() - w.unlock() - return err -} diff --git a/vendor/github.com/moby/buildkit/util/dockerexporter/dockerexporter.go b/vendor/github.com/moby/buildkit/util/dockerexporter/dockerexporter.go deleted file mode 100644 index 7599bea25173..000000000000 --- a/vendor/github.com/moby/buildkit/util/dockerexporter/dockerexporter.go +++ /dev/null @@ -1,249 +0,0 @@ -package dockerexporter - -import ( - "archive/tar" - "context" - "encoding/json" - "io" - "path" - "sort" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - ocispecs "github.com/opencontainers/image-spec/specs-go" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// DockerExporter implements containerd/images.Exporter to -// Docker Combined Image JSON + Filesystem Changeset Format v1.1 -// https://github.com/moby/moby/blob/master/image/spec/v1.1.md#combined-image-json--filesystem-changeset-format -// The output tarball is also compatible with OCI Image Format Specification -type DockerExporter struct { - Name string -} - -var _ images.Exporter = &DockerExporter{} - -// Export exports tarball into writer. -func (de *DockerExporter) Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error { - tw := tar.NewWriter(writer) - defer tw.Close() - - dockerManifest, err := dockerManifestRecord(ctx, store, desc, de.Name) - if err != nil { - return err - } - - records := []tarRecord{ - ociLayoutFile(""), - ociIndexRecord(desc), - *dockerManifest, - } - - algorithms := map[string]struct{}{} - exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - records = append(records, blobRecord(store, desc)) - algorithms[desc.Digest.Algorithm().String()] = struct{}{} - return nil, nil - } - - // Get all the children for a descriptor - childrenHandler := images.ChildrenHandler(store) - - handlers := images.Handlers( - childrenHandler, - images.HandlerFunc(exportHandler), - ) - - // Walk sequentially since the number of fetchs is likely one and doing in - // parallel requires locking the export handler - if err := images.Walk(ctx, handlers, desc); err != nil { - return err - } - - if len(algorithms) > 0 { - records = append(records, directoryRecord("blobs/", 0755)) - for alg := range algorithms { - records = append(records, directoryRecord("blobs/"+alg+"/", 0755)) - } - } - - return writeTar(ctx, tw, records) -} - -type tarRecord struct { - Header *tar.Header - CopyTo func(context.Context, io.Writer) (int64, error) -} - -func dockerManifestRecord(ctx context.Context, provider content.Provider, desc ocispec.Descriptor, name string) (*tarRecord, error) { - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - var manifest ocispec.Manifest - if err := json.Unmarshal(p, &manifest); err != nil { - return nil, err - } - type mfstItem struct { - Config string - RepoTags []string - Layers []string - } - item := mfstItem{ - Config: path.Join("blobs", manifest.Config.Digest.Algorithm().String(), manifest.Config.Digest.Hex()), - } - - for _, l := range manifest.Layers { - item.Layers = append(item.Layers, path.Join("blobs", l.Digest.Algorithm().String(), l.Digest.Hex())) - } - - if name != "" { - item.RepoTags = append(item.RepoTags, name) - } - - dt, err 
:= json.Marshal([]mfstItem{item}) - if err != nil { - return nil, err - } - - return &tarRecord{ - Header: &tar.Header{ - Name: "manifest.json", - Mode: 0444, - Size: int64(len(dt)), - Typeflag: tar.TypeReg, - }, - CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { - n, err := w.Write(dt) - return int64(n), err - }, - }, nil - default: - return nil, errors.Errorf("%v not supported for Docker exporter", desc.MediaType) - } - -} - -func blobRecord(cs content.Provider, desc ocispec.Descriptor) tarRecord { - path := "blobs/" + desc.Digest.Algorithm().String() + "/" + desc.Digest.Hex() - return tarRecord{ - Header: &tar.Header{ - Name: path, - Mode: 0444, - Size: desc.Size, - Typeflag: tar.TypeReg, - }, - CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { - r, err := cs.ReaderAt(ctx, desc) - if err != nil { - return 0, err - } - defer r.Close() - - // Verify digest - dgstr := desc.Digest.Algorithm().Digester() - - n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r)) - if err != nil { - return 0, err - } - if dgstr.Digest() != desc.Digest { - return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest()) - } - return n, nil - }, - } -} - -func directoryRecord(name string, mode int64) tarRecord { - return tarRecord{ - Header: &tar.Header{ - Name: name, - Mode: mode, - Typeflag: tar.TypeDir, - }, - } -} - -func ociLayoutFile(version string) tarRecord { - if version == "" { - version = ocispec.ImageLayoutVersion - } - layout := ocispec.ImageLayout{ - Version: version, - } - - b, err := json.Marshal(layout) - if err != nil { - panic(err) - } - - return tarRecord{ - Header: &tar.Header{ - Name: ocispec.ImageLayoutFile, - Mode: 0444, - Size: int64(len(b)), - Typeflag: tar.TypeReg, - }, - CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { - n, err := w.Write(b) - return int64(n), err - }, - } - -} - -func ociIndexRecord(manifests ...ocispec.Descriptor) tarRecord { - index := ocispec.Index{ - Versioned: ocispecs.Versioned{ - SchemaVersion: 2, - }, - Manifests: manifests, - } - - b, err := json.Marshal(index) - if err != nil { - panic(err) - } - - return tarRecord{ - Header: &tar.Header{ - Name: "index.json", - Mode: 0644, - Size: int64(len(b)), - Typeflag: tar.TypeReg, - }, - CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { - n, err := w.Write(b) - return int64(n), err - }, - } -} - -func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error { - sort.Slice(records, func(i, j int) bool { - return records[i].Header.Name < records[j].Header.Name - }) - - for _, record := range records { - if err := tw.WriteHeader(record.Header); err != nil { - return err - } - if record.CopyTo != nil { - n, err := record.CopyTo(ctx, tw) - if err != nil { - return err - } - if n != record.Header.Size { - return errors.Errorf("unexpected copy size for %s", record.Header.Name) - } - } else if record.Header.Size > 0 { - return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name) - } - } - return nil -} diff --git a/vendor/github.com/moby/buildkit/util/entitlements/entitlements.go b/vendor/github.com/moby/buildkit/util/entitlements/entitlements.go deleted file mode 100644 index 4bd7f2a80969..000000000000 --- a/vendor/github.com/moby/buildkit/util/entitlements/entitlements.go +++ /dev/null @@ -1,70 +0,0 @@ -package entitlements - -import "github.com/pkg/errors" - -type Entitlement string - -const ( - EntitlementSecurityConfined Entitlement = "security.confined" - 
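writeTar above produces deterministic archives: records are sorted by name before writing, and a record whose payload does not match its header size fails the export. A reduced, runnable version of that helper using only the standard library:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"sort"
)

type record struct {
	header *tar.Header
	data   []byte
}

// writeTar sorts records by name for a reproducible layout and checks each
// payload against the declared header size, mirroring the deleted helper.
func writeTar(records []record) ([]byte, error) {
	sort.Slice(records, func(i, j int) bool {
		return records[i].header.Name < records[j].header.Name
	})
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	for _, r := range records {
		if int64(len(r.data)) != r.header.Size {
			return nil, fmt.Errorf("unexpected copy size for %s", r.header.Name)
		}
		if err := tw.WriteHeader(r.header); err != nil {
			return nil, err
		}
		if _, err := tw.Write(r.data); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	dt, err := writeTar([]record{
		{&tar.Header{Name: "index.json", Mode: 0644, Size: 2, Typeflag: tar.TypeReg}, []byte("{}")},
		{&tar.Header{Name: "blobs/", Mode: 0755, Typeflag: tar.TypeDir}, nil},
	})
	fmt.Println(len(dt) > 0, err) // true <nil>
}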
EntitlementSecurityUnconfined Entitlement = "security.unconfined" // unimplemented - EntitlementNetworkHost Entitlement = "network.host" - EntitlementNetworkNone Entitlement = "network.none" -) - -var all = map[Entitlement]struct{}{ - EntitlementSecurityConfined: {}, - EntitlementSecurityUnconfined: {}, - EntitlementNetworkHost: {}, - EntitlementNetworkNone: {}, -} - -var defaults = map[Entitlement]struct{}{ - EntitlementSecurityConfined: {}, - EntitlementNetworkNone: {}, -} - -func Parse(s string) (Entitlement, error) { - _, ok := all[Entitlement(s)] - if !ok { - return "", errors.Errorf("unknown entitlement %s", s) - } - return Entitlement(s), nil -} - -func WhiteList(allowed, supported []Entitlement) (Set, error) { - m := map[Entitlement]struct{}{} - - var supm Set - if supported != nil { - var err error - supm, err = WhiteList(supported, nil) - if err != nil { // should not happen - return nil, err - } - } - - for _, e := range allowed { - e, err := Parse(string(e)) - if err != nil { - return nil, err - } - if supported != nil { - if !supm.Allowed(e) { - return nil, errors.Errorf("entitlement %s is not allowed", e) - } - } - m[e] = struct{}{} - } - - for e := range defaults { - m[e] = struct{}{} - } - return Set(m), nil -} - -type Set map[Entitlement]struct{} - -func (s Set) Allowed(e Entitlement) bool { - _, ok := s[e] - return ok -} diff --git a/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go b/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go deleted file mode 100644 index 89866b76bb61..000000000000 --- a/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go +++ /dev/null @@ -1,335 +0,0 @@ -package flightcontrol - -import ( - "context" - "io" - "runtime" - "sort" - "sync" - "time" - - "github.com/moby/buildkit/util/progress" - "github.com/pkg/errors" -) - -// flightcontrol is like singleflight but with support for cancellation and -// nested progress reporting - -var ( - errRetry = errors.Errorf("retry") - errRetryTimeout = errors.Errorf("exceeded retry timeout") -) - -type contextKeyT string - -var contextKey = contextKeyT("buildkit/util/flightcontrol.progress") - -// Group is a flightcontrol syncronization group -type Group struct { - mu sync.Mutex // protects m - m map[string]*call // lazily initialized -} - -// Do executes a context function syncronized by the key -func (g *Group) Do(ctx context.Context, key string, fn func(ctx context.Context) (interface{}, error)) (v interface{}, err error) { - var backoff time.Duration - for { - v, err = g.do(ctx, key, fn) - if err == nil || errors.Cause(err) != errRetry { - return v, err - } - // backoff logic - if backoff >= 3*time.Second { - err = errors.Wrapf(errRetryTimeout, "flightcontrol") - return v, err - } - runtime.Gosched() - if backoff > 0 { - time.Sleep(backoff) - backoff *= 2 - } else { - backoff = time.Millisecond - } - } -} - -func (g *Group) do(ctx context.Context, key string, fn func(ctx context.Context) (interface{}, error)) (interface{}, error) { - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - - if c, ok := g.m[key]; ok { // register 2nd waiter - g.mu.Unlock() - return c.wait(ctx) - } - - c := newCall(fn) - g.m[key] = c - go func() { - // cleanup after a caller has returned - <-c.ready - g.mu.Lock() - delete(g.m, key) - g.mu.Unlock() - }() - g.mu.Unlock() - return c.wait(ctx) -} - -type call struct { - mu sync.Mutex - result interface{} - err error - ready chan struct{} - - ctx *sharedContext - ctxs []context.Context - fn func(ctx context.Context) 
(interface{}, error) - once sync.Once - - closeProgressWriter func() - progressState *progressState - progressCtx context.Context -} - -func newCall(fn func(ctx context.Context) (interface{}, error)) *call { - c := &call{ - fn: fn, - ready: make(chan struct{}), - progressState: newProgressState(), - } - ctx := newContext(c) // newSharedContext - pr, pctx, closeProgressWriter := progress.NewContext(context.Background()) - - c.progressCtx = pctx - c.ctx = ctx - c.closeProgressWriter = closeProgressWriter - - go c.progressState.run(pr) // TODO: remove this, wrap writer instead - - return c -} - -func (c *call) run() { - defer c.closeProgressWriter() - v, err := c.fn(c.ctx) - c.mu.Lock() - c.result = v - c.err = err - c.mu.Unlock() - close(c.ready) -} - -func (c *call) wait(ctx context.Context) (v interface{}, err error) { - c.mu.Lock() - // detect case where caller has just returned, let it clean up before - select { - case <-c.ready: // could return if no error - c.mu.Unlock() - return nil, errRetry - default: - } - - pw, ok, ctx := progress.FromContext(ctx) - if ok { - c.progressState.add(pw) - } - c.ctxs = append(c.ctxs, ctx) - - c.mu.Unlock() - - go c.once.Do(c.run) - - select { - case <-ctx.Done(): - select { - case <-c.ctx.Done(): - // if this cancelled the last context, then wait for function to shut down - // and don't accept any more callers - <-c.ready - return c.result, c.err - default: - if ok { - c.progressState.close(pw) - } - return nil, ctx.Err() - } - case <-c.ready: - return c.result, c.err // shared not implemented yet - } -} - -func (c *call) Deadline() (deadline time.Time, ok bool) { - c.mu.Lock() - defer c.mu.Unlock() - for _, ctx := range c.ctxs { - select { - case <-ctx.Done(): - default: - dl, ok := ctx.Deadline() - if ok { - return dl, ok - } - } - } - return time.Time{}, false -} - -func (c *call) Done() <-chan struct{} { - c.mu.Lock() - c.ctx.signal() - c.mu.Unlock() - return c.ctx.done -} - -func (c *call) Err() error { - select { - case <-c.ctx.Done(): - return c.ctx.err - default: - return nil - } -} - -func (c *call) Value(key interface{}) interface{} { - if key == contextKey { - return c.progressState - } - c.mu.Lock() - defer c.mu.Unlock() - - ctx := c.progressCtx - select { - case <-ctx.Done(): - default: - if v := ctx.Value(key); v != nil { - return v - } - } - - if len(c.ctxs) > 0 { - ctx = c.ctxs[0] - select { - case <-ctx.Done(): - default: - if v := ctx.Value(key); v != nil { - return v - } - } - } - - return nil -} - -type sharedContext struct { - *call - done chan struct{} - err error -} - -func newContext(c *call) *sharedContext { - return &sharedContext{call: c, done: make(chan struct{})} -} - -// call with lock -func (c *sharedContext) signal() { - select { - case <-c.done: - default: - var err error - for _, ctx := range c.ctxs { - select { - case <-ctx.Done(): - err = ctx.Err() - default: - return - } - } - c.err = err - close(c.done) - } -} - -type rawProgressWriter interface { - WriteRawProgress(*progress.Progress) error - Close() error -} - -type progressState struct { - mu sync.Mutex - items map[string]*progress.Progress - writers []rawProgressWriter - done bool -} - -func newProgressState() *progressState { - return &progressState{ - items: make(map[string]*progress.Progress), - } -} - -func (ps *progressState) run(pr progress.Reader) { - for { - p, err := pr.Read(context.TODO()) - if err != nil { - if err == io.EOF { - ps.mu.Lock() - ps.done = true - ps.mu.Unlock() - for _, w := range ps.writers { - w.Close() - } - } - return - } - 
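Group.Do above retries while do reports errRetry, starting the sleep at one millisecond and doubling it until a three-second ceiling converts the failure into errRetryTimeout. The same loop in isolation, with a stand-in sentinel and the standard library's errors.Is in place of pkg/errors.Cause:

package main

import (
	"errors"
	"fmt"
	"time"
)

var errRetry = errors.New("retry")

// doWithBackoff mirrors the retry loop in Group.Do: retry on errRetry,
// doubling the sleep from 1ms, and give up once the backoff reaches 3s.
func doWithBackoff(fn func() (interface{}, error)) (interface{}, error) {
	var backoff time.Duration
	for {
		v, err := fn()
		if err == nil || !errors.Is(err, errRetry) {
			return v, err
		}
		if backoff >= 3*time.Second {
			return nil, errors.New("exceeded retry timeout")
		}
		if backoff > 0 {
			time.Sleep(backoff)
			backoff *= 2
		} else {
			backoff = time.Millisecond
		}
	}
}

func main() {
	attempts := 0
	v, err := doWithBackoff(func() (interface{}, error) {
		attempts++
		if attempts < 3 {
			return nil, errRetry
		}
		return "done", nil
	})
	fmt.Println(v, err, attempts) // done <nil> 3
}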
ps.mu.Lock() - for _, p := range p { - for _, w := range ps.writers { - w.WriteRawProgress(p) - } - ps.items[p.ID] = p - } - ps.mu.Unlock() - } -} - -func (ps *progressState) add(pw progress.Writer) { - rw, ok := pw.(rawProgressWriter) - if !ok { - return - } - ps.mu.Lock() - plist := make([]*progress.Progress, 0, len(ps.items)) - for _, p := range ps.items { - plist = append(plist, p) - } - sort.Slice(plist, func(i, j int) bool { - return plist[i].Timestamp.Before(plist[j].Timestamp) - }) - for _, p := range plist { - rw.WriteRawProgress(p) - } - if ps.done { - rw.Close() - } else { - ps.writers = append(ps.writers, rw) - } - ps.mu.Unlock() -} - -func (ps *progressState) close(pw progress.Writer) { - rw, ok := pw.(rawProgressWriter) - if !ok { - return - } - ps.mu.Lock() - for i, w := range ps.writers { - if w == rw { - w.Close() - ps.writers = append(ps.writers[:i], ps.writers[i+1:]...) - break - } - } - ps.mu.Unlock() -} diff --git a/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol_test.go b/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol_test.go deleted file mode 100644 index 93e8c48166a8..000000000000 --- a/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol_test.go +++ /dev/null @@ -1,161 +0,0 @@ -package flightcontrol - -import ( - "context" - "sync/atomic" - "testing" - "time" - - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "golang.org/x/sync/errgroup" -) - -func TestNoCancel(t *testing.T) { - t.Parallel() - g := &Group{} - eg, ctx := errgroup.WithContext(context.Background()) - var r1, r2 string - var counter int64 - f := testFunc(100*time.Millisecond, "bar", &counter) - eg.Go(func() error { - ret1, err := g.Do(ctx, "foo", f) - if err != nil { - return err - } - r1 = ret1.(string) - return nil - }) - eg.Go(func() error { - ret2, err := g.Do(ctx, "foo", f) - if err != nil { - return err - } - r2 = ret2.(string) - return nil - }) - err := eg.Wait() - assert.NoError(t, err) - assert.Equal(t, "bar", r1) - assert.Equal(t, "bar", r2) - assert.Equal(t, counter, int64(1)) -} - -func TestCancelOne(t *testing.T) { - t.Parallel() - g := &Group{} - eg, ctx := errgroup.WithContext(context.Background()) - var r1, r2 string - var counter int64 - f := testFunc(100*time.Millisecond, "bar", &counter) - ctx2, cancel := context.WithCancel(ctx) - eg.Go(func() error { - ret1, err := g.Do(ctx2, "foo", f) - assert.Error(t, err) - assert.Equal(t, errors.Cause(err), context.Canceled) - if err == nil { - r1 = ret1.(string) - } - return nil - }) - eg.Go(func() error { - ret2, err := g.Do(ctx, "foo", f) - if err != nil { - return err - } - r2 = ret2.(string) - return nil - }) - eg.Go(func() error { - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(30 * time.Millisecond): - cancel() - return nil - } - }) - err := eg.Wait() - assert.NoError(t, err) - assert.Equal(t, "", r1) - assert.Equal(t, "bar", r2) - assert.Equal(t, counter, int64(1)) -} - -func TestCancelBoth(t *testing.T) { - t.Parallel() - g := &Group{} - eg, ctx := errgroup.WithContext(context.Background()) - var r1, r2 string - var counter int64 - f := testFunc(200*time.Millisecond, "bar", &counter) - ctx2, cancel2 := context.WithCancel(ctx) - ctx3, cancel3 := context.WithCancel(ctx) - eg.Go(func() error { - ret1, err := g.Do(ctx2, "foo", f) - assert.Error(t, err) - assert.Equal(t, errors.Cause(err), context.Canceled) - if err == nil { - r1 = ret1.(string) - } - return nil - }) - eg.Go(func() error { - ret2, err := g.Do(ctx3, "foo", f) - assert.Error(t, err) - 
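The tests above pin the package's core guarantee: concurrent Do calls sharing a key share one execution, so the counter stays at 1. The package comment describes flightcontrol as singleflight plus cancellation and progress; the deduplication half can be seen with golang.org/x/sync/singleflight alone:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var counter int64
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All overlapping callers share a single execution of fn.
			v, _, _ := g.Do("foo", func() (interface{}, error) {
				atomic.AddInt64(&counter, 1)
				time.Sleep(50 * time.Millisecond)
				return "bar", nil
			})
			fmt.Println(v)
		}()
	}
	wg.Wait()
	fmt.Println("executions:", atomic.LoadInt64(&counter)) // 1 when the calls overlap
}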
assert.Equal(t, errors.Cause(err), context.Canceled) - if err == nil { - r2 = ret2.(string) - } - return nil - }) - eg.Go(func() error { - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(20 * time.Millisecond): - cancel2() - return nil - } - }) - eg.Go(func() error { - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(50 * time.Millisecond): - cancel3() - return nil - } - }) - err := eg.Wait() - assert.NoError(t, err) - assert.Equal(t, "", r1) - assert.Equal(t, "", r2) - assert.Equal(t, counter, int64(1)) - - ret1, err := g.Do(context.TODO(), "foo", f) - assert.NoError(t, err) - assert.Equal(t, ret1, "bar") - - f2 := testFunc(100*time.Millisecond, "baz", &counter) - ret1, err = g.Do(context.TODO(), "foo", f2) - assert.NoError(t, err) - assert.Equal(t, ret1, "baz") - ret1, err = g.Do(context.TODO(), "abc", f) - assert.NoError(t, err) - assert.Equal(t, ret1, "bar") - - assert.Equal(t, counter, int64(4)) -} - -func testFunc(wait time.Duration, ret string, counter *int64) func(ctx context.Context) (interface{}, error) { - return func(ctx context.Context) (interface{}, error) { - atomic.AddInt64(counter, 1) - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-time.After(wait): - return ret, nil - } - } -} diff --git a/vendor/github.com/moby/buildkit/util/imageutil/config.go b/vendor/github.com/moby/buildkit/util/imageutil/config.go deleted file mode 100644 index a9890e730eb4..000000000000 --- a/vendor/github.com/moby/buildkit/util/imageutil/config.go +++ /dev/null @@ -1,159 +0,0 @@ -package imageutil - -import ( - "context" - "encoding/json" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/platforms" - "github.com/containerd/containerd/reference" - "github.com/containerd/containerd/remotes" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type ContentCache interface { - content.Ingester - content.Provider -} - -func Config(ctx context.Context, str string, resolver remotes.Resolver, cache ContentCache, p *specs.Platform) (digest.Digest, []byte, error) { - // TODO: fix buildkit to take interface instead of struct - var platform platforms.MatchComparer - if p != nil { - platform = platforms.Only(*p) - } else { - platform = platforms.Default() - } - ref, err := reference.Parse(str) - if err != nil { - return "", nil, errors.WithStack(err) - } - - desc := specs.Descriptor{ - Digest: ref.Digest(), - } - if desc.Digest != "" { - ra, err := cache.ReaderAt(ctx, desc) - if err == nil { - desc.Size = ra.Size() - mt, err := DetectManifestMediaType(ra) - if err == nil { - desc.MediaType = mt - } - } - } - // use resolver if desc is incomplete - if desc.MediaType == "" { - _, desc, err = resolver.Resolve(ctx, ref.String()) - if err != nil { - return "", nil, err - } - } - - fetcher, err := resolver.Fetcher(ctx, ref.String()) - if err != nil { - return "", nil, err - } - - if desc.MediaType == images.MediaTypeDockerSchema1Manifest { - return readSchema1Config(ctx, ref.String(), desc, fetcher, cache) - } - - handlers := []images.Handler{ - remotes.FetchHandler(cache, fetcher), - childrenConfigHandler(cache, platform), - } - if err := images.Dispatch(ctx, images.Handlers(handlers...), desc); err != nil { - return "", nil, err - } - config, err := images.Config(ctx, cache, desc, platform) - if err != nil { - return "", nil, err - } - - dt, err := content.ReadBlob(ctx, cache, config) - if err != 
nil { - return "", nil, err - } - - return desc.Digest, dt, nil -} - -func childrenConfigHandler(provider content.Provider, platform platforms.MatchComparer) images.HandlerFunc { - return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) { - var descs []specs.Descriptor - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, specs.MediaTypeImageManifest: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - // TODO(stevvooe): We just assume oci manifest, for now. There may be - // subtle differences from the docker version. - var manifest specs.Manifest - if err := json.Unmarshal(p, &manifest); err != nil { - return nil, err - } - - descs = append(descs, manifest.Config) - case images.MediaTypeDockerSchema2ManifestList, specs.MediaTypeImageIndex: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - var index specs.Index - if err := json.Unmarshal(p, &index); err != nil { - return nil, err - } - - if platform != nil { - for _, d := range index.Manifests { - if d.Platform == nil || platform.Match(*d.Platform) { - descs = append(descs, d) - } - } - } else { - descs = append(descs, index.Manifests...) - } - case images.MediaTypeDockerSchema2Config, specs.MediaTypeImageConfig: - // childless data types. - return nil, nil - default: - return nil, errors.Errorf("encountered unknown type %v; children may not be fetched", desc.MediaType) - } - - return descs, nil - } -} - -// specs.MediaTypeImageManifest, // TODO: detect schema1/manifest-list -func DetectManifestMediaType(ra content.ReaderAt) (string, error) { - // TODO: schema1 - - p := make([]byte, ra.Size()) - if _, err := ra.ReadAt(p, 0); err != nil { - return "", err - } - - var mfst struct { - MediaType string `json:"mediaType"` - Config json.RawMessage `json:"config"` - } - - if err := json.Unmarshal(p, &mfst); err != nil { - return "", err - } - - if mfst.MediaType != "" { - return mfst.MediaType, nil - } - if mfst.Config != nil { - return images.MediaTypeDockerSchema2Manifest, nil - } - return images.MediaTypeDockerSchema2ManifestList, nil -} diff --git a/vendor/github.com/moby/buildkit/util/imageutil/schema1.go b/vendor/github.com/moby/buildkit/util/imageutil/schema1.go deleted file mode 100644 index 591676fffdba..000000000000 --- a/vendor/github.com/moby/buildkit/util/imageutil/schema1.go +++ /dev/null @@ -1,87 +0,0 @@ -package imageutil - -import ( - "context" - "encoding/json" - "io/ioutil" - "strings" - "time" - - "github.com/containerd/containerd/remotes" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func readSchema1Config(ctx context.Context, ref string, desc specs.Descriptor, fetcher remotes.Fetcher, cache ContentCache) (digest.Digest, []byte, error) { - rc, err := fetcher.Fetch(ctx, desc) - if err != nil { - return "", nil, err - } - defer rc.Close() - dt, err := ioutil.ReadAll(rc) - if err != nil { - return "", nil, errors.Wrap(err, "failed to fetch schema1 manifest") - } - dt, err = convertSchema1ConfigMeta(dt) - if err != nil { - return "", nil, err - } - return desc.Digest, dt, nil -} - -func convertSchema1ConfigMeta(in []byte) ([]byte, error) { - type history struct { - V1Compatibility string `json:"v1Compatibility"` - } - var m struct { - History []history `json:"history"` - } - if err := json.Unmarshal(in, &m); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal schema1 manifest") - } - if 
len(m.History) == 0 { - return nil, errors.Errorf("invalid schema1 manifest") - } - - var img specs.Image - if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), &img); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal image from schema 1 history") - } - - img.RootFS = specs.RootFS{ - Type: "layers", // filled in by exporter - } - img.History = make([]specs.History, len(m.History)) - - for i := range m.History { - var h v1History - if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal history") - } - img.History[len(m.History)-i-1] = specs.History{ - Author: h.Author, - Comment: h.Comment, - Created: &h.Created, - CreatedBy: strings.Join(h.ContainerConfig.Cmd, " "), - EmptyLayer: (h.ThrowAway != nil && *h.ThrowAway) || (h.Size != nil && *h.Size == 0), - } - } - - dt, err := json.MarshalIndent(img, "", " ") - if err != nil { - return nil, errors.Wrap(err, "failed to marshal schema1 config") - } - return dt, nil -} - -type v1History struct { - Author string `json:"author,omitempty"` - Created time.Time `json:"created"` - Comment string `json:"comment,omitempty"` - ThrowAway *bool `json:"throwaway,omitempty"` - Size *int `json:"Size,omitempty"` // used before ThrowAway field - ContainerConfig struct { - Cmd []string `json:"Cmd,omitempty"` - } `json:"container_config,omitempty"` -} diff --git a/vendor/github.com/moby/buildkit/util/network/host.go b/vendor/github.com/moby/buildkit/util/network/host.go deleted file mode 100644 index dc58b1ce72e4..000000000000 --- a/vendor/github.com/moby/buildkit/util/network/host.go +++ /dev/null @@ -1,28 +0,0 @@ -package network - -import ( - "github.com/containerd/containerd/oci" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func NewHostProvider() Provider { - return &host{} -} - -type host struct { -} - -func (h *host) New() (Namespace, error) { - return &hostNS{}, nil -} - -type hostNS struct { -} - -func (h *hostNS) Set(s *specs.Spec) { - oci.WithHostNamespace(specs.NetworkNamespace)(nil, nil, nil, s) -} - -func (h *hostNS) Close() error { - return nil -} diff --git a/vendor/github.com/moby/buildkit/util/network/network.go b/vendor/github.com/moby/buildkit/util/network/network.go deleted file mode 100644 index 055a52da8bce..000000000000 --- a/vendor/github.com/moby/buildkit/util/network/network.go +++ /dev/null @@ -1,37 +0,0 @@ -package network - -import ( - "io" - - "github.com/moby/buildkit/solver/pb" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -// Default returns the default network provider set -func Default() map[pb.NetMode]Provider { - return map[pb.NetMode]Provider{ - // FIXME: still uses host if no provider configured - pb.NetMode_UNSET: NewHostProvider(), - pb.NetMode_HOST: NewHostProvider(), - pb.NetMode_NONE: NewNoneProvider(), - } -} - -// Provider interface for Network -type Provider interface { - New() (Namespace, error) -} - -// Namespace of network for workers -type Namespace interface { - io.Closer - // Set the namespace on the spec - Set(*specs.Spec) -} - -// NetworkOpts hold network options -type NetworkOpts struct { - Type string - CNIConfigPath string - CNIPluginPath string -} diff --git a/vendor/github.com/moby/buildkit/util/network/none.go b/vendor/github.com/moby/buildkit/util/network/none.go deleted file mode 100644 index ebf1ebda941f..000000000000 --- a/vendor/github.com/moby/buildkit/util/network/none.go +++ /dev/null @@ -1,26 +0,0 @@ -package network - -import ( - specs 
"github.com/opencontainers/runtime-spec/specs-go" -) - -func NewNoneProvider() Provider { - return &none{} -} - -type none struct { -} - -func (h *none) New() (Namespace, error) { - return &noneNS{}, nil -} - -type noneNS struct { -} - -func (h *noneNS) Set(s *specs.Spec) { -} - -func (h *noneNS) Close() error { - return nil -} diff --git a/vendor/github.com/moby/buildkit/util/profiler/profiler.go b/vendor/github.com/moby/buildkit/util/profiler/profiler.go deleted file mode 100644 index 178b128af67d..000000000000 --- a/vendor/github.com/moby/buildkit/util/profiler/profiler.go +++ /dev/null @@ -1,84 +0,0 @@ -package profiler - -import ( - "github.com/pkg/profile" - "github.com/urfave/cli" -) - -func Attach(app *cli.App) { - app.Flags = append(app.Flags, - cli.StringFlag{ - Name: "profile-cpu", - Hidden: true, - }, - cli.StringFlag{ - Name: "profile-memory", - Hidden: true, - }, - cli.IntFlag{ - Name: "profile-memoryrate", - Value: 512 * 1024, - Hidden: true, - }, - cli.StringFlag{ - Name: "profile-block", - Hidden: true, - }, - cli.StringFlag{ - Name: "profile-mutex", - Hidden: true, - }, - cli.StringFlag{ - Name: "profile-trace", - Hidden: true, - }, - ) - - var stoppers = []interface { - Stop() - }{} - - before := app.Before - app.Before = func(clicontext *cli.Context) error { - if before != nil { - if err := before(clicontext); err != nil { - return err - } - } - - if cpuProfile := clicontext.String("profile-cpu"); cpuProfile != "" { - stoppers = append(stoppers, profile.Start(profile.CPUProfile, profile.ProfilePath(cpuProfile), profile.NoShutdownHook)) - } - - if memProfile := clicontext.String("profile-memory"); memProfile != "" { - stoppers = append(stoppers, profile.Start(profile.MemProfile, profile.ProfilePath(memProfile), profile.NoShutdownHook, profile.MemProfileRate(clicontext.Int("profile-memoryrate")))) - } - - if blockProfile := clicontext.String("profile-block"); blockProfile != "" { - stoppers = append(stoppers, profile.Start(profile.BlockProfile, profile.ProfilePath(blockProfile), profile.NoShutdownHook)) - } - - if mutexProfile := clicontext.String("profile-mutex"); mutexProfile != "" { - stoppers = append(stoppers, profile.Start(profile.MutexProfile, profile.ProfilePath(mutexProfile), profile.NoShutdownHook)) - } - - if traceProfile := clicontext.String("profile-trace"); traceProfile != "" { - stoppers = append(stoppers, profile.Start(profile.TraceProfile, profile.ProfilePath(traceProfile), profile.NoShutdownHook)) - } - return nil - } - - after := app.After - app.After = func(clicontext *cli.Context) error { - if after != nil { - if err := after(clicontext); err != nil { - return err - } - } - - for _, stopper := range stoppers { - stopper.Stop() - } - return nil - } -} diff --git a/vendor/github.com/moby/buildkit/util/progress/logs/logs.go b/vendor/github.com/moby/buildkit/util/progress/logs/logs.go deleted file mode 100644 index 54f6ff89657c..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/logs/logs.go +++ /dev/null @@ -1,53 +0,0 @@ -package logs - -import ( - "context" - "io" - "os" - - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/util/progress" - "github.com/pkg/errors" -) - -func NewLogStreams(ctx context.Context, printOutput bool) (io.WriteCloser, io.WriteCloser) { - return newStreamWriter(ctx, 1, printOutput), newStreamWriter(ctx, 2, printOutput) -} - -func newStreamWriter(ctx context.Context, stream int, printOutput bool) io.WriteCloser { - pw, _, _ := progress.FromContext(ctx) - return 
&streamWriter{ - pw: pw, - stream: stream, - printOutput: printOutput, - } -} - -type streamWriter struct { - pw progress.Writer - stream int - printOutput bool -} - -func (sw *streamWriter) Write(dt []byte) (int, error) { - sw.pw.Write(identity.NewID(), client.VertexLog{ - Stream: sw.stream, - Data: append([]byte{}, dt...), - }) - if sw.printOutput { - switch sw.stream { - case 1: - return os.Stdout.Write(dt) - case 2: - return os.Stderr.Write(dt) - default: - return 0, errors.Errorf("invalid stream %d", sw.stream) - } - } - return len(dt), nil -} - -func (sw *streamWriter) Close() error { - return sw.pw.Close() -} diff --git a/vendor/github.com/moby/buildkit/util/progress/multireader.go b/vendor/github.com/moby/buildkit/util/progress/multireader.go deleted file mode 100644 index 2bd3f2ca8616..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/multireader.go +++ /dev/null @@ -1,77 +0,0 @@ -package progress - -import ( - "context" - "io" - "sync" -) - -type MultiReader struct { - mu sync.Mutex - main Reader - initialized bool - done chan struct{} - writers map[*progressWriter]func() -} - -func NewMultiReader(pr Reader) *MultiReader { - mr := &MultiReader{ - main: pr, - writers: make(map[*progressWriter]func()), - done: make(chan struct{}), - } - return mr -} - -func (mr *MultiReader) Reader(ctx context.Context) Reader { - mr.mu.Lock() - defer mr.mu.Unlock() - - pr, ctx, closeWriter := NewContext(ctx) - pw, _, ctx := FromContext(ctx) - - w := pw.(*progressWriter) - mr.writers[w] = closeWriter - - go func() { - select { - case <-ctx.Done(): - case <-mr.done: - } - mr.mu.Lock() - defer mr.mu.Unlock() - delete(mr.writers, w) - }() - - if !mr.initialized { - go mr.handle() - mr.initialized = true - } - - return pr -} - -func (mr *MultiReader) handle() error { - for { - p, err := mr.main.Read(context.TODO()) - if err != nil { - if err == io.EOF { - mr.mu.Lock() - for w, c := range mr.writers { - w.Close() - c() - } - mr.mu.Unlock() - return nil - } - return err - } - mr.mu.Lock() - for _, p := range p { - for w := range mr.writers { - w.writeRawProgress(p) - } - } - mr.mu.Unlock() - } -} diff --git a/vendor/github.com/moby/buildkit/util/progress/multiwriter.go b/vendor/github.com/moby/buildkit/util/progress/multiwriter.go deleted file mode 100644 index 51989368ce5d..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/multiwriter.go +++ /dev/null @@ -1,105 +0,0 @@ -package progress - -import ( - "sort" - "sync" - "time" -) - -type rawProgressWriter interface { - WriteRawProgress(*Progress) error - Close() error -} - -type MultiWriter struct { - mu sync.Mutex - items []*Progress - writers map[rawProgressWriter]struct{} - done bool - meta map[string]interface{} -} - -func NewMultiWriter(opts ...WriterOption) *MultiWriter { - mw := &MultiWriter{ - writers: map[rawProgressWriter]struct{}{}, - meta: map[string]interface{}{}, - } - for _, o := range opts { - o(mw) - } - return mw -} - -func (ps *MultiWriter) Add(pw Writer) { - rw, ok := pw.(rawProgressWriter) - if !ok { - return - } - ps.mu.Lock() - plist := make([]*Progress, 0, len(ps.items)) - for _, p := range ps.items { - plist = append(plist, p) - } - sort.Slice(plist, func(i, j int) bool { - return plist[i].Timestamp.Before(plist[j].Timestamp) - }) - for _, p := range plist { - rw.WriteRawProgress(p) - } - ps.writers[rw] = struct{}{} - ps.mu.Unlock() -} - -func (ps *MultiWriter) Delete(pw Writer) { - rw, ok := pw.(rawProgressWriter) - if !ok { - return - } - - ps.mu.Lock() - delete(ps.writers, rw) - ps.mu.Unlock() -} - 
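MultiWriter.Add above replays every buffered progress item to a writer that attaches late, sorted by timestamp, so late subscribers see history in order before receiving live updates. The replay step in isolation, with a stand-in item type in place of *Progress:

package main

import (
	"fmt"
	"sort"
	"time"
)

type item struct {
	id string
	ts time.Time
}

// replay sends every buffered item in timestamp order, the same catch-up
// pass MultiWriter.Add performs before registering the new writer.
func replay(buffered []item, send func(item)) {
	sorted := append([]item(nil), buffered...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].ts.Before(sorted[j].ts) })
	for _, p := range sorted {
		send(p)
	}
}

func main() {
	now := time.Now()
	buf := []item{{"b", now.Add(time.Second)}, {"a", now}}
	replay(buf, func(p item) { fmt.Println(p.id) }) // prints a, then b
}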
-func (ps *MultiWriter) Write(id string, v interface{}) error { - p := &Progress{ - ID: id, - Timestamp: time.Now(), - Sys: v, - meta: ps.meta, - } - return ps.WriteRawProgress(p) -} - -func (ps *MultiWriter) WriteRawProgress(p *Progress) error { - meta := p.meta - if len(ps.meta) > 0 { - meta = map[string]interface{}{} - for k, v := range p.meta { - meta[k] = v - } - for k, v := range ps.meta { - if _, ok := meta[k]; !ok { - meta[k] = v - } - } - } - p.meta = meta - return ps.writeRawProgress(p) -} - -func (ps *MultiWriter) writeRawProgress(p *Progress) error { - ps.mu.Lock() - defer ps.mu.Unlock() - ps.items = append(ps.items, p) - for w := range ps.writers { - if err := w.WriteRawProgress(p); err != nil { - return err - } - } - return nil -} - -func (ps *MultiWriter) Close() error { - return nil -} diff --git a/vendor/github.com/moby/buildkit/util/progress/progress.go b/vendor/github.com/moby/buildkit/util/progress/progress.go deleted file mode 100644 index b802716bf783..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/progress.go +++ /dev/null @@ -1,252 +0,0 @@ -package progress - -import ( - "context" - "io" - "sort" - "sync" - "time" - - "github.com/pkg/errors" -) - -// Progress package provides utility functions for using the context to capture -// progress of a running function. All progress items written contain an ID -// that is used to collapse unread messages. - -type contextKeyT string - -var contextKey = contextKeyT("buildkit/util/progress") - -// FromContext returns a progress writer from a context. -func FromContext(ctx context.Context, opts ...WriterOption) (Writer, bool, context.Context) { - v := ctx.Value(contextKey) - pw, ok := v.(*progressWriter) - if !ok { - if pw, ok := v.(*MultiWriter); ok { - return pw, true, ctx - } - return &noOpWriter{}, false, ctx - } - pw = newWriter(pw) - for _, o := range opts { - o(pw) - } - ctx = context.WithValue(ctx, contextKey, pw) - return pw, true, ctx -} - -type WriterOption func(Writer) - -// NewContext returns a new context and a progress reader that captures all -// progress items writtern to this context. Last returned parameter is a closer -// function to signal that no new writes will happen to this context. 
-func NewContext(ctx context.Context) (Reader, context.Context, func()) { - pr, pw, cancel := pipe() - ctx = WithProgress(ctx, pw) - return pr, ctx, cancel -} - -func WithProgress(ctx context.Context, pw Writer) context.Context { - return context.WithValue(ctx, contextKey, pw) -} - -func WithMetadata(key string, val interface{}) WriterOption { - return func(w Writer) { - if pw, ok := w.(*progressWriter); ok { - pw.meta[key] = val - } - if pw, ok := w.(*MultiWriter); ok { - pw.meta[key] = val - } - } -} - -type Writer interface { - Write(id string, value interface{}) error - Close() error -} - -type Reader interface { - Read(context.Context) ([]*Progress, error) -} - -type Progress struct { - ID string - Timestamp time.Time - Sys interface{} - meta map[string]interface{} -} - -type Status struct { - Action string - Current int - Total int - Started *time.Time - Completed *time.Time -} - -type progressReader struct { - ctx context.Context - cond *sync.Cond - mu sync.Mutex - writers map[*progressWriter]struct{} - dirty map[string]*Progress -} - -func (pr *progressReader) Read(ctx context.Context) ([]*Progress, error) { - done := make(chan struct{}) - defer close(done) - go func() { - select { - case <-done: - case <-ctx.Done(): - pr.cond.Broadcast() - } - }() - pr.mu.Lock() - for { - select { - case <-ctx.Done(): - pr.mu.Unlock() - return nil, ctx.Err() - default: - } - dmap := pr.dirty - if len(dmap) == 0 { - select { - case <-pr.ctx.Done(): - if len(pr.writers) == 0 { - pr.mu.Unlock() - return nil, io.EOF - } - default: - } - pr.cond.Wait() - continue - } - pr.dirty = make(map[string]*Progress) - pr.mu.Unlock() - - out := make([]*Progress, 0, len(dmap)) - for _, p := range dmap { - out = append(out, p) - } - - sort.Slice(out, func(i, j int) bool { - return out[i].Timestamp.Before(out[j].Timestamp) - }) - - return out, nil - } -} - -func (pr *progressReader) append(pw *progressWriter) { - pr.mu.Lock() - defer pr.mu.Unlock() - - select { - case <-pr.ctx.Done(): - return - default: - pr.writers[pw] = struct{}{} - } -} - -func pipe() (*progressReader, *progressWriter, func()) { - ctx, cancel := context.WithCancel(context.Background()) - pr := &progressReader{ - ctx: ctx, - writers: make(map[*progressWriter]struct{}), - dirty: make(map[string]*Progress), - } - pr.cond = sync.NewCond(&pr.mu) - go func() { - <-ctx.Done() - pr.cond.Broadcast() - }() - pw := &progressWriter{ - reader: pr, - } - return pr, pw, cancel -} - -func newWriter(pw *progressWriter) *progressWriter { - meta := make(map[string]interface{}) - for k, v := range pw.meta { - meta[k] = v - } - pw = &progressWriter{ - reader: pw.reader, - meta: meta, - } - pw.reader.append(pw) - return pw -} - -type progressWriter struct { - done bool - reader *progressReader - meta map[string]interface{} -} - -func (pw *progressWriter) Write(id string, v interface{}) error { - if pw.done { - return errors.Errorf("writing %s to closed progress writer", id) - } - return pw.writeRawProgress(&Progress{ - ID: id, - Timestamp: time.Now(), - Sys: v, - meta: pw.meta, - }) -} - -func (pw *progressWriter) WriteRawProgress(p *Progress) error { - meta := p.meta - if len(pw.meta) > 0 { - meta = map[string]interface{}{} - for k, v := range p.meta { - meta[k] = v - } - for k, v := range pw.meta { - if _, ok := meta[k]; !ok { - meta[k] = v - } - } - } - p.meta = meta - return pw.writeRawProgress(p) -} - -func (pw *progressWriter) writeRawProgress(p *Progress) error { - pw.reader.mu.Lock() - pw.reader.dirty[p.ID] = p - pw.reader.cond.Broadcast() - 
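The progress package carries its writer through context.Context under an unexported typed key, and FromContext hands back a no-op writer when nothing is attached, so call sites never need to branch. A compact sketch of that convention with simplified stand-in interfaces, not buildkit's actual types:

package main

import (
	"context"
	"fmt"
)

// An unexported, typed context key, the same convention the deleted
// package uses to avoid collisions with other context values.
type contextKeyT string

var contextKey = contextKeyT("example/progress")

type Writer interface{ Write(id string, v interface{}) error }

type printWriter struct{}

func (printWriter) Write(id string, v interface{}) error { fmt.Println(id, v); return nil }

type noOpWriter struct{}

func (noOpWriter) Write(string, interface{}) error { return nil }

func withWriter(ctx context.Context, w Writer) context.Context {
	return context.WithValue(ctx, contextKey, w)
}

// fromContext mirrors progress.FromContext: callers always get a usable
// writer, falling back to a no-op when none was attached.
func fromContext(ctx context.Context) (Writer, bool) {
	if w, ok := ctx.Value(contextKey).(Writer); ok {
		return w, true
	}
	return noOpWriter{}, false
}

func main() {
	w, ok := fromContext(context.Background())
	fmt.Println(ok) // false: nothing attached, writes are discarded
	w.Write("ignored", nil)

	ctx := withWriter(context.Background(), printWriter{})
	w, ok = fromContext(ctx)
	fmt.Println(ok) // true
	w.Write("step", "running")
}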
pw.reader.mu.Unlock() - return nil -} - -func (pw *progressWriter) Close() error { - pw.reader.mu.Lock() - delete(pw.reader.writers, pw) - pw.reader.mu.Unlock() - pw.reader.cond.Broadcast() - pw.done = true - return nil -} - -func (p *Progress) Meta(key string) (interface{}, bool) { - v, ok := p.meta[key] - return v, ok -} - -type noOpWriter struct{} - -func (pw *noOpWriter) Write(_ string, _ interface{}) error { - return nil -} - -func (pw *noOpWriter) Close() error { - return nil -} diff --git a/vendor/github.com/moby/buildkit/util/progress/progress_test.go b/vendor/github.com/moby/buildkit/util/progress/progress_test.go deleted file mode 100644 index 39fe16c58aa4..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/progress_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package progress - -import ( - "context" - "fmt" - "io" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "golang.org/x/sync/errgroup" -) - -func TestProgress(t *testing.T) { - t.Parallel() - s, err := calc(context.TODO(), 4, "calc") - assert.NoError(t, err) - assert.Equal(t, 10, s) - - eg, ctx := errgroup.WithContext(context.Background()) - - pr, ctx, cancelProgress := NewContext(ctx) - var trace trace - eg.Go(func() error { - return saveProgress(ctx, pr, &trace) - }) - - pw, _, ctx := FromContext(ctx, WithMetadata("tag", "foo")) - s, err = calc(ctx, 5, "calc") - pw.Close() - assert.NoError(t, err) - assert.Equal(t, 15, s) - - cancelProgress() - err = eg.Wait() - assert.NoError(t, err) - - assert.True(t, len(trace.items) > 5) - assert.True(t, len(trace.items) <= 7) - for _, p := range trace.items { - v, ok := p.Meta("tag") - assert.True(t, ok) - assert.Equal(t, v.(string), "foo") - } -} - -func TestProgressNested(t *testing.T) { - t.Parallel() - eg, ctx := errgroup.WithContext(context.Background()) - pr, ctx, cancelProgress := NewContext(ctx) - var trace trace - eg.Go(func() error { - return saveProgress(ctx, pr, &trace) - }) - s, err := reduceCalc(ctx, 3) - assert.NoError(t, err) - assert.Equal(t, 6, s) - - cancelProgress() - - err = eg.Wait() - assert.NoError(t, err) - - assert.True(t, len(trace.items) > 9) // usually 14 - assert.True(t, len(trace.items) <= 15) -} - -func calc(ctx context.Context, total int, name string) (int, error) { - pw, _, ctx := FromContext(ctx) - defer pw.Close() - - sum := 0 - pw.Write(name, Status{Action: "starting", Total: total}) - for i := 1; i <= total; i++ { - select { - case <-ctx.Done(): - return 0, ctx.Err() - case <-time.After(10 * time.Millisecond): - } - if i == total { - pw.Write(name, Status{Action: "done", Total: total, Current: total}) - } else { - pw.Write(name, Status{Action: "calculating", Total: total, Current: i}) - } - sum += i - } - - return sum, nil -} - -func reduceCalc(ctx context.Context, total int) (int, error) { - eg, ctx := errgroup.WithContext(ctx) - - pw, _, ctx := FromContext(ctx) - defer pw.Close() - - pw.Write("reduce", Status{Action: "starting"}) - - // sync step - sum, err := calc(ctx, total, "synccalc") - if err != nil { - return 0, err - } - // parallel steps - for i := 0; i < 2; i++ { - func(i int) { - eg.Go(func() error { - _, err := calc(ctx, total, fmt.Sprintf("calc-%d", i)) - return err - }) - }(i) - } - if err := eg.Wait(); err != nil { - return 0, err - } - return sum, nil -} - -type trace struct { - items []*Progress -} - -func saveProgress(ctx context.Context, pr Reader, t *trace) error { - for { - p, err := pr.Read(ctx) - if err != nil { - if err == io.EOF { - return nil - } - return err - } - t.items = append(t.items, p...) 
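saveProgress above is the canonical consumer loop for a progress.Reader: read batches until io.EOF, which signals a clean close rather than a failure. The same contract, demonstrated with a hypothetical in-memory reader:

package main

import (
	"errors"
	"fmt"
	"io"
)

// fakeReader stands in for progress.Reader: it returns batches until the
// source is exhausted, then io.EOF.
type fakeReader struct{ batches [][]string }

func (r *fakeReader) Read() ([]string, error) {
	if len(r.batches) == 0 {
		return nil, io.EOF
	}
	b := r.batches[0]
	r.batches = r.batches[1:]
	return b, nil
}

// drain collects every item, treating io.EOF as normal termination,
// exactly like the saveProgress loop in the deleted test.
func drain(r *fakeReader) ([]string, error) {
	var all []string
	for {
		p, err := r.Read()
		if err != nil {
			if errors.Is(err, io.EOF) {
				return all, nil
			}
			return nil, err
		}
		all = append(all, p...)
	}
}

func main() {
	r := &fakeReader{batches: [][]string{{"a", "b"}, {"c"}}}
	all, err := drain(r)
	fmt.Println(all, err) // [a b c] <nil>
}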
- } -} diff --git a/vendor/github.com/moby/buildkit/util/progress/progressui/display.go b/vendor/github.com/moby/buildkit/util/progress/progressui/display.go deleted file mode 100644 index e615d683ddca..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/progressui/display.go +++ /dev/null @@ -1,432 +0,0 @@ -package progressui - -import ( - "bytes" - "context" - "fmt" - "io" - "strings" - "time" - - "github.com/containerd/console" - "github.com/moby/buildkit/client" - "github.com/morikuni/aec" - digest "github.com/opencontainers/go-digest" - "github.com/tonistiigi/units" - "golang.org/x/time/rate" -) - -func DisplaySolveStatus(ctx context.Context, phase string, c console.Console, w io.Writer, ch chan *client.SolveStatus) error { - - modeConsole := c != nil - - disp := &display{c: c, phase: phase} - printer := &textMux{w: w} - - if disp.phase == "" { - disp.phase = "Building" - } - - t := newTrace(w) - - var done bool - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - displayLimiter := rate.NewLimiter(rate.Every(70*time.Millisecond), 1) - - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-ticker.C: - case ss, ok := <-ch: - if ok { - t.update(ss) - } else { - done = true - } - } - - if modeConsole { - if done { - disp.print(t.displayInfo(), true) - t.printErrorLogs(c) - return nil - } else if displayLimiter.Allow() { - disp.print(t.displayInfo(), false) - } - } else { - if done || displayLimiter.Allow() { - printer.print(t) - if done { - return nil - } - } - } - } -} - -type displayInfo struct { - startTime time.Time - jobs []job - countTotal int - countCompleted int -} - -type job struct { - startTime *time.Time - completedTime *time.Time - name string - status string - hasError bool - isCanceled bool -} - -type trace struct { - w io.Writer - localTimeDiff time.Duration - vertexes []*vertex - byDigest map[digest.Digest]*vertex - nextIndex int - updates map[digest.Digest]struct{} -} - -type vertex struct { - *client.Vertex - statuses []*status - byID map[string]*status - indent string - index int - - logs [][]byte - logsPartial bool - logsOffset int - prev *client.Vertex - events []string - lastBlockTime *time.Time - count int - statusUpdates map[string]struct{} -} - -func (v *vertex) update(c int) { - if v.count == 0 { - now := time.Now() - v.lastBlockTime = &now - } - v.count += c -} - -type status struct { - *client.VertexStatus -} - -func newTrace(w io.Writer) *trace { - return &trace{ - byDigest: make(map[digest.Digest]*vertex), - updates: make(map[digest.Digest]struct{}), - w: w, - } -} - -func (t *trace) triggerVertexEvent(v *client.Vertex) { - if v.Started == nil { - return - } - - var old client.Vertex - vtx := t.byDigest[v.Digest] - if v := vtx.prev; v != nil { - old = *v - } - - var ev []string - if v.Digest != old.Digest { - ev = append(ev, fmt.Sprintf("%13s %s", "digest:", v.Digest)) - } - if v.Name != old.Name { - ev = append(ev, fmt.Sprintf("%13s %q", "name:", v.Name)) - } - if v.Started != old.Started { - if v.Started != nil && old.Started == nil || !v.Started.Equal(*old.Started) { - ev = append(ev, fmt.Sprintf("%13s %v", "started:", v.Started)) - } - } - if v.Completed != old.Completed && v.Completed != nil { - ev = append(ev, fmt.Sprintf("%13s %v", "completed:", v.Completed)) - if v.Started != nil { - ev = append(ev, fmt.Sprintf("%13s %v", "duration:", v.Completed.Sub(*v.Started))) - } - } - if v.Cached != old.Cached { - ev = append(ev, fmt.Sprintf("%13s %v", "cached:", v.Cached)) - } - if v.Error != old.Error { - ev = 
append(ev, fmt.Sprintf("%13s %q", "error:", v.Error)) - } - - if len(ev) > 0 { - vtx.events = append(vtx.events, ev...) - vtx.update(len(ev)) - t.updates[v.Digest] = struct{}{} - } - - t.byDigest[v.Digest].prev = v -} - -func (t *trace) update(s *client.SolveStatus) { - for _, v := range s.Vertexes { - prev, ok := t.byDigest[v.Digest] - if !ok { - t.nextIndex++ - t.byDigest[v.Digest] = &vertex{ - byID: make(map[string]*status), - statusUpdates: make(map[string]struct{}), - index: t.nextIndex, - } - } - t.triggerVertexEvent(v) - if v.Started != nil && (prev == nil || prev.Started == nil) { - if t.localTimeDiff == 0 { - t.localTimeDiff = time.Since(*v.Started) - } - t.vertexes = append(t.vertexes, t.byDigest[v.Digest]) - } - t.byDigest[v.Digest].Vertex = v - } - for _, s := range s.Statuses { - v, ok := t.byDigest[s.Vertex] - if !ok { - continue // shouldn't happen - } - prev, ok := v.byID[s.ID] - if !ok { - v.byID[s.ID] = &status{VertexStatus: s} - } - if s.Started != nil && (prev == nil || prev.Started == nil) { - v.statuses = append(v.statuses, v.byID[s.ID]) - } - v.byID[s.ID].VertexStatus = s - v.statusUpdates[s.ID] = struct{}{} - t.updates[v.Digest] = struct{}{} - v.update(1) - } - for _, l := range s.Logs { - v, ok := t.byDigest[l.Vertex] - if !ok { - continue // shouldn't happen - } - i := 0 - complete := split(l.Data, byte('\n'), func(dt []byte) { - if v.logsPartial && len(v.logs) != 0 && i == 0 { - v.logs[len(v.logs)-1] = append(v.logs[len(v.logs)-1], dt...) - } else { - ts := time.Duration(0) - if v.Started != nil { - ts = l.Timestamp.Sub(*v.Started) - } - v.logs = append(v.logs, []byte(fmt.Sprintf("#%d %s %s", v.index, fmt.Sprintf("%#.4g", ts.Seconds())[:5], dt))) - } - i++ - }) - v.logsPartial = !complete - t.updates[v.Digest] = struct{}{} - v.update(1) - } -} - -func (t *trace) printErrorLogs(f io.Writer) { - for _, v := range t.vertexes { - if v.Error != "" && !strings.HasSuffix(v.Error, context.Canceled.Error()) { - fmt.Fprintln(f, "------") - fmt.Fprintf(f, " > %s:\n", v.Name) - for _, l := range v.logs { - f.Write(l) - fmt.Fprintln(f) - } - fmt.Fprintln(f, "------") - } - } -} - -func (t *trace) displayInfo() (d displayInfo) { - d.startTime = time.Now() - if t.localTimeDiff != 0 { - d.startTime = (*t.vertexes[0].Started).Add(t.localTimeDiff) - } - d.countTotal = len(t.byDigest) - for _, v := range t.byDigest { - if v.Completed != nil { - d.countCompleted++ - } - } - - for _, v := range t.vertexes { - j := job{ - startTime: addTime(v.Started, t.localTimeDiff), - completedTime: addTime(v.Completed, t.localTimeDiff), - name: strings.Replace(v.Name, "\t", " ", -1), - } - if v.Error != "" { - if strings.HasSuffix(v.Error, context.Canceled.Error()) { - j.isCanceled = true - j.name = "CANCELED " + j.name - } else { - j.hasError = true - j.name = "ERROR " + j.name - } - } - if v.Cached { - j.name = "CACHED " + j.name - } - j.name = v.indent + j.name - d.jobs = append(d.jobs, j) - for _, s := range v.statuses { - j := job{ - startTime: addTime(s.Started, t.localTimeDiff), - completedTime: addTime(s.Completed, t.localTimeDiff), - name: v.indent + "=> " + s.ID, - } - if s.Total != 0 { - j.status = fmt.Sprintf("%.2f / %.2f", units.Bytes(s.Current), units.Bytes(s.Total)) - } else if s.Current != 0 { - j.status = fmt.Sprintf("%.2f", units.Bytes(s.Current)) - } - d.jobs = append(d.jobs, j) - } - } - - return d -} - -func split(dt []byte, sep byte, fn func([]byte)) bool { - if len(dt) == 0 { - return false - } - for { - if len(dt) == 0 { - return true - } - idx := bytes.IndexByte(dt, sep) - 
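trace.update marks changed vertexes in a map[digest.Digest]struct{} dirty set, so any number of updates to one vertex collapses into a single redraw on the next tick. The pattern in miniature, with string keys standing in for digests:

package main

import "fmt"

// tracker coalesces repeated updates per key, then drains them in one pass.
type tracker struct {
	items   map[string]int
	updates map[string]struct{}
}

func newTracker() *tracker {
	return &tracker{items: map[string]int{}, updates: map[string]struct{}{}}
}

func (t *tracker) update(key string, delta int) {
	t.items[key] += delta
	t.updates[key] = struct{}{} // mark dirty; duplicates collapse
}

func (t *tracker) flush(fn func(key string, v int)) {
	for k := range t.updates {
		fn(k, t.items[k])
	}
	t.updates = map[string]struct{}{}
}

func main() {
	tr := newTracker()
	tr.update("vtx1", 1)
	tr.update("vtx1", 1) // second update to vtx1 causes no extra flush entry
	tr.update("vtx2", 1)
	tr.flush(func(k string, v int) { fmt.Println(k, v) })
	fmt.Println("dirty after flush:", len(tr.updates)) // 0
}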
if idx == -1 { - fn(dt) - return false - } - fn(dt[:idx]) - dt = dt[idx+1:] - } -} - -func addTime(tm *time.Time, d time.Duration) *time.Time { - if tm == nil { - return nil - } - t := (*tm).Add(d) - return &t -} - -type display struct { - c console.Console - phase string - lineCount int - repeated bool -} - -func (disp *display) print(d displayInfo, all bool) { - // this output is inspired by Buck - width := 80 - height := 10 - size, err := disp.c.Size() - if err == nil && size.Width > 0 && size.Height > 0 { - width = int(size.Width) - height = int(size.Height) - } - - if !all { - d.jobs = wrapHeight(d.jobs, height-2) - } - - b := aec.EmptyBuilder - for i := 0; i <= disp.lineCount; i++ { - b = b.Up(1) - } - if !disp.repeated { - b = b.Down(1) - } - disp.repeated = true - fmt.Fprint(disp.c, b.Column(0).ANSI) - - statusStr := "" - if d.countCompleted > 0 && d.countCompleted == d.countTotal && all { - statusStr = "FINISHED" - } - - fmt.Fprint(disp.c, aec.Hide) - defer fmt.Fprint(disp.c, aec.Show) - - out := fmt.Sprintf("[+] %s %.1fs (%d/%d) %s", disp.phase, time.Since(d.startTime).Seconds(), d.countCompleted, d.countTotal, statusStr) - out = align(out, "", width) - fmt.Fprintln(disp.c, out) - lineCount := 0 - for _, j := range d.jobs { - endTime := time.Now() - if j.completedTime != nil { - endTime = *j.completedTime - } - if j.startTime == nil { - continue - } - dt := endTime.Sub(*j.startTime).Seconds() - if dt < 0.05 { - dt = 0 - } - pfx := " => " - timer := fmt.Sprintf(" %3.1fs\n", dt) - status := j.status - showStatus := false - - left := width - len(pfx) - len(timer) - 1 - if status != "" { - if left+len(status) > 20 { - showStatus = true - left -= len(status) + 1 - } - } - if left < 12 { // too small screen to show progress - continue - } - if len(j.name) > left { - j.name = j.name[:left] - } - - out := pfx + j.name - if showStatus { - out += " " + status - } - - out = align(out, timer, width) - if j.completedTime != nil { - color := aec.BlueF - if j.isCanceled { - color = aec.YellowF - } else if j.hasError { - color = aec.RedF - } - out = aec.Apply(out, color) - } - fmt.Fprint(disp.c, out) - lineCount++ - } - disp.lineCount = lineCount -} - -func align(l, r string, w int) string { - return fmt.Sprintf("%-[2]*[1]s %[3]s", l, w-len(r)-1, r) -} - -func wrapHeight(j []job, limit int) []job { - if len(j) > limit { - j = j[len(j)-limit:] - } - return j -} diff --git a/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go b/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go deleted file mode 100644 index 3794105aa3ed..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go +++ /dev/null @@ -1,248 +0,0 @@ -package progressui - -import ( - "fmt" - "io" - "time" - - digest "github.com/opencontainers/go-digest" - "github.com/tonistiigi/units" -) - -const antiFlicker = 5 * time.Second -const maxDelay = 10 * time.Second -const minTimeDelta = 5 * time.Second -const minProgressDelta = 0.05 // % - -type lastStatus struct { - Current int64 - Timestamp time.Time -} - -type textMux struct { - w io.Writer - current digest.Digest - last map[string]lastStatus -} - -func (p *textMux) printVtx(t *trace, dgst digest.Digest) { - if p.last == nil { - p.last = make(map[string]lastStatus) - } - - v, ok := t.byDigest[dgst] - if !ok { - return - } - - if dgst != p.current { - if p.current != "" { - old := t.byDigest[p.current] - if old.logsPartial { - fmt.Fprintln(p.w, "") - } - old.logsOffset = 0 - old.count = 0 - fmt.Fprintf(p.w, "#%d ...\n", v.index) 
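split above walks separator-delimited chunks without building a slice of slices, and its boolean result tells the caller whether the final chunk was complete; trace.update relies on that to set logsPartial for partially received log lines. It is small enough to reproduce verbatim with a driver:

package main

import (
	"bytes"
	"fmt"
)

// split calls fn for each sep-delimited chunk and reports whether the input
// ended on a separator, i.e. whether the last chunk was a complete line.
func split(dt []byte, sep byte, fn func([]byte)) bool {
	if len(dt) == 0 {
		return false
	}
	for {
		if len(dt) == 0 {
			return true
		}
		idx := bytes.IndexByte(dt, sep)
		if idx == -1 {
			fn(dt)
			return false
		}
		fn(dt[:idx])
		dt = dt[idx+1:]
	}
}

func main() {
	complete := split([]byte("one\ntwo\npartial"), '\n', func(b []byte) {
		fmt.Printf("%q\n", b)
	})
	fmt.Println("complete:", complete) // false: "partial" has no trailing \n
}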
- } - - fmt.Fprintf(p.w, "\n#%d %s\n", v.index, limitString(v.Name, 72)) - } - - if len(v.events) != 0 { - v.logsOffset = 0 - } - for _, ev := range v.events { - fmt.Fprintf(p.w, "#%d %s\n", v.index, ev) - } - v.events = v.events[:0] - - for _, s := range v.statuses { - if _, ok := v.statusUpdates[s.ID]; ok { - doPrint := true - - if last, ok := p.last[s.ID]; ok && s.Completed == nil { - var progressDelta float64 - if s.Total > 0 { - progressDelta = float64(s.Current-last.Current) / float64(s.Total) - } - timeDelta := s.Timestamp.Sub(last.Timestamp) - if progressDelta < minProgressDelta && timeDelta < minTimeDelta { - doPrint = false - } - } - - if !doPrint { - continue - } - - p.last[s.ID] = lastStatus{ - Timestamp: s.Timestamp, - Current: s.Current, - } - - var bytes string - if s.Total != 0 { - bytes = fmt.Sprintf(" %.2f / %.2f", units.Bytes(s.Current), units.Bytes(s.Total)) - } else if s.Current != 0 { - bytes = fmt.Sprintf(" %.2f", units.Bytes(s.Current)) - } - var tm string - endTime := s.Timestamp - if s.Completed != nil { - endTime = *s.Completed - } - if s.Started != nil { - diff := endTime.Sub(*s.Started).Seconds() - if diff > 0.01 { - tm = fmt.Sprintf(" %.1fs", diff) - } - } - if s.Completed != nil { - tm += " done" - } - fmt.Fprintf(p.w, "#%d %s%s%s\n", v.index, s.ID, bytes, tm) - } - } - v.statusUpdates = map[string]struct{}{} - - for i, l := range v.logs { - if i == 0 { - l = l[v.logsOffset:] - } - fmt.Fprintf(p.w, "%s", []byte(l)) - if i != len(v.logs)-1 || !v.logsPartial { - fmt.Fprintln(p.w, "") - } - } - - if len(v.logs) > 0 { - if v.logsPartial { - v.logs = v.logs[len(v.logs)-1:] - v.logsOffset = len(v.logs[0]) - } else { - v.logs = nil - v.logsOffset = 0 - } - } - - p.current = dgst - - if v.Completed != nil { - p.current = "" - v.count = 0 - fmt.Fprintf(p.w, "\n") - } - - delete(t.updates, dgst) -} - -func (p *textMux) print(t *trace) { - - completed := map[digest.Digest]struct{}{} - rest := map[digest.Digest]struct{}{} - - for dgst := range t.updates { - v, ok := t.byDigest[dgst] - if !ok { - continue - } - if v.Vertex.Completed != nil { - completed[dgst] = struct{}{} - } else { - rest[dgst] = struct{}{} - } - } - - current := p.current - - // items that have completed need to be printed first - if _, ok := completed[current]; ok { - p.printVtx(t, current) - } - - for dgst := range completed { - if dgst != current { - p.printVtx(t, dgst) - } - } - - if len(rest) == 0 { - if current != "" { - if v := t.byDigest[current]; v.Started != nil && v.Completed == nil { - return - } - } - // make any open vertex active - for dgst, v := range t.byDigest { - if v.Started != nil && v.Completed == nil { - p.printVtx(t, dgst) - return - } - } - return - } - - // now print the active one - if _, ok := rest[current]; ok { - p.printVtx(t, current) - } - - stats := map[digest.Digest]*vtxStat{} - now := time.Now() - sum := 0.0 - var max digest.Digest - if current != "" { - rest[current] = struct{}{} - } - for dgst := range rest { - v, ok := t.byDigest[dgst] - if !ok { - continue - } - tm := now.Sub(*v.lastBlockTime) - speed := float64(v.count) / tm.Seconds() - overLimit := tm > maxDelay && dgst != current - stats[dgst] = &vtxStat{blockTime: tm, speed: speed, overLimit: overLimit} - sum += speed - if overLimit || max == "" || stats[max].speed < speed { - max = dgst - } - } - for dgst := range stats { - stats[dgst].share = stats[dgst].speed / sum - } - - if _, ok := completed[current]; ok || current == "" { - p.printVtx(t, max) - return - } - - // show items that were hidden - for dgst := 
range rest { - if stats[dgst].overLimit { - p.printVtx(t, dgst) - return - } - } - - // fair split between vertexes - if 1.0/(1.0-stats[current].share)*antiFlicker.Seconds() < stats[current].blockTime.Seconds() { - p.printVtx(t, max) - return - } -} - -type vtxStat struct { - blockTime time.Duration - speed float64 - share float64 - overLimit bool -} - -func limitString(s string, l int) string { - if len(s) > l { - return s[:l] + "..." - } - return s -} diff --git a/vendor/github.com/moby/buildkit/util/pull/pull.go b/vendor/github.com/moby/buildkit/util/pull/pull.go deleted file mode 100644 index 350d77fd73f3..000000000000 --- a/vendor/github.com/moby/buildkit/util/pull/pull.go +++ /dev/null @@ -1,480 +0,0 @@ -package pull - -import ( - "context" - "sync" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/diff" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/platforms" - "github.com/containerd/containerd/reference" - "github.com/containerd/containerd/remotes" - "github.com/containerd/containerd/remotes/docker/schema1" - "github.com/containerd/containerd/rootfs" - ctdsnapshot "github.com/containerd/containerd/snapshots" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/util/imageutil" - "github.com/moby/buildkit/util/progress" - digest "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/identity" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type Puller struct { - Snapshotter snapshot.Snapshotter - ContentStore content.Store - Applier diff.Applier - Src reference.Spec - Platform *ocispec.Platform - // See NewResolver() - Resolver remotes.Resolver - resolveOnce sync.Once - desc ocispec.Descriptor - ref string - resolveErr error -} - -type Pulled struct { - Ref string - Descriptor ocispec.Descriptor - ChainID digest.Digest -} - -func (p *Puller) Resolve(ctx context.Context) (string, ocispec.Descriptor, error) { - p.resolveOnce.Do(func() { - resolveProgressDone := oneOffProgress(ctx, "resolve "+p.Src.String()) - - desc := ocispec.Descriptor{ - Digest: p.Src.Digest(), - } - if desc.Digest != "" { - info, err := p.ContentStore.Info(ctx, desc.Digest) - if err == nil { - desc.Size = info.Size - p.ref = p.Src.String() - ra, err := p.ContentStore.ReaderAt(ctx, desc) - if err == nil { - mt, err := imageutil.DetectManifestMediaType(ra) - if err == nil { - desc.MediaType = mt - p.desc = desc - resolveProgressDone(nil) - return - } - } - } - } - - ref, desc, err := p.Resolver.Resolve(ctx, p.Src.String()) - if err != nil { - p.resolveErr = err - resolveProgressDone(err) - return - } - p.desc = desc - p.ref = ref - resolveProgressDone(nil) - }) - return p.ref, p.desc, p.resolveErr -} - -func (p *Puller) Pull(ctx context.Context) (*Pulled, error) { - if _, _, err := p.Resolve(ctx); err != nil { - return nil, err - } - - var platform platforms.MatchComparer - if p.Platform != nil { - platform = platforms.Only(*p.Platform) - } else { - platform = platforms.Default() - } - - ongoing := newJobs(p.ref) - - pctx, stopProgress := context.WithCancel(ctx) - - go showProgress(pctx, ongoing, p.ContentStore) - - fetcher, err := p.Resolver.Fetcher(ctx, p.ref) - if err != nil { - stopProgress() - return nil, err - } - - // TODO: need a wrapper snapshot interface that combines content - // and snapshots as 1) buildkit shouldn't have a dependency on contentstore - // or 2) cachemanager should manage the contentstore - 
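[editor's note] The fair-split rule in the deleted textMux.print above decides when the currently displayed vertex must yield the screen: a vertex producing a larger share of total log throughput is allowed a longer silence before another vertex takes over. A minimal, self-contained sketch of that rule; vertexStat, shouldSwitch, and the 5s antiFlicker window are illustrative stand-ins, not the vendored API:

```go
package main

import (
	"fmt"
	"time"
)

// vertexStat mirrors the per-vertex bookkeeping kept by the deleted code:
// how long the vertex has been silent and its share of total throughput.
type vertexStat struct {
	blockTime time.Duration // time since the vertex last produced output
	share     float64       // speed / sum(all speeds), assumed < 1
}

// antiFlicker is the base hold window; 5s is an assumed value.
const antiFlicker = 5 * time.Second

// shouldSwitch reproduces the fair-split condition: the current vertex is
// swapped out once its silence exceeds the anti-flicker window stretched
// by 1/(1-share), so high-throughput vertexes get a longer grace period.
func shouldSwitch(current vertexStat) bool {
	return 1.0/(1.0-current.share)*antiFlicker.Seconds() < current.blockTime.Seconds()
}

func main() {
	// A vertex with a 30% share holds the screen for ~7.1s of silence.
	fmt.Println(shouldSwitch(vertexStat{share: 0.3, blockTime: 8 * time.Second})) // true
	fmt.Println(shouldSwitch(vertexStat{share: 0.3, blockTime: 6 * time.Second})) // false
}
```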
handlers := []images.Handler{ - images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - ongoing.add(desc) - return nil, nil - }), - } - var schema1Converter *schema1.Converter - if p.desc.MediaType == images.MediaTypeDockerSchema1Manifest { - schema1Converter = schema1.NewConverter(p.ContentStore, fetcher) - handlers = append(handlers, schema1Converter) - } else { - // Get all the children for a descriptor - childrenHandler := images.ChildrenHandler(p.ContentStore) - // Set any children labels for that content - childrenHandler = images.SetChildrenLabels(p.ContentStore, childrenHandler) - // Filter the children by the platform - childrenHandler = images.FilterPlatforms(childrenHandler, platform) - // Limit manifests pulled to the best match in an index - childrenHandler = images.LimitManifests(childrenHandler, platform, 1) - - handlers = append(handlers, - remotes.FetchHandler(p.ContentStore, fetcher), - childrenHandler, - ) - } - - if err := images.Dispatch(ctx, images.Handlers(handlers...), p.desc); err != nil { - stopProgress() - return nil, err - } - stopProgress() - - var usedBlobs, unusedBlobs []ocispec.Descriptor - - if schema1Converter != nil { - ongoing.remove(p.desc) // Not left in the content store so this is sufficient. - p.desc, err = schema1Converter.Convert(ctx) - if err != nil { - return nil, err - } - ongoing.add(p.desc) - - var mu sync.Mutex // images.Dispatch calls handlers in parallel - allBlobs := make(map[digest.Digest]ocispec.Descriptor) - for _, j := range ongoing.added { - allBlobs[j.Digest] = j.Descriptor - } - - handlers := []images.Handler{ - images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - mu.Lock() - defer mu.Unlock() - usedBlobs = append(usedBlobs, desc) - delete(allBlobs, desc.Digest) - return nil, nil - }), - images.FilterPlatforms(images.ChildrenHandler(p.ContentStore), platform), - } - - if err := images.Dispatch(ctx, images.Handlers(handlers...), p.desc); err != nil { - return nil, err - } - - for _, j := range allBlobs { - unusedBlobs = append(unusedBlobs, j) - } - } else { - for _, j := range ongoing.added { - usedBlobs = append(usedBlobs, j.Descriptor) - } - } - - // split all pulled data to layers and rest. layers remain roots and are deleted with snapshots. rest will be linked to layers. - var notLayerBlobs []ocispec.Descriptor - var layerBlobs []ocispec.Descriptor - for _, j := range usedBlobs { - switch j.MediaType { - case ocispec.MediaTypeImageLayer, images.MediaTypeDockerSchema2Layer, ocispec.MediaTypeImageLayerGzip, images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip: - layerBlobs = append(layerBlobs, j) - default: - notLayerBlobs = append(notLayerBlobs, j) - } - } - - for _, l := range layerBlobs { - labels := map[string]string{} - var fields []string - for _, nl := range notLayerBlobs { - k := "containerd.io/gc.ref.content." + nl.Digest.Hex()[:12] - labels[k] = nl.Digest.String() - fields = append(fields, "labels."+k) - } - if _, err := p.ContentStore.Update(ctx, content.Info{ - Digest: l.Digest, - Labels: labels, - }, fields...); err != nil { - return nil, err - } - } - - for _, nl := range append(notLayerBlobs, unusedBlobs...) 
{ - if err := p.ContentStore.Delete(ctx, nl.Digest); err != nil { - return nil, err - } - } - - csh, release := snapshot.NewContainerdSnapshotter(p.Snapshotter) - defer release() - - unpackProgressDone := oneOffProgress(ctx, "unpacking "+p.Src.String()) - chainid, err := unpack(ctx, p.desc, p.ContentStore, csh, p.Snapshotter, p.Applier, platform) - if err != nil { - return nil, unpackProgressDone(err) - } - unpackProgressDone(nil) - - return &Pulled{ - Ref: p.ref, - Descriptor: p.desc, - ChainID: chainid, - }, nil -} - -func unpack(ctx context.Context, desc ocispec.Descriptor, cs content.Store, csh ctdsnapshot.Snapshotter, s snapshot.Snapshotter, applier diff.Applier, platform platforms.MatchComparer) (digest.Digest, error) { - layers, err := getLayers(ctx, cs, desc, platform) - if err != nil { - return "", err - } - - var chain []digest.Digest - for _, layer := range layers { - labels := map[string]string{ - "containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339Nano), - } - if _, err := rootfs.ApplyLayer(ctx, layer, chain, csh, applier, ctdsnapshot.WithLabels(labels)); err != nil { - return "", err - } - chain = append(chain, layer.Diff.Digest) - } - chainID := identity.ChainID(chain) - if err != nil { - return "", err - } - - if err := fillBlobMapping(ctx, s, layers); err != nil { - return "", err - } - - return chainID, nil -} - -func fillBlobMapping(ctx context.Context, s snapshot.Snapshotter, layers []rootfs.Layer) error { - var chain []digest.Digest - for _, l := range layers { - chain = append(chain, l.Diff.Digest) - chainID := identity.ChainID(chain) - if err := s.SetBlob(ctx, string(chainID), l.Diff.Digest, l.Blob.Digest); err != nil { - return err - } - } - return nil -} - -func getLayers(ctx context.Context, provider content.Provider, desc ocispec.Descriptor, platform platforms.MatchComparer) ([]rootfs.Layer, error) { - manifest, err := images.Manifest(ctx, provider, desc, platform) - if err != nil { - return nil, errors.WithStack(err) - } - image := images.Image{Target: desc} - diffIDs, err := image.RootFS(ctx, provider, platform) - if err != nil { - return nil, errors.Wrap(err, "failed to resolve rootfs") - } - if len(diffIDs) != len(manifest.Layers) { - return nil, errors.Errorf("mismatched image rootfs and manifest layers %+v %+v", diffIDs, manifest.Layers) - } - layers := make([]rootfs.Layer, len(diffIDs)) - for i := range diffIDs { - layers[i].Diff = ocispec.Descriptor{ - // TODO: derive media type from compressed type - MediaType: ocispec.MediaTypeImageLayer, - Digest: diffIDs[i], - } - layers[i].Blob = manifest.Layers[i] - } - return layers, nil -} - -func showProgress(ctx context.Context, ongoing *jobs, cs content.Store) { - var ( - ticker = time.NewTicker(150 * time.Millisecond) - statuses = map[string]statusInfo{} - done bool - ) - defer ticker.Stop() - - pw, _, ctx := progress.FromContext(ctx) - defer pw.Close() - - for { - select { - case <-ticker.C: - case <-ctx.Done(): - done = true - } - - resolved := "resolved" - if !ongoing.isResolved() { - resolved = "resolving" - } - statuses[ongoing.name] = statusInfo{ - Ref: ongoing.name, - Status: resolved, - } - - actives := make(map[string]statusInfo) - - if !done { - active, err := cs.ListStatuses(ctx, "") - if err != nil { - // log.G(ctx).WithError(err).Error("active check failed") - continue - } - // update status of active entries! 
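[editor's note] The deleted unpack above applies each layer with rootfs.ApplyLayer and then names the resulting snapshot by folding the accumulated diff digests with identity.ChainID. A short sketch of just that naming step; the digests are placeholders for illustration:

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
)

func main() {
	// Uncompressed diff digests in application order, as unpack collects
	// them while applying layers.
	diffIDs := []digest.Digest{
		digest.FromString("layer-0"),
		digest.FromString("layer-1"),
		digest.FromString("layer-2"),
	}

	// identity.ChainID folds the list per the OCI image spec:
	// chainID(n) = digest(chainID(n-1) + " " + diffID(n)).
	// The result uniquely names the snapshot produced by applying
	// all layers in order.
	fmt.Println(identity.ChainID(diffIDs))
}
```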
- for _, active := range active { - actives[active.Ref] = statusInfo{ - Ref: active.Ref, - Status: "downloading", - Offset: active.Offset, - Total: active.Total, - StartedAt: active.StartedAt, - UpdatedAt: active.UpdatedAt, - } - } - } - - // now, update the items in jobs that are not in active - for _, j := range ongoing.jobs() { - refKey := remotes.MakeRefKey(ctx, j.Descriptor) - if a, ok := actives[refKey]; ok { - started := j.started - pw.Write(j.Digest.String(), progress.Status{ - Action: a.Status, - Total: int(a.Total), - Current: int(a.Offset), - Started: &started, - }) - continue - } - - if !j.done { - info, err := cs.Info(context.TODO(), j.Digest) - if err != nil { - if errdefs.IsNotFound(err) { - pw.Write(j.Digest.String(), progress.Status{ - Action: "waiting", - }) - continue - } - } else { - j.done = true - } - - if done || j.done { - started := j.started - createdAt := info.CreatedAt - pw.Write(j.Digest.String(), progress.Status{ - Action: "done", - Current: int(info.Size), - Total: int(info.Size), - Completed: &createdAt, - Started: &started, - }) - } - } - } - if done { - return - } - } -} - -// jobs provides a way of identifying the download keys for a particular task -// encountering during the pull walk. -// -// This is very minimal and will probably be replaced with something more -// featured. -type jobs struct { - name string - added map[digest.Digest]*job - mu sync.Mutex - resolved bool -} - -type job struct { - ocispec.Descriptor - done bool - started time.Time -} - -func newJobs(name string) *jobs { - return &jobs{ - name: name, - added: make(map[digest.Digest]*job), - } -} - -func (j *jobs) add(desc ocispec.Descriptor) { - j.mu.Lock() - defer j.mu.Unlock() - - if _, ok := j.added[desc.Digest]; ok { - return - } - j.added[desc.Digest] = &job{ - Descriptor: desc, - started: time.Now(), - } -} - -func (j *jobs) remove(desc ocispec.Descriptor) { - j.mu.Lock() - defer j.mu.Unlock() - - delete(j.added, desc.Digest) -} - -func (j *jobs) jobs() []*job { - j.mu.Lock() - defer j.mu.Unlock() - - descs := make([]*job, 0, len(j.added)) - for _, j := range j.added { - descs = append(descs, j) - } - return descs -} - -func (j *jobs) isResolved() bool { - j.mu.Lock() - defer j.mu.Unlock() - return j.resolved -} - -type statusInfo struct { - Ref string - Status string - Offset int64 - Total int64 - StartedAt time.Time - UpdatedAt time.Time -} - -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.FromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - // TODO: set error on status - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err - } -} diff --git a/vendor/github.com/moby/buildkit/util/pull/resolver.go b/vendor/github.com/moby/buildkit/util/pull/resolver.go deleted file mode 100644 index 5c57b04ff14f..000000000000 --- a/vendor/github.com/moby/buildkit/util/pull/resolver.go +++ /dev/null @@ -1,92 +0,0 @@ -package pull - -import ( - "context" - "net/http" - "time" - - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/remotes" - "github.com/containerd/containerd/remotes/docker" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/auth" - "github.com/moby/buildkit/source" - "github.com/moby/buildkit/util/resolver" - "github.com/moby/buildkit/util/tracing" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -func NewResolver(ctx context.Context, rfn 
resolver.ResolveOptionsFunc, sm *session.Manager, imageStore images.Store, mode source.ResolveMode, ref string) remotes.Resolver { - opt := docker.ResolverOptions{ - Client: http.DefaultClient, - } - if rfn != nil { - opt = rfn(ref) - } - opt.Credentials = getCredentialsFromSession(ctx, sm) - - r := docker.NewResolver(opt) - - if imageStore == nil || mode == source.ResolveModeForcePull { - return r - } - - return withLocalResolver{r, imageStore, mode} -} - -func getCredentialsFromSession(ctx context.Context, sm *session.Manager) func(string) (string, string, error) { - id := session.FromContext(ctx) - if id == "" { - return nil - } - return func(host string) (string, string, error) { - timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - caller, err := sm.Get(timeoutCtx, id) - if err != nil { - return "", "", err - } - - return auth.CredentialsFunc(tracing.ContextWithSpanFromContext(context.TODO(), ctx), caller)(host) - } -} - -// A remotes.Resolver which checks the local image store if the real -// resolver cannot find the image, essentially falling back to a local -// image if one is present. -// -// We do not override the Fetcher or Pusher methods: -// -// - Fetcher is called by github.com/containerd/containerd/remotes/:fetch() -// only after it has checked for the content locally, so avoid the -// hassle of interposing a local-fetch proxy and simply pass on the -// request. -// - Pusher wouldn't make sense to push locally, so just forward. - -type withLocalResolver struct { - remotes.Resolver - is images.Store - mode source.ResolveMode -} - -func (r withLocalResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) { - if r.mode == source.ResolveModePreferLocal { - if img, err := r.is.Get(ctx, ref); err == nil { - return ref, img.Target, nil - } - } - - n, desc, err := r.Resolver.Resolve(ctx, ref) - if err == nil { - return n, desc, err - } - - if r.mode == source.ResolveModeDefault { - if img, err := r.is.Get(ctx, ref); err == nil { - return ref, img.Target, nil - } - } - - return "", ocispec.Descriptor{}, err -} diff --git a/vendor/github.com/moby/buildkit/util/push/push.go b/vendor/github.com/moby/buildkit/util/push/push.go deleted file mode 100644 index ab4df15a300f..000000000000 --- a/vendor/github.com/moby/buildkit/util/push/push.go +++ /dev/null @@ -1,186 +0,0 @@ -package push - -import ( - "context" - "encoding/json" - "fmt" - "sync" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/remotes" - "github.com/containerd/containerd/remotes/docker" - "github.com/docker/distribution/reference" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/auth" - "github.com/moby/buildkit/util/imageutil" - "github.com/moby/buildkit/util/progress" - "github.com/moby/buildkit/util/resolver" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" -) - -func getCredentialsFunc(ctx context.Context, sm *session.Manager) func(string) (string, string, error) { - id := session.FromContext(ctx) - if id == "" { - return nil - } - return func(host string) (string, string, error) { - timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - caller, err := sm.Get(timeoutCtx, id) - if err != nil { - return "", "", err - } - - return auth.CredentialsFunc(context.TODO(), caller)(host) - } -} - -func Push(ctx 
context.Context, sm *session.Manager, cs content.Provider, dgst digest.Digest, ref string, insecure bool, rfn resolver.ResolveOptionsFunc) error { - desc := ocispec.Descriptor{ - Digest: dgst, - } - parsed, err := reference.ParseNormalizedNamed(ref) - if err != nil { - return err - } - ref = reference.TagNameOnly(parsed).String() - - opt := rfn(ref) - opt.Credentials = getCredentialsFunc(ctx, sm) - if insecure { - opt.PlainHTTP = insecure - } - - resolver := docker.NewResolver(opt) - - pusher, err := resolver.Pusher(ctx, ref) - if err != nil { - return err - } - - var m sync.Mutex - manifestStack := []ocispec.Descriptor{} - - filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, - images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - m.Lock() - manifestStack = append(manifestStack, desc) - m.Unlock() - return nil, images.ErrStopHandler - default: - return nil, nil - } - }) - - pushHandler := remotes.PushHandler(pusher, cs) - - handlers := append([]images.Handler{}, - childrenHandler(cs), - filterHandler, - pushHandler, - ) - - ra, err := cs.ReaderAt(ctx, desc) - if err != nil { - return err - } - - mtype, err := imageutil.DetectManifestMediaType(ra) - if err != nil { - return err - } - - layersDone := oneOffProgress(ctx, "pushing layers") - err = images.Dispatch(ctx, images.Handlers(handlers...), ocispec.Descriptor{ - Digest: dgst, - Size: ra.Size(), - MediaType: mtype, - }) - layersDone(err) - if err != nil { - return err - } - - mfstDone := oneOffProgress(ctx, fmt.Sprintf("pushing manifest for %s", ref)) - for i := len(manifestStack) - 1; i >= 0; i-- { - _, err := pushHandler(ctx, manifestStack[i]) - if err != nil { - mfstDone(err) - return err - } - } - mfstDone(nil) - return nil -} - -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.FromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - // TODO: set error on status - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err - } -} - -func childrenHandler(provider content.Provider) images.HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - var descs []ocispec.Descriptor - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - // TODO(stevvooe): We just assume oci manifest, for now. There may be - // subtle differences from the docker version. - var manifest ocispec.Manifest - if err := json.Unmarshal(p, &manifest); err != nil { - return nil, err - } - - descs = append(descs, manifest.Config) - descs = append(descs, manifest.Layers...) 
- case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - var index ocispec.Index - if err := json.Unmarshal(p, &index); err != nil { - return nil, err - } - - for _, m := range index.Manifests { - if m.Digest != "" { - descs = append(descs, m) - } - } - case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, - images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, - ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip: - // childless data types. - return nil, nil - default: - logrus.Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType) - } - - return descs, nil - } -} diff --git a/vendor/github.com/moby/buildkit/util/resolver/resolver.go b/vendor/github.com/moby/buildkit/util/resolver/resolver.go deleted file mode 100644 index da28923564cd..000000000000 --- a/vendor/github.com/moby/buildkit/util/resolver/resolver.go +++ /dev/null @@ -1,45 +0,0 @@ -package resolver - -import ( - "math/rand" - - "github.com/containerd/containerd/remotes/docker" - "github.com/docker/distribution/reference" - "github.com/moby/buildkit/util/tracing" -) - -type RegistryConf struct { - Mirrors []string - PlainHTTP bool -} - -type ResolveOptionsFunc func(string) docker.ResolverOptions - -func NewResolveOptionsFunc(m map[string]RegistryConf) ResolveOptionsFunc { - return func(ref string) docker.ResolverOptions { - def := docker.ResolverOptions{ - Client: tracing.DefaultClient, - } - - parsed, err := reference.ParseNormalizedNamed(ref) - if err != nil { - return def - } - host := reference.Domain(parsed) - - c, ok := m[host] - if !ok { - return def - } - - if len(c.Mirrors) > 0 { - def.Host = func(string) (string, error) { - return c.Mirrors[rand.Intn(len(c.Mirrors))], nil - } - } - - def.PlainHTTP = c.PlainHTTP - - return def - } -} diff --git a/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go b/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go deleted file mode 100644 index 12646e430acd..000000000000 --- a/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go +++ /dev/null @@ -1,40 +0,0 @@ -package specconv - -import ( - "strings" - - "github.com/opencontainers/runtime-spec/specs-go" -) - -// ToRootless converts spec to be compatible with "rootless" runc. -// * Remove /sys mount -// * Remove cgroups -// -// See docs/rootless.md for the supported runc revision. -func ToRootless(spec *specs.Spec) error { - // Remove /sys mount because we can't mount /sys when the daemon netns - // is not unshared from the host. - // - // Instead, we could bind-mount /sys from the host, however, `rbind, ro` - // does not make /sys/fs/cgroup read-only (and we can't bind-mount /sys - // without rbind) - // - // PR for making /sys/fs/cgroup read-only is proposed, but it is very - // complicated: https://github.com/opencontainers/runc/pull/1869 - // - // For buildkit usecase, we suppose we don't need to provide /sys to - // containers and remove /sys mount as a workaround. 
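[editor's note] The mount-filtering loop of the deleted ToRootless follows below; for reference, a self-contained usage sketch of the same /sys-pruning behavior against a stub runtime spec. dropSysMounts is a hypothetical stand-in for the vendored helper:

```go
package main

import (
	"fmt"
	"strings"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// dropSysMounts mirrors what the deleted ToRootless does to spec.Mounts:
// every mount destined under /sys is removed, everything else is kept.
func dropSysMounts(spec *specs.Spec) {
	var mounts []specs.Mount
	for _, m := range spec.Mounts {
		if strings.HasPrefix(m.Destination, "/sys") {
			continue
		}
		mounts = append(mounts, m)
	}
	spec.Mounts = mounts
}

func main() {
	spec := &specs.Spec{Mounts: []specs.Mount{
		{Destination: "/proc", Type: "proc"},
		{Destination: "/sys", Type: "sysfs"},
		{Destination: "/sys/fs/cgroup", Type: "cgroup"},
	}}
	dropSysMounts(spec)
	for _, m := range spec.Mounts {
		fmt.Println(m.Destination) // only /proc survives
	}
}
```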
- var mounts []specs.Mount - for _, mount := range spec.Mounts { - if strings.HasPrefix(mount.Destination, "/sys") { - continue - } - mounts = append(mounts, mount) - } - spec.Mounts = mounts - - // Remove cgroups so as to avoid `container_linux.go:337: starting container process caused "process_linux.go:280: applying cgroup configuration for process caused \"mkdir /sys/fs/cgroup/cpuset/buildkit: permission denied\""` - spec.Linux.Resources = nil - spec.Linux.CgroupsPath = "" - return nil -} diff --git a/vendor/github.com/moby/buildkit/util/system/path_unix.go b/vendor/github.com/moby/buildkit/util/system/path_unix.go deleted file mode 100644 index c607c4db09f2..000000000000 --- a/vendor/github.com/moby/buildkit/util/system/path_unix.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !windows - -package system - -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, -// is the system drive. This is a no-op on Linux. -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return path, nil -} diff --git a/vendor/github.com/moby/buildkit/util/system/path_windows.go b/vendor/github.com/moby/buildkit/util/system/path_windows.go deleted file mode 100644 index cbfe2c1576ce..000000000000 --- a/vendor/github.com/moby/buildkit/util/system/path_windows.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build windows - -package system - -import ( - "fmt" - "path/filepath" - "strings" -) - -// DefaultPathEnv is deliberately empty on Windows as the default path will be set by -// the container. Docker has no context of what the default path should be. -const DefaultPathEnv = "" - -// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. -// This is used, for example, when validating a user provided path in docker cp. -// If a drive letter is supplied, it must be the system drive. The drive letter -// is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be contatenated with -// a Windows long-path which doesn't support drive-letters. Examples: -// C: --> Fail -// C:\ --> \ -// a --> a -// /a --> \a -// d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("No relative path specified in %q", path) - } - if !filepath.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil - } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("The specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil -} diff --git a/vendor/github.com/moby/buildkit/util/system/path_windows_test.go b/vendor/github.com/moby/buildkit/util/system/path_windows_test.go deleted file mode 100644 index f44d77e6ca87..000000000000 --- a/vendor/github.com/moby/buildkit/util/system/path_windows_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// +build windows - -package system - -import "testing" - -// TestCheckSystemDriveAndRemoveDriveLetter tests CheckSystemDriveAndRemoveDriveLetter -func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { - // Fails if not C drive. 
- _, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`) - if err == nil || (err != nil && err.Error() != "The specified path is not on the system drive (C:)") { - t.Fatalf("Expected error for d:") - } - - // Single character is unchanged - if path, err = CheckSystemDriveAndRemoveDriveLetter("z"); err != nil { - t.Fatalf("Single character should pass") - } - if path != "z" { - t.Fatalf("Single character should be unchanged") - } - - // Two characters without colon is unchanged - if path, err = CheckSystemDriveAndRemoveDriveLetter("AB"); err != nil { - t.Fatalf("2 characters without colon should pass") - } - if path != "AB" { - t.Fatalf("2 characters without colon should be unchanged") - } - - // Abs path without drive letter - if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`); err != nil { - t.Fatalf("abs path no drive letter should pass") - } - if path != `\l` { - t.Fatalf("abs path without drive letter should be unchanged") - } - - // Abs path without drive letter, linux style - if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`); err != nil { - t.Fatalf("abs path no drive letter linux style should pass") - } - if path != `\l` { - t.Fatalf("abs path without drive letter linux failed %s", path) - } - - // Drive-colon should be stripped - if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`); err != nil { - t.Fatalf("An absolute path should pass") - } - if path != `\` { - t.Fatalf(`An absolute path should have been shortened to \ %s`, path) - } - - // Verify with a linux-style path - if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`); err != nil { - t.Fatalf("An absolute path should pass") - } - if path != `\` { - t.Fatalf(`A linux style absolute path should have been shortened to \ %s`, path) - } - - // Failure on c: - if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`); err == nil { - t.Fatalf("c: should fail") - } - if err.Error() != `No relative path specified in "c:"` { - t.Fatalf(path, err) - } - - // Failure on d: - if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`); err == nil { - t.Fatalf("c: should fail") - } - if err.Error() != `No relative path specified in "d:"` { - t.Fatalf(path, err) - } -} diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go b/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go deleted file mode 100644 index 62afa03fef03..000000000000 --- a/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build linux,seccomp - -package system - -import ( - "sync" - - "golang.org/x/sys/unix" -) - -var seccompSupported bool -var seccompOnce sync.Once - -func SeccompSupported() bool { - seccompOnce.Do(func() { - seccompSupported = getSeccompSupported() - }) - return seccompSupported -} - -func getSeccompSupported() bool { - if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL { - // Make sure the kernel has CONFIG_SECCOMP_FILTER. 
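[editor's note] The drive-letter tests deleted above repeat one assert pattern per input and appear to use a path variable that is never declared in the shown hunk. A table-driven sketch of the same cases, as an editorial rewrite rather than the vendored test:

```go
// +build windows

package system

import "testing"

// A table-driven sketch of the deleted drive-letter tests; it also
// declares the path variable the original assertions relied on.
func TestCheckSystemDriveAndRemoveDriveLetterTable(t *testing.T) {
	cases := []struct {
		in      string
		want    string
		wantErr bool
	}{
		{in: `d:\`, wantErr: true}, // non-system drive fails
		{in: "z", want: "z"},       // single character unchanged
		{in: "AB", want: "AB"},     // two characters, no colon
		{in: `\l`, want: `\l`},     // absolute, no drive letter
		{in: `/l`, want: `\l`},     // linux-style separator translated
		{in: `c:\`, want: `\`},     // drive letter stripped
		{in: `c:/`, want: `\`},     // linux-style with drive letter
		{in: `c:`, wantErr: true},  // bare drive-colon is rejected
		{in: `d:`, wantErr: true},
	}
	for _, c := range cases {
		path, err := CheckSystemDriveAndRemoveDriveLetter(c.in)
		if (err != nil) != c.wantErr {
			t.Fatalf("%q: unexpected error state: %v", c.in, err)
		}
		if !c.wantErr && path != c.want {
			t.Fatalf("%q: got %q, want %q", c.in, path, c.want)
		}
	}
}
```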
- if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { - return true - } - } - return false -} diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go b/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go deleted file mode 100644 index e348c379a903..000000000000 --- a/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux,seccomp - -package system - -func SeccompSupported() bool { - return false -} diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go b/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go deleted file mode 100644 index 84cfb7fa8398..000000000000 --- a/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !seccomp - -package system - -func SeccompSupported() bool { - return false -} diff --git a/vendor/github.com/moby/buildkit/util/testutil/httpserver/server.go b/vendor/github.com/moby/buildkit/util/testutil/httpserver/server.go deleted file mode 100644 index 76d1641700e2..000000000000 --- a/vendor/github.com/moby/buildkit/util/testutil/httpserver/server.go +++ /dev/null @@ -1,84 +0,0 @@ -package httpserver - -import ( - "bytes" - "io" - "net/http" - "net/http/httptest" - "sync" - "time" -) - -type TestServer struct { - *httptest.Server - mu sync.Mutex - routes map[string]Response - stats map[string]*Stat -} - -func NewTestServer(routes map[string]Response) *TestServer { - ts := &TestServer{ - routes: routes, - stats: map[string]*Stat{}, - } - ts.Server = httptest.NewServer(ts) - return ts -} - -func (s *TestServer) SetRoute(name string, resp Response) { - s.mu.Lock() - defer s.mu.Unlock() - s.routes[name] = resp -} - -func (s *TestServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { - s.mu.Lock() - resp, ok := s.routes[r.URL.Path] - if !ok { - w.WriteHeader(http.StatusNotFound) - s.mu.Unlock() - return - } - - if _, ok := s.stats[r.URL.Path]; !ok { - s.stats[r.URL.Path] = &Stat{} - } - - s.stats[r.URL.Path].AllRequests += 1 - - if resp.LastModified != nil { - w.Header().Set("Last-Modified", resp.LastModified.Format(time.RFC850)) - } - - if resp.Etag != "" { - w.Header().Set("ETag", resp.Etag) - if match := r.Header.Get("If-None-Match"); match == resp.Etag { - w.WriteHeader(http.StatusNotModified) - s.stats[r.URL.Path].CachedRequests++ - s.mu.Unlock() - return - } - } - - s.mu.Unlock() - - w.WriteHeader(http.StatusOK) - io.Copy(w, bytes.NewReader(resp.Content)) -} - -func (s *TestServer) Stats(name string) (st Stat) { - if st, ok := s.stats[name]; ok { - return *st - } - return -} - -type Response struct { - Content []byte - Etag string - LastModified *time.Time -} - -type Stat struct { - AllRequests, CachedRequests int -} diff --git a/vendor/github.com/moby/buildkit/util/testutil/integration/containerd.go b/vendor/github.com/moby/buildkit/util/testutil/integration/containerd.go deleted file mode 100644 index e75b9c4e97d1..000000000000 --- a/vendor/github.com/moby/buildkit/util/testutil/integration/containerd.go +++ /dev/null @@ -1,168 +0,0 @@ -package integration - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" - "time" - - "github.com/pkg/errors" -) - -func init() { - register(&containerd{ - name: "containerd", - containerd: "containerd", - containerdShim: "containerd-shim", - }) - // defined in hack/dockerfiles/test.Dockerfile. - // e.g. 
`containerd-1.0=/opt/containerd-1.0/bin,containerd-42.0=/opt/containerd-42.0/bin` - if s := os.Getenv("BUILDKIT_INTEGRATION_CONTAINERD_EXTRA"); s != "" { - entries := strings.Split(s, ",") - for _, entry := range entries { - pair := strings.Split(strings.TrimSpace(entry), "=") - if len(pair) != 2 { - panic(errors.Errorf("unexpected BUILDKIT_INTEGRATION_CONTAINERD_EXTRA: %q", s)) - } - name, bin := pair[0], pair[1] - register(&containerd{ - name: name, - containerd: filepath.Join(bin, "containerd"), - containerdShim: filepath.Join(bin, "containerd-shim"), - }) - } - } -} - -type containerd struct { - name string - containerd string - containerdShim string -} - -func (c *containerd) Name() string { - return c.name -} - -func (c *containerd) New(opt ...SandboxOpt) (sb Sandbox, cl func() error, err error) { - var conf SandboxConf - for _, o := range opt { - o(&conf) - } - - if err := lookupBinary(c.containerd); err != nil { - return nil, nil, err - } - if err := lookupBinary(c.containerdShim); err != nil { - return nil, nil, err - } - if err := lookupBinary("buildkitd"); err != nil { - return nil, nil, err - } - if err := requireRoot(); err != nil { - return nil, nil, err - } - - deferF := &multiCloser{} - cl = deferF.F() - - defer func() { - if err != nil { - deferF.F()() - cl = nil - } - }() - - tmpdir, err := ioutil.TempDir("", "bktest_containerd") - if err != nil { - return nil, nil, err - } - - deferF.append(func() error { return os.RemoveAll(tmpdir) }) - - address := filepath.Join(tmpdir, "containerd.sock") - config := fmt.Sprintf(`root = %q -state = %q -# CRI plugins listens on 10010/tcp for stream server. -# We disable CRI plugin so that multiple instance can run simultaneously. -disabled_plugins = ["cri"] - -[grpc] - address = %q - -[debug] - level = "debug" - address = %q - -[plugins] - [plugins.linux] - shim = %q -`, filepath.Join(tmpdir, "root"), filepath.Join(tmpdir, "state"), address, filepath.Join(tmpdir, "debug.sock"), c.containerdShim) - configFile := filepath.Join(tmpdir, "config.toml") - if err := ioutil.WriteFile(configFile, []byte(config), 0644); err != nil { - return nil, nil, err - } - - cmd := exec.Command(c.containerd, "--config", configFile) - - logs := map[string]*bytes.Buffer{} - - ctdStop, err := startCmd(cmd, logs) - if err != nil { - return nil, nil, err - } - if err := waitUnix(address, 5*time.Second); err != nil { - ctdStop() - return nil, nil, errors.Wrapf(err, "containerd did not start up: %s", formatLogs(logs)) - } - deferF.append(ctdStop) - - buildkitdArgs := []string{"buildkitd", - "--oci-worker=false", - "--containerd-worker=true", - "--containerd-worker-addr", address, - "--containerd-worker-labels=org.mobyproject.buildkit.worker.sandbox=true", // Include use of --containerd-worker-labels to trigger https://github.com/moby/buildkit/pull/603 - } - - if conf.mirror != "" { - dir, err := configWithMirror(conf.mirror) - if err != nil { - return nil, nil, err - } - deferF.append(func() error { - return os.RemoveAll(dir) - }) - buildkitdArgs = append(buildkitdArgs, "--config="+filepath.Join(dir, "buildkitd.toml")) - } - - buildkitdSock, stop, err := runBuildkitd(buildkitdArgs, logs, 0, 0) - if err != nil { - return nil, nil, err - } - deferF.append(stop) - - return &cdsandbox{address: address, sandbox: sandbox{mv: conf.mv, address: buildkitdSock, logs: logs, cleanup: deferF, rootless: false}}, cl, nil -} - -func formatLogs(m map[string]*bytes.Buffer) string { - var ss []string - for k, b := range m { - if b != nil { - ss = append(ss, fmt.Sprintf("%q:%q", k, 
b.String())) - } - } - return strings.Join(ss, ",") -} - -type cdsandbox struct { - sandbox - address string -} - -func (s *cdsandbox) ContainerdAddress() string { - return s.address -} diff --git a/vendor/github.com/moby/buildkit/util/testutil/integration/frombinary.go b/vendor/github.com/moby/buildkit/util/testutil/integration/frombinary.go deleted file mode 100644 index 7596e758d94c..000000000000 --- a/vendor/github.com/moby/buildkit/util/testutil/integration/frombinary.go +++ /dev/null @@ -1,57 +0,0 @@ -package integration - -import ( - "context" - "encoding/json" - "io/ioutil" - "os" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/content/local" - "github.com/containerd/containerd/images/archive" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -func providerFromBinary(fn string) (_ ocispec.Descriptor, _ content.Provider, _ func(), err error) { - ctx := context.TODO() - - tmpDir, err := ioutil.TempDir("", "buildkit-state") - if err != nil { - return ocispec.Descriptor{}, nil, nil, err - } - close := func() { - os.RemoveAll(tmpDir) - } - defer func() { - if err != nil { - close() - } - }() - // can't use contentutil.Buffer because ImportIndex takes content.Store even though only requires Provider/Ingester - c, err := local.NewStore(tmpDir) - if err != nil { - return ocispec.Descriptor{}, nil, nil, err - } - - f, err := os.Open(fn) - if err != nil { - return ocispec.Descriptor{}, nil, nil, err - } - defer f.Close() - - desc, err := archive.ImportIndex(ctx, c, f) - if err != nil { - return ocispec.Descriptor{}, nil, nil, err - } - - var idx ocispec.Index - dt, err := content.ReadBlob(ctx, c, desc) - if err != nil { - return ocispec.Descriptor{}, nil, nil, err - } - if err := json.Unmarshal(dt, &idx); err != nil { - return ocispec.Descriptor{}, nil, nil, err - } - - return idx.Manifests[0], c, close, nil -} diff --git a/vendor/github.com/moby/buildkit/util/testutil/integration/oci.go b/vendor/github.com/moby/buildkit/util/testutil/integration/oci.go deleted file mode 100644 index a22af37293aa..000000000000 --- a/vendor/github.com/moby/buildkit/util/testutil/integration/oci.go +++ /dev/null @@ -1,201 +0,0 @@ -package integration - -import ( - "bufio" - "bytes" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "syscall" - "testing" - "time" - - "github.com/google/shlex" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -func init() { - register(&oci{}) - - // the rootless uid is defined in hack/dockerfiles/test.Dockerfile - if s := os.Getenv("BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR"); s != "" { - var uid, gid int - if _, err := fmt.Sscanf(s, "%d:%d", &uid, &gid); err != nil { - logrus.Fatalf("unexpected BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR: %q", s) - } - if rootlessSupported(uid) { - register(&oci{uid: uid, gid: gid}) - } - } - -} - -type oci struct { - uid int - gid int -} - -func (s *oci) Name() string { - if s.uid != 0 { - return "oci-rootless" - } - return "oci" -} - -func (s *oci) New(opt ...SandboxOpt) (Sandbox, func() error, error) { - var c SandboxConf - for _, o := range opt { - o(&c) - } - - if err := lookupBinary("buildkitd"); err != nil { - return nil, nil, err - } - if err := requireRoot(); err != nil { - return nil, nil, err - } - logs := map[string]*bytes.Buffer{} - // Include use of --oci-worker-labels to trigger https://github.com/moby/buildkit/pull/603 - buildkitdArgs := []string{"buildkitd", "--oci-worker=true", "--containerd-worker=false", 
"--oci-worker-labels=org.mobyproject.buildkit.worker.sandbox=true"} - - deferF := &multiCloser{} - - if c.mirror != "" { - dir, err := configWithMirror(c.mirror) - if err != nil { - return nil, nil, err - } - deferF.append(func() error { - return os.RemoveAll(dir) - }) - buildkitdArgs = append(buildkitdArgs, "--config="+filepath.Join(dir, "buildkitd.toml")) - } - - if s.uid != 0 { - if s.gid == 0 { - deferF.F()() - return nil, nil, errors.Errorf("unsupported id pair: uid=%d, gid=%d", s.uid, s.gid) - } - // TODO: make sure the user exists and subuid/subgid are configured. - buildkitdArgs = append([]string{"sudo", "-u", fmt.Sprintf("#%d", s.uid), "-i", "--", "rootlesskit"}, buildkitdArgs...) - } - buildkitdSock, stop, err := runBuildkitd(buildkitdArgs, logs, s.uid, s.gid) - if err != nil { - deferF.F()() - return nil, nil, err - } - - deferF.append(stop) - - return &sandbox{address: buildkitdSock, mv: c.mv, logs: logs, cleanup: deferF, rootless: s.uid != 0}, deferF.F(), nil -} - -type sandbox struct { - address string - logs map[string]*bytes.Buffer - cleanup *multiCloser - rootless bool - mv matrixValue -} - -func (sb *sandbox) Address() string { - return sb.address -} - -func (sb *sandbox) PrintLogs(t *testing.T) { - for name, l := range sb.logs { - t.Log(name) - s := bufio.NewScanner(l) - for s.Scan() { - t.Log(s.Text()) - } - } -} - -func (sb *sandbox) NewRegistry() (string, error) { - url, cl, err := newRegistry("") - if err != nil { - return "", err - } - sb.cleanup.append(cl) - return url, nil -} - -func (sb *sandbox) Cmd(args ...string) *exec.Cmd { - if len(args) == 1 { - if split, err := shlex.Split(args[0]); err == nil { - args = split - } - } - cmd := exec.Command("buildctl", args...) - cmd.Env = append(cmd.Env, os.Environ()...) - cmd.Env = append(cmd.Env, "BUILDKIT_HOST="+sb.Address()) - return cmd -} - -func (sb *sandbox) Rootless() bool { - return sb.rootless -} - -func (sb *sandbox) Value(k string) interface{} { - return sb.mv.values[k].value -} - -func runBuildkitd(args []string, logs map[string]*bytes.Buffer, uid, gid int) (address string, cl func() error, err error) { - deferF := &multiCloser{} - cl = deferF.F() - - defer func() { - if err != nil { - deferF.F()() - cl = nil - } - }() - - tmpdir, err := ioutil.TempDir("", "bktest_buildkitd") - if err != nil { - return "", nil, err - } - if err := os.Chown(tmpdir, uid, gid); err != nil { - return "", nil, err - } - deferF.append(func() error { return os.RemoveAll(tmpdir) }) - - address = "unix://" + filepath.Join(tmpdir, "buildkitd.sock") - if runtime.GOOS == "windows" { - address = "//./pipe/buildkitd-" + filepath.Base(tmpdir) - } - - args = append(args, "--root", tmpdir, "--addr", address, "--debug") - cmd := exec.Command(args[0], args[1:]...) 
- cmd.Env = append(os.Environ(), "BUILDKIT_DEBUG_EXEC_OUTPUT=1") - cmd.SysProcAttr = &syscall.SysProcAttr{ - Setsid: true, // stretch sudo needs this for sigterm - } - - if stop, err := startCmd(cmd, logs); err != nil { - return "", nil, err - } else { - deferF.append(stop) - } - - if err := waitUnix(address, 5*time.Second); err != nil { - return "", nil, err - } - - return -} - -func rootlessSupported(uid int) bool { - cmd := exec.Command("sudo", "-u", fmt.Sprintf("#%d", uid), "-i", "--", "unshare", "-U", "true") - b, err := cmd.CombinedOutput() - if err != nil { - logrus.Warnf("rootless mode is not supported on this host: %v (%s)", err, string(b)) - return false - } - return true -} diff --git a/vendor/github.com/moby/buildkit/util/testutil/integration/registry.go b/vendor/github.com/moby/buildkit/util/testutil/integration/registry.go deleted file mode 100644 index be74ad5bd246..000000000000 --- a/vendor/github.com/moby/buildkit/util/testutil/integration/registry.go +++ /dev/null @@ -1,110 +0,0 @@ -package integration - -import ( - "bufio" - "context" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "regexp" - "time" - - "github.com/pkg/errors" -) - -func newRegistry(dir string) (url string, cl func() error, err error) { - if err := lookupBinary("registry"); err != nil { - return "", nil, err - } - - deferF := &multiCloser{} - cl = deferF.F() - - defer func() { - if err != nil { - deferF.F()() - cl = nil - } - }() - - if dir == "" { - tmpdir, err := ioutil.TempDir("", "test-registry") - if err != nil { - return "", nil, err - } - deferF.append(func() error { return os.RemoveAll(tmpdir) }) - dir = tmpdir - } - - if _, err := os.Stat(filepath.Join(dir, "config.yaml")); err != nil { - if !os.IsNotExist(err) { - return "", nil, err - } - template := fmt.Sprintf(`version: 0.1 -loglevel: debug -storage: - filesystem: - rootdirectory: %s -http: - addr: 127.0.0.1:0 -`, filepath.Join(dir, "data")) - - if err := ioutil.WriteFile(filepath.Join(dir, "config.yaml"), []byte(template), 0600); err != nil { - return "", nil, err - } - } - - cmd := exec.Command("registry", "serve", filepath.Join(dir, "config.yaml")) - rc, err := cmd.StderrPipe() - if err != nil { - return "", nil, err - } - if stop, err := startCmd(cmd, nil); err != nil { - return "", nil, err - } else { - deferF.append(stop) - } - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - url, err = detectPort(ctx, rc) - if err != nil { - return "", nil, err - } - - return -} - -func detectPort(ctx context.Context, rc io.ReadCloser) (string, error) { - r := regexp.MustCompile("listening on 127\\.0\\.0\\.1:(\\d+)") - s := bufio.NewScanner(rc) - found := make(chan struct{}) - defer func() { - close(found) - go io.Copy(ioutil.Discard, rc) - }() - - go func() { - select { - case <-ctx.Done(): - select { - case <-found: - return - default: - rc.Close() - } - case <-found: - } - }() - - for s.Scan() { - res := r.FindSubmatch(s.Bytes()) - if len(res) > 1 { - return "localhost:" + string(res[1]), nil - } - } - return "", errors.Errorf("no listening address found") -} diff --git a/vendor/github.com/moby/buildkit/util/testutil/integration/run.go b/vendor/github.com/moby/buildkit/util/testutil/integration/run.go deleted file mode 100644 index a3223a2c264e..000000000000 --- a/vendor/github.com/moby/buildkit/util/testutil/integration/run.go +++ /dev/null @@ -1,350 +0,0 @@ -package integration - -import ( - "context" - "fmt" - "io/ioutil" - "math/rand" - "os" - "os/exec" - "path/filepath" - 
"reflect" - "runtime" - "sort" - "strings" - "sync" - "syscall" - "testing" - "time" - - "github.com/containerd/containerd/content" - "github.com/moby/buildkit/util/contentutil" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type Sandbox interface { - Address() string - PrintLogs(*testing.T) - Cmd(...string) *exec.Cmd - NewRegistry() (string, error) - Rootless() bool - Value(string) interface{} // chosen matrix value -} - -type Worker interface { - New(...SandboxOpt) (Sandbox, func() error, error) - Name() string -} - -type SandboxConf struct { - mirror string - mv matrixValue -} - -type SandboxOpt func(*SandboxConf) - -func WithMirror(h string) SandboxOpt { - return func(c *SandboxConf) { - c.mirror = h - } -} - -func withMatrixValues(mv matrixValue) SandboxOpt { - return func(c *SandboxConf) { - c.mv = mv - } -} - -type Test func(*testing.T, Sandbox) - -var defaultWorkers []Worker - -func register(w Worker) { - defaultWorkers = append(defaultWorkers, w) -} - -func List() []Worker { - return defaultWorkers -} - -type TestOpt func(*TestConf) - -func WithMatrix(key string, m map[string]interface{}) TestOpt { - return func(tc *TestConf) { - if tc.matrix == nil { - tc.matrix = map[string]map[string]interface{}{} - } - tc.matrix[key] = m - } -} - -func WithMirroredImages(m map[string]string) TestOpt { - return func(tc *TestConf) { - if tc.mirroredImages == nil { - tc.mirroredImages = map[string]string{} - } - for k, v := range m { - tc.mirroredImages[k] = v - } - } -} - -type TestConf struct { - matrix map[string]map[string]interface{} - mirroredImages map[string]string -} - -func Run(t *testing.T, testCases []Test, opt ...TestOpt) { - if testing.Short() { - t.Skip("skipping in short mode") - } - - var tc TestConf - for _, o := range opt { - o(&tc) - } - - mirror, cleanup, err := runMirror(t, tc.mirroredImages) - require.NoError(t, err) - - var mu sync.Mutex - var count int - cleanOnComplete := func() func() { - count++ - return func() { - mu.Lock() - count-- - if count == 0 { - cleanup() - } - mu.Unlock() - } - } - defer cleanOnComplete()() - - matrix := prepareValueMatrix(tc) - - list := List() - if os.Getenv("BUILDKIT_WORKER_RANDOM") == "1" && len(list) > 0 { - rand.Seed(time.Now().UnixNano()) - list = []Worker{list[rand.Intn(len(list))]} - } - - for _, br := range list { - for _, tc := range testCases { - for _, mv := range matrix { - fn := getFunctionName(tc) - name := fn + "/worker=" + br.Name() + mv.functionSuffix() - func(fn, testName string, br Worker, tc Test, mv matrixValue) { - ok := t.Run(testName, func(t *testing.T) { - defer cleanOnComplete()() - if !strings.HasSuffix(fn, "NoParallel") { - t.Parallel() - } - sb, close, err := br.New(WithMirror(mirror), withMatrixValues(mv)) - if err != nil { - if errors.Cause(err) == ErrorRequirements { - t.Skip(err.Error()) - } - require.NoError(t, err) - } - defer func() { - assert.NoError(t, close()) - if t.Failed() { - sb.PrintLogs(t) - } - }() - tc(t, sb) - }) - require.True(t, ok) - }(fn, name, br, tc, mv) - } - } - } -} - -func getFunctionName(i interface{}) string { - fullname := runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() - dot := strings.LastIndex(fullname, ".") + 1 - return strings.Title(fullname[dot:]) -} - -var localImageCache map[string]map[string]struct{} - -func copyImagesLocal(t *testing.T, host string, images map[string]string) error { - for to, from := range images { - if localImageCache == 
nil { - localImageCache = map[string]map[string]struct{}{} - } - if _, ok := localImageCache[host]; !ok { - localImageCache[host] = map[string]struct{}{} - } - if _, ok := localImageCache[host][to]; ok { - continue - } - localImageCache[host][to] = struct{}{} - - var desc ocispec.Descriptor - var provider content.Provider - var err error - if strings.HasPrefix(from, "local:") { - var closer func() - desc, provider, closer, err = providerFromBinary(strings.TrimPrefix(from, "local:")) - if err != nil { - return err - } - if closer != nil { - defer closer() - } - } else { - desc, provider, err = contentutil.ProviderFromRef(from) - if err != nil { - return err - } - } - ingester, err := contentutil.IngesterFromRef(host + "/" + to) - if err != nil { - return err - } - if err := contentutil.CopyChain(context.TODO(), ingester, provider, desc); err != nil { - return err - } - t.Logf("copied %s to local mirror %s", from, host+"/"+to) - } - return nil -} - -func OfficialImages(names ...string) map[string]string { - ns := runtime.GOARCH - if ns == "arm64" { - ns = "arm64v8" - } else if ns != "amd64" && ns != "armhf" { - ns = "library" - } - m := map[string]string{} - for _, name := range names { - m["library/"+name] = "docker.io/" + ns + "/" + name - } - return m -} - -func configWithMirror(mirror string) (string, error) { - tmpdir, err := ioutil.TempDir("", "bktest_config") - if err != nil { - return "", err - } - if err := os.Chmod(tmpdir, 0711); err != nil { - return "", err - } - if err := ioutil.WriteFile(filepath.Join(tmpdir, "buildkitd.toml"), []byte(fmt.Sprintf(` -[registry."docker.io"] -mirrors=["%s"] -`, mirror)), 0644); err != nil { - return "", err - } - return tmpdir, nil -} - -func runMirror(t *testing.T, mirroredImages map[string]string) (host string, _ func() error, err error) { - mirrorDir := os.Getenv("BUILDKIT_REGISTRY_MIRROR_DIR") - - var f *os.File - if mirrorDir != "" { - f, err = os.Create(filepath.Join(mirrorDir, "lock")) - if err != nil { - return "", nil, err - } - defer func() { - if err != nil { - f.Close() - } - }() - if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil { - return "", nil, err - } - } - - mirror, cleanup, err := newRegistry(mirrorDir) - if err != nil { - return "", nil, err - } - defer func() { - if err != nil { - cleanup() - } - }() - - if err := copyImagesLocal(t, mirror, mirroredImages); err != nil { - return "", nil, err - } - - if mirrorDir != "" { - if err := syscall.Flock(int(f.Fd()), syscall.LOCK_UN); err != nil { - return "", nil, err - } - } - - return mirror, cleanup, err -} - -type matrixValue struct { - fn []string - values map[string]matrixValueChoice -} - -func (mv matrixValue) functionSuffix() string { - if len(mv.fn) == 0 { - return "" - } - sort.Strings(mv.fn) - sb := &strings.Builder{} - for _, f := range mv.fn { - sb.Write([]byte("/" + f + "=" + mv.values[f].name)) - } - return sb.String() -} - -type matrixValueChoice struct { - name string - value interface{} -} - -func newMatrixValue(key, name string, v interface{}) matrixValue { - return matrixValue{ - fn: []string{key}, - values: map[string]matrixValueChoice{ - key: { - name: name, - value: v, - }, - }, - } -} - -func prepareValueMatrix(tc TestConf) []matrixValue { - m := []matrixValue{} - for featureName, values := range tc.matrix { - current := m - m = []matrixValue{} - for featureValue, v := range values { - if len(current) == 0 { - m = append(m, newMatrixValue(featureName, featureValue, v)) - } - for _, c := range current { - vv := newMatrixValue(featureName, 
featureValue, v) - vv.fn = append(vv.fn, c.fn...) - for k, v := range c.values { - vv.values[k] = v - } - m = append(m, vv) - } - } - } - if len(m) == 0 { - m = append(m, matrixValue{}) - } - return m -} diff --git a/vendor/github.com/moby/buildkit/util/testutil/integration/util.go b/vendor/github.com/moby/buildkit/util/testutil/integration/util.go deleted file mode 100644 index 136d57aab39b..000000000000 --- a/vendor/github.com/moby/buildkit/util/testutil/integration/util.go +++ /dev/null @@ -1,128 +0,0 @@ -package integration - -import ( - "bytes" - "context" - "net" - "os" - "os/exec" - "strings" - "syscall" - "time" - - "github.com/pkg/errors" - "golang.org/x/sync/errgroup" -) - -func startCmd(cmd *exec.Cmd, logs map[string]*bytes.Buffer) (func() error, error) { - if logs != nil { - b := new(bytes.Buffer) - logs["stdout: "+cmd.Path] = b - cmd.Stdout = b - b = new(bytes.Buffer) - logs["stderr: "+cmd.Path] = b - cmd.Stderr = b - - } - - if err := cmd.Start(); err != nil { - return nil, err - } - eg, ctx := errgroup.WithContext(context.TODO()) - - stopped := make(chan struct{}) - stop := make(chan struct{}) - eg.Go(func() error { - _, err := cmd.Process.Wait() - close(stopped) - select { - case <-stop: - return nil - default: - return err - } - }) - - eg.Go(func() error { - select { - case <-ctx.Done(): - case <-stopped: - case <-stop: - cmd.Process.Signal(syscall.SIGTERM) - go func() { - select { - case <-stopped: - case <-time.After(20 * time.Second): - cmd.Process.Kill() - } - }() - } - return nil - }) - - return func() error { - close(stop) - return eg.Wait() - }, nil -} - -func waitUnix(address string, d time.Duration) error { - address = strings.TrimPrefix(address, "unix://") - addr, err := net.ResolveUnixAddr("unix", address) - if err != nil { - return err - } - - step := 50 * time.Millisecond - i := 0 - for { - if conn, err := net.DialUnix("unix", nil, addr); err == nil { - conn.Close() - break - } - i++ - if time.Duration(i)*step > d { - return errors.Errorf("failed dialing: %s", address) - } - time.Sleep(step) - } - return nil -} - -type multiCloser struct { - fns []func() error -} - -func (mc *multiCloser) F() func() error { - return func() error { - var err error - for i := range mc.fns { - if err1 := mc.fns[len(mc.fns)-1-i](); err == nil { - err = err1 - } - } - mc.fns = nil - return err - } -} - -func (mc *multiCloser) append(f func() error) { - mc.fns = append(mc.fns, f) -} - -var ErrorRequirements = errors.Errorf("missing requirements") - -func lookupBinary(name string) error { - _, err := exec.LookPath(name) - if err != nil { - return errors.Wrapf(ErrorRequirements, "failed to lookup %s binary", name) - } - return nil -} - -func requireRoot() error { - if os.Getuid() != 0 { - return errors.Wrap(ErrorRequirements, "requires root") - } - return nil -} diff --git a/vendor/github.com/moby/buildkit/util/testutil/tar.go b/vendor/github.com/moby/buildkit/util/testutil/tar.go deleted file mode 100644 index e7a9b41fa3b8..000000000000 --- a/vendor/github.com/moby/buildkit/util/testutil/tar.go +++ /dev/null @@ -1,51 +0,0 @@ -package testutil - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "io" - "io/ioutil" - - "github.com/pkg/errors" -) - -type TarItem struct { - Header *tar.Header - Data []byte -} - -func ReadTarToMap(dt []byte, compressed bool) (map[string]*TarItem, error) { - m := map[string]*TarItem{} - var r io.Reader = bytes.NewBuffer(dt) - if compressed { - gz, err := gzip.NewReader(r) - if err != nil { - return nil, errors.Wrapf(err, "error creating gzip reader") 
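[editor's note] prepareValueMatrix, deleted above, expands the per-feature test matrix into the cross product of all value choices, so each test runs once per combination. A compact sketch of the same expansion over a simplified value type; combo and crossProduct are hypothetical stand-ins for matrixValue and the vendored loop:

```go
package main

import "fmt"

// combo is a simplified stand-in for matrixValue: one chosen value per
// feature key.
type combo map[string]string

// crossProduct reproduces the shape of the deleted prepareValueMatrix:
// each feature multiplies the set of combinations built so far.
func crossProduct(matrix map[string][]string) []combo {
	out := []combo{{}}
	for feature, values := range matrix {
		var next []combo
		for _, prev := range out {
			for _, v := range values {
				c := combo{feature: v}
				for k, pv := range prev {
					c[k] = pv
				}
				next = append(next, c)
			}
		}
		out = next
	}
	return out
}

func main() {
	m := map[string][]string{
		"frontend": {"client", "gateway"},
		"cache":    {"on", "off"},
	}
	for _, c := range crossProduct(m) {
		fmt.Println(c) // 4 combinations
	}
}
```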
- } - defer gz.Close() - r = gz - } - tr := tar.NewReader(r) - for { - h, err := tr.Next() - if err != nil { - if err == io.EOF { - return m, nil - } - return nil, errors.Wrap(err, "error reading tar") - } - if _, ok := m[h.Name]; ok { - return nil, errors.Errorf("duplicate entries for %s", h.Name) - } - - var dt []byte - if h.Typeflag == tar.TypeReg { - dt, err = ioutil.ReadAll(tr) - if err != nil { - return nil, errors.Wrapf(err, "error reading file") - } - } - m[h.Name] = &TarItem{Header: h, Data: dt} - } -} diff --git a/vendor/github.com/moby/buildkit/util/throttle/throttle.go b/vendor/github.com/moby/buildkit/util/throttle/throttle.go deleted file mode 100644 index 490ccd9c3dcd..000000000000 --- a/vendor/github.com/moby/buildkit/util/throttle/throttle.go +++ /dev/null @@ -1,58 +0,0 @@ -package throttle - -import ( - "sync" - "time" -) - -// Throttle wraps a function so that internal function does not get called -// more frequently than the specified duration. -func Throttle(d time.Duration, f func()) func() { - return throttle(d, f, true) -} - -// ThrottleAfter wraps a function so that internal function does not get called -// more frequently than the specified duration. The delay is added after function -// has been called. -func ThrottleAfter(d time.Duration, f func()) func() { - return throttle(d, f, false) -} - -func throttle(d time.Duration, f func(), wait bool) func() { - var next, running bool - var mu sync.Mutex - return func() { - mu.Lock() - defer mu.Unlock() - - next = true - if !running { - running = true - go func() { - for { - mu.Lock() - if next == false { - running = false - mu.Unlock() - return - } - if !wait { - next = false - } - mu.Unlock() - - if wait { - time.Sleep(d) - mu.Lock() - next = false - mu.Unlock() - f() - } else { - f() - time.Sleep(d) - } - } - }() - } - } -} diff --git a/vendor/github.com/moby/buildkit/util/throttle/throttle_test.go b/vendor/github.com/moby/buildkit/util/throttle/throttle_test.go deleted file mode 100644 index a7f32ad9a6f6..000000000000 --- a/vendor/github.com/moby/buildkit/util/throttle/throttle_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package throttle - -import ( - "sync/atomic" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestThrottle(t *testing.T) { - t.Parallel() - - var i int64 - f := func() { - atomic.AddInt64(&i, 1) - } - - f = Throttle(50*time.Millisecond, f) - - f() - f() - - require.Equal(t, int64(0), atomic.LoadInt64(&i)) - - // test that i is never incremented twice and at least once in next 600ms - retries := 0 - for { - require.True(t, retries < 10) - time.Sleep(60 * time.Millisecond) - v := atomic.LoadInt64(&i) - if v > 1 { - require.Fail(t, "invalid value %d", v) - } - if v == 1 { - break - } - retries++ - } - - require.Equal(t, int64(1), atomic.LoadInt64(&i)) - - f() - - retries = 0 - for { - require.True(t, retries < 10) - time.Sleep(60 * time.Millisecond) - v := atomic.LoadInt64(&i) - if v == 2 { - break - } - retries++ - } - -} - -func TestThrottleAfter(t *testing.T) { - t.Parallel() - - var i int64 - f := func() { - atomic.AddInt64(&i, 1) - } - - f = ThrottleAfter(100*time.Millisecond, f) - - f() - - time.Sleep(10 * time.Millisecond) - require.Equal(t, int64(1), atomic.LoadInt64(&i)) - f() - time.Sleep(10 * time.Millisecond) - require.Equal(t, int64(1), atomic.LoadInt64(&i)) - - time.Sleep(200 * time.Millisecond) - require.Equal(t, int64(2), atomic.LoadInt64(&i)) -} diff --git a/vendor/github.com/moby/buildkit/util/tracing/multispan.go 
b/vendor/github.com/moby/buildkit/util/tracing/multispan.go deleted file mode 100644 index 2b157a1de3cf..000000000000 --- a/vendor/github.com/moby/buildkit/util/tracing/multispan.go +++ /dev/null @@ -1,22 +0,0 @@ -package tracing - -import ( - opentracing "github.com/opentracing/opentracing-go" -) - -// MultiSpan allows shared tracing to multiple spans. -// TODO: This is a temporary solution and doesn't really support shared tracing yet. Instead the first always wins. - -type MultiSpan struct { - opentracing.Span -} - -func NewMultiSpan() *MultiSpan { - return &MultiSpan{} -} - -func (ms *MultiSpan) Add(s opentracing.Span) { - if ms.Span == nil { - ms.Span = s - } -} diff --git a/vendor/github.com/moby/buildkit/util/tracing/tracing.go b/vendor/github.com/moby/buildkit/util/tracing/tracing.go deleted file mode 100644 index 6af2b8c55e39..000000000000 --- a/vendor/github.com/moby/buildkit/util/tracing/tracing.go +++ /dev/null @@ -1,109 +0,0 @@ -package tracing - -import ( - "context" - "fmt" - "io" - "net/http" - - "github.com/opentracing-contrib/go-stdlib/nethttp" - opentracing "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" -) - -// StartSpan starts a new span as a child of the span in context. -// If there is no span in context then this is a no-op. -// The difference from opentracing.StartSpanFromContext is that this method -// does not depend on global tracer. -func StartSpan(ctx context.Context, operationName string, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) { - parent := opentracing.SpanFromContext(ctx) - tracer := opentracing.Tracer(&opentracing.NoopTracer{}) - if parent != nil { - tracer = parent.Tracer() - opts = append(opts, opentracing.ChildOf(parent.Context())) - } - span := tracer.StartSpan(operationName, opts...) - if parent != nil { - return span, opentracing.ContextWithSpan(ctx, span) - } - return span, ctx -} - -// FinishWithError finalizes the span and sets the error if one is passed -func FinishWithError(span opentracing.Span, err error) { - if err != nil { - fields := []log.Field{ - log.String("event", "error"), - log.String("message", err.Error()), - } - if _, ok := err.(interface { - Cause() error - }); ok { - fields = append(fields, log.String("stack", fmt.Sprintf("%+v", err))) - } - span.LogFields(fields...) - ext.Error.Set(span, true) - } - span.Finish() -} - -// ContextWithSpanFromContext sets the tracing span of a context from other -// context if one is not already set. 
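
The `StartSpan`/`FinishWithError` pair shown above is the begin/end bracket this package expects around traced work: `StartSpan` creates a real child span only when the incoming context already carries one (falling back to a `NoopTracer` otherwise), and `FinishWithError` attaches error fields before finishing the span. A minimal calling sketch, where `doWork` stands in for any context-aware operation:

    span, ctx := tracing.StartSpan(ctx, "copy-layers") // no global tracer needed; no-op without a parent span
    err := doWork(ctx)                                 // hypothetical operation that honors ctx
    tracing.FinishWithError(span, err)                 // records err (if any), then finishes the span
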
Alternative would be -// context.WithoutCancel() that would copy the context but reset ctx.Done -func ContextWithSpanFromContext(ctx, ctx2 context.Context) context.Context { - // if already is a span then noop - if span := opentracing.SpanFromContext(ctx); span != nil { - return ctx - } - if span := opentracing.SpanFromContext(ctx2); span != nil { - return opentracing.ContextWithSpan(ctx, span) - } - return ctx -} - -var DefaultTransport http.RoundTripper = &Transport{ - RoundTripper: &nethttp.Transport{RoundTripper: http.DefaultTransport}, -} - -var DefaultClient = &http.Client{ - Transport: DefaultTransport, -} - -type Transport struct { - http.RoundTripper -} - -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - span := opentracing.SpanFromContext(req.Context()) - if span == nil { // no tracer connected with either request or transport - return t.RoundTripper.RoundTrip(req) - } - - req, tracer := nethttp.TraceRequest(span.Tracer(), req) - - resp, err := t.RoundTripper.RoundTrip(req) - if err != nil { - tracer.Finish() - return resp, err - } - - if req.Method == "HEAD" { - tracer.Finish() - } else { - resp.Body = closeTracker{resp.Body, tracer.Finish} - } - - return resp, err -} - -type closeTracker struct { - io.ReadCloser - finish func() -} - -func (c closeTracker) Close() error { - err := c.ReadCloser.Close() - c.finish() - return err -} diff --git a/vendor/github.com/moby/buildkit/util/winlayers/applier.go b/vendor/github.com/moby/buildkit/util/winlayers/applier.go deleted file mode 100644 index 25581b33d3d2..000000000000 --- a/vendor/github.com/moby/buildkit/util/winlayers/applier.go +++ /dev/null @@ -1,187 +0,0 @@ -package winlayers - -import ( - "archive/tar" - "context" - "io" - "io/ioutil" - "runtime" - "strings" - "sync" - - "github.com/containerd/containerd/archive" - "github.com/containerd/containerd/archive/compression" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/diff" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/mount" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func NewFileSystemApplierWithWindows(cs content.Provider, a diff.Applier) diff.Applier { - if runtime.GOOS == "windows" { - return a - } - - return &winApplier{ - cs: cs, - a: a, - } -} - -type winApplier struct { - cs content.Provider - a diff.Applier -} - -func (s *winApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount) (d ocispec.Descriptor, err error) { - if !hasWindowsLayerMode(ctx) { - return s.a.Apply(ctx, desc, mounts) - } - - isCompressed, err := images.IsCompressedDiff(ctx, desc.MediaType) - if err != nil { - return ocispec.Descriptor{}, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", desc.MediaType) - } - - var ocidesc ocispec.Descriptor - if err := mount.WithTempMount(ctx, mounts, func(root string) error { - ra, err := s.cs.ReaderAt(ctx, desc) - if err != nil { - return errors.Wrap(err, "failed to get reader from content store") - } - defer ra.Close() - - r := content.NewReader(ra) - if isCompressed { - ds, err := compression.DecompressStream(r) - if err != nil { - return err - } - defer ds.Close() - r = ds - } - - digester := digest.Canonical.Digester() - rc := &readCounter{ - r: io.TeeReader(r, digester.Hash()), - } - - rc2, discard := filter(rc, func(hdr *tar.Header) bool { - if strings.HasPrefix(hdr.Name, 
"Files/") { - hdr.Name = strings.TrimPrefix(hdr.Name, "Files/") - hdr.Linkname = strings.TrimPrefix(hdr.Linkname, "Files/") - // TODO: could convert the windows PAX headers to xattr here to reuse - // the original ones in diff for parent directories and file modifications - return true - } - return false - }) - - if _, err := archive.Apply(ctx, root, rc2); err != nil { - discard(err) - return err - } - - // Read any trailing data - if _, err := io.Copy(ioutil.Discard, rc); err != nil { - discard(err) - return err - } - - ocidesc = ocispec.Descriptor{ - MediaType: ocispec.MediaTypeImageLayer, - Size: rc.c, - Digest: digester.Digest(), - } - return nil - - }); err != nil { - return ocispec.Descriptor{}, err - } - return ocidesc, nil -} - -type readCounter struct { - r io.Reader - c int64 -} - -func (rc *readCounter) Read(p []byte) (n int, err error) { - n, err = rc.r.Read(p) - rc.c += int64(n) - return -} - -func filter(in io.Reader, f func(*tar.Header) bool) (io.Reader, func(error)) { - pr, pw := io.Pipe() - - rc := &readCanceler{Reader: in} - - go func() { - tarReader := tar.NewReader(rc) - tarWriter := tar.NewWriter(pw) - - pw.CloseWithError(func() error { - for { - h, err := tarReader.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - if f(h) { - if err := tarWriter.WriteHeader(h); err != nil { - return err - } - if h.Size > 0 { - if _, err := io.Copy(tarWriter, tarReader); err != nil { - return err - } - } - } else { - if h.Size > 0 { - if _, err := io.Copy(ioutil.Discard, tarReader); err != nil { - return err - } - } - } - } - return tarWriter.Close() - }()) - }() - - discard := func(err error) { - rc.cancel(err) - pw.CloseWithError(err) - } - - return pr, discard -} - -type readCanceler struct { - mu sync.Mutex - io.Reader - err error -} - -func (r *readCanceler) Read(b []byte) (int, error) { - r.mu.Lock() - if r.err != nil { - r.mu.Unlock() - return 0, r.err - } - n, err := r.Reader.Read(b) - r.mu.Unlock() - return n, err -} - -func (r *readCanceler) cancel(err error) { - r.mu.Lock() - r.err = err - r.mu.Unlock() -} diff --git a/vendor/github.com/moby/buildkit/util/winlayers/context.go b/vendor/github.com/moby/buildkit/util/winlayers/context.go deleted file mode 100644 index c0bd3f8a2f06..000000000000 --- a/vendor/github.com/moby/buildkit/util/winlayers/context.go +++ /dev/null @@ -1,19 +0,0 @@ -package winlayers - -import "context" - -type contextKeyT string - -var contextKey = contextKeyT("buildkit/winlayers-on") - -func UseWindowsLayerMode(ctx context.Context) context.Context { - return context.WithValue(ctx, contextKey, true) -} - -func hasWindowsLayerMode(ctx context.Context) bool { - v := ctx.Value(contextKey) - if v == nil { - return false - } - return true -} diff --git a/vendor/github.com/moby/buildkit/util/winlayers/differ.go b/vendor/github.com/moby/buildkit/util/winlayers/differ.go deleted file mode 100644 index cdbc335d49d9..000000000000 --- a/vendor/github.com/moby/buildkit/util/winlayers/differ.go +++ /dev/null @@ -1,274 +0,0 @@ -package winlayers - -import ( - "archive/tar" - "context" - "crypto/rand" - "encoding/base64" - "fmt" - "io" - "time" - - "github.com/containerd/containerd/archive" - "github.com/containerd/containerd/archive/compression" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/diff" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/mount" - digest "github.com/opencontainers/go-digest" - ocispec 
"github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - keyFileAttr = "MSWINDOWS.fileattr" - keySDRaw = "MSWINDOWS.rawsd" - keyCreationTime = "LIBARCHIVE.creationtime" -) - -func NewWalkingDiffWithWindows(store content.Store, d diff.Comparer) diff.Comparer { - return &winDiffer{ - store: store, - d: d, - } -} - -var emptyDesc = ocispec.Descriptor{} - -type winDiffer struct { - store content.Store - d diff.Comparer -} - -// Compare creates a diff between the given mounts and uploads the result -// to the content store. -func (s *winDiffer) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) { - if !hasWindowsLayerMode(ctx) { - return s.d.Compare(ctx, lower, upper, opts...) - } - - var config diff.Config - for _, opt := range opts { - if err := opt(&config); err != nil { - return emptyDesc, err - } - } - - if config.MediaType == "" { - config.MediaType = ocispec.MediaTypeImageLayerGzip - } - - var isCompressed bool - switch config.MediaType { - case ocispec.MediaTypeImageLayer: - case ocispec.MediaTypeImageLayerGzip: - isCompressed = true - default: - return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", config.MediaType) - } - - var ocidesc ocispec.Descriptor - if err := mount.WithTempMount(ctx, lower, func(lowerRoot string) error { - return mount.WithTempMount(ctx, upper, func(upperRoot string) error { - var newReference bool - if config.Reference == "" { - newReference = true - config.Reference = uniqueRef() - } - - cw, err := s.store.Writer(ctx, - content.WithRef(config.Reference), - content.WithDescriptor(ocispec.Descriptor{ - MediaType: config.MediaType, // most contentstore implementations just ignore this - })) - if err != nil { - return errors.Wrap(err, "failed to open writer") - } - defer func() { - if err != nil { - cw.Close() - if newReference { - if err := s.store.Abort(ctx, config.Reference); err != nil { - log.G(ctx).WithField("ref", config.Reference).Warnf("failed to delete diff upload") - } - } - } - }() - if !newReference { - if err := cw.Truncate(0); err != nil { - return err - } - } - - if isCompressed { - dgstr := digest.SHA256.Digester() - compressed, err := compression.CompressStream(cw, compression.Gzip) - if err != nil { - return errors.Wrap(err, "failed to get compressed stream") - } - var w io.Writer = io.MultiWriter(compressed, dgstr.Hash()) - w, discard, done := makeWindowsLayer(w) - err = archive.WriteDiff(ctx, w, lowerRoot, upperRoot) - if err != nil { - discard(err) - } - <-done - compressed.Close() - if err != nil { - return errors.Wrap(err, "failed to write compressed diff") - } - - if config.Labels == nil { - config.Labels = map[string]string{} - } - config.Labels["containerd.io/uncompressed"] = dgstr.Digest().String() - } else { - w, discard, done := makeWindowsLayer(cw) - if err = archive.WriteDiff(ctx, w, lowerRoot, upperRoot); err != nil { - discard(err) - return errors.Wrap(err, "failed to write diff") - } - <-done - } - - var commitopts []content.Opt - if config.Labels != nil { - commitopts = append(commitopts, content.WithLabels(config.Labels)) - } - - dgst := cw.Digest() - if err := cw.Commit(ctx, 0, dgst, commitopts...); err != nil { - return errors.Wrap(err, "failed to commit") - } - - info, err := s.store.Info(ctx, dgst) - if err != nil { - return errors.Wrap(err, "failed to get info from content store") - } - - ocidesc = ocispec.Descriptor{ - MediaType: config.MediaType, - Size: 
info.Size, - Digest: info.Digest, - } - return nil - }) - }); err != nil { - return emptyDesc, err - } - - return ocidesc, nil -} - -func uniqueRef() string { - t := time.Now() - var b [3]byte - // Ignore read failures, just decreases uniqueness - rand.Read(b[:]) - return fmt.Sprintf("%d-%s", t.UnixNano(), base64.URLEncoding.EncodeToString(b[:])) -} - -func prepareWinHeader(h *tar.Header) { - if h.PAXRecords == nil { - h.PAXRecords = map[string]string{} - } - if h.Typeflag == tar.TypeDir { - h.Mode |= 1 << 14 - h.PAXRecords[keyFileAttr] = "16" - } - - if h.Typeflag == tar.TypeReg { - h.Mode |= 1 << 15 - h.PAXRecords[keyFileAttr] = "32" - } - - if !h.ModTime.IsZero() { - h.PAXRecords[keyCreationTime] = fmt.Sprintf("%d.%d", h.ModTime.Unix(), h.ModTime.Nanosecond()) - } - - h.Format = tar.FormatPAX -} - -func addSecurityDescriptor(h *tar.Header) { - if h.Typeflag == tar.TypeDir { - // O:BAG:SYD:(A;OICI;FA;;;BA)(A;OICI;FA;;;SY)(A;;FA;;;BA)(A;OICIIO;GA;;;CO)(A;OICI;0x1200a9;;;BU)(A;CI;LC;;;BU)(A;CI;DC;;;BU) - h.PAXRecords[keySDRaw] = "AQAEgBQAAAAkAAAAAAAAADAAAAABAgAAAAAABSAAAAAgAgAAAQEAAAAAAAUSAAAAAgCoAAcAAAAAAxgA/wEfAAECAAAAAAAFIAAAACACAAAAAxQA/wEfAAEBAAAAAAAFEgAAAAAAGAD/AR8AAQIAAAAAAAUgAAAAIAIAAAALFAAAAAAQAQEAAAAAAAMAAAAAAAMYAKkAEgABAgAAAAAABSAAAAAhAgAAAAIYAAQAAAABAgAAAAAABSAAAAAhAgAAAAIYAAIAAAABAgAAAAAABSAAAAAhAgAA" - } - - if h.Typeflag == tar.TypeReg { - // O:BAG:SYD:(A;;FA;;;BA)(A;;FA;;;SY)(A;;0x1200a9;;;BU) - h.PAXRecords[keySDRaw] = "AQAEgBQAAAAkAAAAAAAAADAAAAABAgAAAAAABSAAAAAgAgAAAQEAAAAAAAUSAAAAAgBMAAMAAAAAABgA/wEfAAECAAAAAAAFIAAAACACAAAAABQA/wEfAAEBAAAAAAAFEgAAAAAAGACpABIAAQIAAAAAAAUgAAAAIQIAAA==" - } -} - -func makeWindowsLayer(w io.Writer) (io.Writer, func(error), chan error) { - pr, pw := io.Pipe() - done := make(chan error) - - go func() { - tarReader := tar.NewReader(pr) - tarWriter := tar.NewWriter(w) - - err := func() error { - - h := &tar.Header{ - Name: "Hives", - Typeflag: tar.TypeDir, - ModTime: time.Now(), - } - prepareWinHeader(h) - if err := tarWriter.WriteHeader(h); err != nil { - return err - } - - h = &tar.Header{ - Name: "Files", - Typeflag: tar.TypeDir, - ModTime: time.Now(), - } - prepareWinHeader(h) - if err := tarWriter.WriteHeader(h); err != nil { - return err - } - - for { - h, err := tarReader.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - h.Name = "Files/" + h.Name - if h.Linkname != "" { - h.Linkname = "Files/" + h.Linkname - } - prepareWinHeader(h) - addSecurityDescriptor(h) - if err := tarWriter.WriteHeader(h); err != nil { - return err - } - if h.Size > 0 { - if _, err := io.Copy(tarWriter, tarReader); err != nil { - return err - } - } - } - return tarWriter.Close() - }() - if err != nil { - logrus.Errorf("makeWindowsLayer %+v", err) - } - pw.CloseWithError(err) - done <- err - return - }() - - discard := func(err error) { - pw.CloseWithError(err) - } - - return pw, discard, done -} diff --git a/vendor/github.com/moby/buildkit/vendor.conf b/vendor/github.com/moby/buildkit/vendor.conf deleted file mode 100644 index 070bff650925..000000000000 --- a/vendor/github.com/moby/buildkit/vendor.conf +++ /dev/null @@ -1,69 +0,0 @@ -github.com/pkg/errors v0.8.0 -go.etcd.io/bbolt v1.3.1-etcd.8 - -github.com/stretchr/testify v1.1.4 -github.com/davecgh/go-spew v1.1.0 -github.com/pmezard/go-difflib v1.0.0 -golang.org/x/sys 1b2967e3c290b7c545b3db0deeda16e9be4f98a2 - -github.com/containerd/containerd 1a5f9a3434ac53c0e9d27093ecc588e0c281c333 -github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40 -golang.org/x/sync 
450f422ab23cf9881c94e2db30cac0eb1b7cf80c -github.com/sirupsen/logrus v1.0.0 -google.golang.org/grpc v1.12.0 -github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 -golang.org/x/net 0ed95abb35c445290478a5348a7b38bb154135fd -github.com/gogo/protobuf v1.0.0 -github.com/gogo/googleapis b23578765ee54ff6bceff57f397d833bf4ca6869 -github.com/golang/protobuf v1.1.0 -github.com/containerd/continuity bd77b46c8352f74eb12c85bdc01f4b90f69d66b4 -github.com/opencontainers/image-spec v1.0.1 -github.com/opencontainers/runc a00bf0190895aa465a5fbed0268888e2c8ddfe85 -github.com/Microsoft/go-winio v0.4.11 -github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c -github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353 # v1.0.1-45-geba862d -github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3 -github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23 -google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 -golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 -github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 -github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16 -github.com/Microsoft/hcsshim v0.7.9 -golang.org/x/crypto 0709b304e793a5edb4a2c0145f281ecdc20838a4 -github.com/containerd/cri 8506fe836677cc3bb23a16b68145128243d843b5 # release/1.2 branch - -github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c -github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b -github.com/docker/go-units v0.3.1 -github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716 -golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631 - -github.com/docker/docker 71cd53e4a197b303c6ba086bd584ffd67a884281 -github.com/pkg/profile 5b67d428864e92711fcbd2f8629456121a56d91f - -github.com/tonistiigi/fsutil 2862f6bc5ac9b97124e552a5c108230b38a1b0ca -github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git -github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 -github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b -github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d -github.com/docker/distribution 30578ca32960a4d368bf6db67b0a33c2a1f3dc6f - -github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e75b9e3569de2 -github.com/docker/cli 99576756eb3303b7af8102c502f21a912e3c1af6 https://github.com/tonistiigi/docker-cli.git -github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 -github.com/docker/libnetwork 36d3bed0e9f4b3c8c66df9bd45278bb90b33e911 -github.com/BurntSushi/toml 3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005 -github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb - -github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746 -github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7 -github.com/uber/jaeger-client-go e02c85f9069ea625a96fc4e1afb5e9ac6c569a6d -github.com/apache/thrift b2a4d4ae21c789b689dd162deb819665567f481c -github.com/uber/jaeger-lib c48167d9cae5887393dd5e61efd06a4a48b7fbb3 -github.com/codahale/hdrhistogram f8ad88b59a584afeee9d334eff879b104439117b - -github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc - -# used by dockerfile tests -gotest.tools v2.1.0 -github.com/google/go-cmp v0.2.0 diff --git a/vendor/github.com/moby/buildkit/version/version.go b/vendor/github.com/moby/buildkit/version/version.go deleted file 
mode 100644 index 9a7de98c2166..000000000000 --- a/vendor/github.com/moby/buildkit/version/version.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - Copyright The BuildKit Authors. - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package version - -var ( - // Package is filled at linking time - Package = "github.com/moby/buildkit" - - // Version holds the complete version number. Filled in at linking time. - Version = "0.0.0+unknown" - - // Revision is filled with the VCS (e.g. git) revision being used to build - // the program at linking time. - Revision = "" -) diff --git a/vendor/github.com/moby/buildkit/worker/base/worker.go b/vendor/github.com/moby/buildkit/worker/base/worker.go deleted file mode 100644 index 2c666ef16d0d..000000000000 --- a/vendor/github.com/moby/buildkit/worker/base/worker.go +++ /dev/null @@ -1,481 +0,0 @@ -package base - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/diff" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/rootfs" - cdsnapshot "github.com/containerd/containerd/snapshots" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/blobs" - "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/executor" - "github.com/moby/buildkit/exporter" - imageexporter "github.com/moby/buildkit/exporter/containerimage" - localexporter "github.com/moby/buildkit/exporter/local" - ociexporter "github.com/moby/buildkit/exporter/oci" - "github.com/moby/buildkit/frontend" - gw "github.com/moby/buildkit/frontend/gateway/client" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/snapshot/imagerefchecker" - "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver/ops" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/source" - "github.com/moby/buildkit/source/containerimage" - "github.com/moby/buildkit/source/git" - "github.com/moby/buildkit/source/http" - "github.com/moby/buildkit/source/local" - "github.com/moby/buildkit/util/contentutil" - "github.com/moby/buildkit/util/progress" - "github.com/moby/buildkit/util/resolver" - "github.com/moby/buildkit/worker" - digest "github.com/opencontainers/go-digest" - ociidentity "github.com/opencontainers/image-spec/identity" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "golang.org/x/sync/errgroup" -) - -const labelCreatedAt = "buildkit/createdat" - -// TODO: this file should be removed. containerd defines ContainerdWorker, oci defines OCIWorker. There is no base worker. - -// WorkerOpt is specific to a worker. -// See also CommonOpt. 
-type WorkerOpt struct { - ID string - Labels map[string]string - Platforms []specs.Platform - GCPolicy []client.PruneInfo - SessionManager *session.Manager - MetadataStore *metadata.Store - Executor executor.Executor - Snapshotter snapshot.Snapshotter - ContentStore content.Store - Applier diff.Applier - Differ diff.Comparer - ImageStore images.Store // optional - ResolveOptionsFunc resolver.ResolveOptionsFunc -} - -// Worker is a local worker instance with dedicated snapshotter, cache, and so on. -// TODO: s/Worker/OpWorker/g ? -type Worker struct { - WorkerOpt - CacheManager cache.Manager - SourceManager *source.Manager - Exporters map[string]exporter.Exporter - ImageSource source.Source -} - -// NewWorker instantiates a local worker -func NewWorker(opt WorkerOpt) (*Worker, error) { - imageRefChecker := imagerefchecker.New(imagerefchecker.Opt{ - ImageStore: opt.ImageStore, - Snapshotter: opt.Snapshotter, - ContentStore: opt.ContentStore, - }) - - cm, err := cache.NewManager(cache.ManagerOpt{ - Snapshotter: opt.Snapshotter, - MetadataStore: opt.MetadataStore, - PruneRefChecker: imageRefChecker, - }) - if err != nil { - return nil, err - } - - sm, err := source.NewManager() - if err != nil { - return nil, err - } - - is, err := containerimage.NewSource(containerimage.SourceOpt{ - Snapshotter: opt.Snapshotter, - ContentStore: opt.ContentStore, - SessionManager: opt.SessionManager, - Applier: opt.Applier, - ImageStore: opt.ImageStore, - CacheAccessor: cm, - ResolverOpt: opt.ResolveOptionsFunc, - }) - if err != nil { - return nil, err - } - - sm.Register(is) - - gs, err := git.NewSource(git.Opt{ - CacheAccessor: cm, - MetadataStore: opt.MetadataStore, - }) - if err != nil { - return nil, err - } - - sm.Register(gs) - - hs, err := http.NewSource(http.Opt{ - CacheAccessor: cm, - MetadataStore: opt.MetadataStore, - }) - if err != nil { - return nil, err - } - - sm.Register(hs) - - ss, err := local.NewSource(local.Opt{ - SessionManager: opt.SessionManager, - CacheAccessor: cm, - MetadataStore: opt.MetadataStore, - }) - if err != nil { - return nil, err - } - sm.Register(ss) - - exporters := map[string]exporter.Exporter{} - - iw, err := imageexporter.NewImageWriter(imageexporter.WriterOpt{ - Snapshotter: opt.Snapshotter, - ContentStore: opt.ContentStore, - Differ: opt.Differ, - }) - if err != nil { - return nil, err - } - - imageExporter, err := imageexporter.New(imageexporter.Opt{ - Images: opt.ImageStore, - SessionManager: opt.SessionManager, - ImageWriter: iw, - ResolverOpt: opt.ResolveOptionsFunc, - }) - if err != nil { - return nil, err - } - exporters[client.ExporterImage] = imageExporter - - localExporter, err := localexporter.New(localexporter.Opt{ - SessionManager: opt.SessionManager, - }) - if err != nil { - return nil, err - } - exporters[client.ExporterLocal] = localExporter - - ociExporter, err := ociexporter.New(ociexporter.Opt{ - SessionManager: opt.SessionManager, - ImageWriter: iw, - Variant: ociexporter.VariantOCI, - }) - if err != nil { - return nil, err - } - exporters[client.ExporterOCI] = ociExporter - - dockerExporter, err := ociexporter.New(ociexporter.Opt{ - SessionManager: opt.SessionManager, - ImageWriter: iw, - Variant: ociexporter.VariantDocker, - }) - if err != nil { - return nil, err - } - exporters[client.ExporterDocker] = dockerExporter - - return &Worker{ - WorkerOpt: opt, - CacheManager: cm, - SourceManager: sm, - Exporters: exporters, - ImageSource: is, - }, nil -} - -func (w *Worker) ID() string { - return w.WorkerOpt.ID -} - -func (w *Worker) Labels() 
map[string]string { - return w.WorkerOpt.Labels -} - -func (w *Worker) Platforms() []specs.Platform { - return w.WorkerOpt.Platforms -} - -func (w *Worker) GCPolicy() []client.PruneInfo { - return w.WorkerOpt.GCPolicy -} - -func (w *Worker) LoadRef(id string, hidden bool) (cache.ImmutableRef, error) { - var opts []cache.RefOption - if hidden { - opts = append(opts, cache.NoUpdateLastUsed) - } - return w.CacheManager.Get(context.TODO(), id, opts...) -} - -func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge) (solver.Op, error) { - if baseOp, ok := v.Sys().(*pb.Op); ok { - switch op := baseOp.Op.(type) { - case *pb.Op_Source: - return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, w) - case *pb.Op_Exec: - return ops.NewExecOp(v, op, w.CacheManager, w.SessionManager, w.MetadataStore, w.Executor, w) - case *pb.Op_Build: - return ops.NewBuildOp(v, op, s, w) - } - } - return nil, errors.Errorf("could not resolve %v", v) -} - -func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error) { - // ImageSource is typically source/containerimage - resolveImageConfig, ok := w.ImageSource.(resolveImageConfig) - if !ok { - return "", nil, errors.Errorf("worker %q does not implement ResolveImageConfig", w.ID()) - } - return resolveImageConfig.ResolveImageConfig(ctx, ref, opt) -} - -type resolveImageConfig interface { - ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error) -} - -func (w *Worker) Exec(ctx context.Context, meta executor.Meta, rootFS cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error { - active, err := w.CacheManager.New(ctx, rootFS) - if err != nil { - return err - } - defer active.Release(context.TODO()) - return w.Executor.Exec(ctx, meta, active, nil, stdin, stdout, stderr) -} - -func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) { - return w.CacheManager.DiskUsage(ctx, opt) -} - -func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, opt ...client.PruneInfo) error { - return w.CacheManager.Prune(ctx, ch, opt...) 
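
Everything from `LoadRef` through `Prune` above is a thin delegation to the worker's embedded `CacheManager`, so callers inspect and reclaim cache space through the `Worker` rather than reaching into the cache directly. A rough usage sketch under that assumption; the `UsageInfo` fields referenced here (`ID`, `Size`, `InUse`) are the ones exercised elsewhere in this diff:

    du, err := w.DiskUsage(ctx, client.DiskUsageInfo{})
    if err != nil {
        return err
    }
    for _, rec := range du {
        fmt.Printf("%s\t%d bytes\tin use: %v\n", rec.ID, rec.Size, rec.InUse)
    }
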
-} - -func (w *Worker) Exporter(name string) (exporter.Exporter, error) { - exp, ok := w.Exporters[name] - if !ok { - return nil, errors.Errorf("exporter %q could not be found", name) - } - return exp, nil -} - -func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool) (*solver.Remote, error) { - diffPairs, err := blobs.GetDiffPairs(ctx, w.ContentStore, w.Snapshotter, w.Differ, ref, createIfNeeded) - if err != nil { - return nil, errors.Wrap(err, "failed calculating diff pairs for exported snapshot") - } - if len(diffPairs) == 0 { - return nil, nil - } - - createdTimes := getCreatedTimes(ref) - if len(createdTimes) != len(diffPairs) { - return nil, errors.Errorf("invalid createdtimes/diffpairs") - } - - descs := make([]ocispec.Descriptor, len(diffPairs)) - - for i, dp := range diffPairs { - info, err := w.ContentStore.Info(ctx, dp.Blobsum) - if err != nil { - return nil, err - } - - tm, err := createdTimes[i].MarshalText() - if err != nil { - return nil, err - } - - descs[i] = ocispec.Descriptor{ - Digest: dp.Blobsum, - Size: info.Size, - MediaType: images.MediaTypeDockerSchema2LayerGzip, - Annotations: map[string]string{ - "containerd.io/uncompressed": dp.DiffID.String(), - labelCreatedAt: string(tm), - }, - } - } - - return &solver.Remote{ - Descriptors: descs, - Provider: w.ContentStore, - }, nil -} - -func getCreatedTimes(ref cache.ImmutableRef) (out []time.Time) { - parent := ref.Parent() - if parent != nil { - defer parent.Release(context.TODO()) - out = getCreatedTimes(parent) - } - return append(out, cache.GetCreatedAt(ref.Metadata())) -} - -func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) { - eg, gctx := errgroup.WithContext(ctx) - for _, desc := range remote.Descriptors { - func(desc ocispec.Descriptor) { - eg.Go(func() error { - done := oneOffProgress(ctx, fmt.Sprintf("pulling %s", desc.Digest)) - return done(contentutil.Copy(gctx, w.ContentStore, remote.Provider, desc)) - }) - }(desc) - } - - if err := eg.Wait(); err != nil { - return nil, err - } - - cs, release := snapshot.NewContainerdSnapshotter(w.Snapshotter) - defer release() - - unpackProgressDone := oneOffProgress(ctx, "unpacking") - chainIDs, err := w.unpack(ctx, remote.Descriptors, cs) - if err != nil { - return nil, unpackProgressDone(err) - } - unpackProgressDone(nil) - - for i, chainID := range chainIDs { - tm := time.Now() - if tmstr, ok := remote.Descriptors[i].Annotations[labelCreatedAt]; ok { - if err := (&tm).UnmarshalText([]byte(tmstr)); err != nil { - return nil, err - } - } - ref, err := w.CacheManager.Get(ctx, chainID, cache.WithDescription(fmt.Sprintf("imported %s", remote.Descriptors[i].Digest)), cache.WithCreationTime(tm)) - if err != nil { - return nil, err - } - if i == len(remote.Descriptors)-1 { - return ref, nil - } - ref.Release(context.TODO()) - } - return nil, errors.Errorf("unreachable") -} - -func (w *Worker) unpack(ctx context.Context, descs []ocispec.Descriptor, s cdsnapshot.Snapshotter) ([]string, error) { - layers, err := getLayers(ctx, descs) - if err != nil { - return nil, err - } - - var chain []digest.Digest - for _, layer := range layers { - if _, err := rootfs.ApplyLayer(ctx, layer, chain, s, w.Applier); err != nil { - return nil, err - } - chain = append(chain, layer.Diff.Digest) - - chainID := ociidentity.ChainID(chain) - if err := w.Snapshotter.SetBlob(ctx, string(chainID), layer.Diff.Digest, layer.Blob.Digest); err != nil { - return nil, err - } - } - - ids := make([]string, len(chain)) - for i 
:= range chain { - ids[i] = string(ociidentity.ChainID(chain[:i+1])) - } - - return ids, nil -} - -// Labels returns default labels -// utility function. could be moved to the constructor logic? -func Labels(executor, snapshotter string) map[string]string { - hostname, err := os.Hostname() - if err != nil { - hostname = "unknown" - } - labels := map[string]string{ - worker.LabelExecutor: executor, - worker.LabelSnapshotter: snapshotter, - worker.LabelHostname: hostname, - } - return labels -} - -// ID reads the worker id from the `workerid` file. -// If not exist, it creates a random one, -func ID(root string) (string, error) { - f := filepath.Join(root, "workerid") - b, err := ioutil.ReadFile(f) - if err != nil { - if os.IsNotExist(err) { - id := identity.NewID() - err := ioutil.WriteFile(f, []byte(id), 0400) - return id, err - } else { - return "", err - } - } - return string(b), nil -} - -func getLayers(ctx context.Context, descs []ocispec.Descriptor) ([]rootfs.Layer, error) { - layers := make([]rootfs.Layer, len(descs)) - for i, desc := range descs { - diffIDStr := desc.Annotations["containerd.io/uncompressed"] - if diffIDStr == "" { - return nil, errors.Errorf("%s missing uncompressed digest", desc.Digest) - } - diffID, err := digest.Parse(diffIDStr) - if err != nil { - return nil, err - } - layers[i].Diff = ocispec.Descriptor{ - MediaType: ocispec.MediaTypeImageLayer, - Digest: diffID, - } - layers[i].Blob = ocispec.Descriptor{ - MediaType: desc.MediaType, - Digest: desc.Digest, - Size: desc.Size, - } - } - return layers, nil -} - -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.FromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - // TODO: set error on status - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err - } -} diff --git a/vendor/github.com/moby/buildkit/worker/base/worker_test.go b/vendor/github.com/moby/buildkit/worker/base/worker_test.go deleted file mode 100644 index a80372baaaf0..000000000000 --- a/vendor/github.com/moby/buildkit/worker/base/worker_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package base - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestID(t *testing.T) { - t.Parallel() - tmpdir, err := ioutil.TempDir("", "worker-base-test-id") - require.NoError(t, err) - - id0, err := ID(tmpdir) - require.NoError(t, err) - - id1, err := ID(tmpdir) - require.NoError(t, err) - - require.Equal(t, id0, id1) - - // reset tmpdir - require.NoError(t, os.RemoveAll(tmpdir)) - require.NoError(t, os.MkdirAll(tmpdir, 0700)) - - id2, err := ID(tmpdir) - require.NoError(t, err) - - require.NotEqual(t, id0, id2) - - require.NoError(t, os.RemoveAll(tmpdir)) -} diff --git a/vendor/github.com/moby/buildkit/worker/cacheresult.go b/vendor/github.com/moby/buildkit/worker/cacheresult.go deleted file mode 100644 index fb11525d7724..000000000000 --- a/vendor/github.com/moby/buildkit/worker/cacheresult.go +++ /dev/null @@ -1,100 +0,0 @@ -package worker - -import ( - "context" - "strings" - "time" - - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/solver" - "github.com/pkg/errors" -) - -func NewCacheResultStorage(wc *Controller) solver.CacheResultStorage { - return &cacheResultStorage{ - wc: wc, - } -} - -type cacheResultStorage struct { - wc *Controller -} - -func (s *cacheResultStorage) Save(res solver.Result) (solver.CacheResult, error) { - ref, ok := 
res.Sys().(*WorkerRef) - if !ok { - return solver.CacheResult{}, errors.Errorf("invalid result: %T", res.Sys()) - } - if ref.ImmutableRef != nil { - if !cache.HasCachePolicyRetain(ref.ImmutableRef) { - if err := cache.CachePolicyRetain(ref.ImmutableRef); err != nil { - return solver.CacheResult{}, err - } - ref.ImmutableRef.Metadata().Commit() - } - } - return solver.CacheResult{ID: ref.ID(), CreatedAt: time.Now()}, nil -} -func (s *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult) (solver.Result, error) { - return s.load(res.ID, false) -} - -func (s *cacheResultStorage) getWorkerRef(id string) (Worker, string, error) { - workerID, refID, err := parseWorkerRef(id) - if err != nil { - return nil, "", err - } - w, err := s.wc.Get(workerID) - if err != nil { - return nil, "", err - } - return w, refID, nil -} - -func (s *cacheResultStorage) load(id string, hidden bool) (solver.Result, error) { - w, refID, err := s.getWorkerRef(id) - if err != nil { - return nil, err - } - if refID == "" { - return NewWorkerRefResult(nil, w), nil - } - ref, err := w.LoadRef(refID, hidden) - if err != nil { - return nil, err - } - return NewWorkerRefResult(ref, w), nil -} - -func (s *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult) (*solver.Remote, error) { - w, refID, err := s.getWorkerRef(res.ID) - if err != nil { - return nil, err - } - ref, err := w.LoadRef(refID, true) - if err != nil { - return nil, err - } - defer ref.Release(context.TODO()) - remote, err := w.GetRemote(ctx, ref, false) - if err != nil { - return nil, nil // ignore error. loadRemote is best effort - } - return remote, nil -} -func (s *cacheResultStorage) Exists(id string) bool { - ref, err := s.load(id, true) - if err != nil { - return false - } - ref.Release(context.TODO()) - return true -} - -func parseWorkerRef(id string) (string, string, error) { - parts := strings.Split(id, "::") - if len(parts) != 2 { - return "", "", errors.Errorf("invalid workerref id: %s", id) - } - return parts[0], parts[1], nil -} diff --git a/vendor/github.com/moby/buildkit/worker/containerd/containerd.go b/vendor/github.com/moby/buildkit/worker/containerd/containerd.go deleted file mode 100644 index 3e1f2cc62cd2..000000000000 --- a/vendor/github.com/moby/buildkit/worker/containerd/containerd.go +++ /dev/null @@ -1,119 +0,0 @@ -package containerd - -import ( - "context" - "os" - "path/filepath" - "strings" - "time" - - "github.com/containerd/containerd" - introspection "github.com/containerd/containerd/api/services/introspection/v1" - "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/snapshots" - "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/executor/containerdexecutor" - "github.com/moby/buildkit/identity" - containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" - "github.com/moby/buildkit/util/network" - "github.com/moby/buildkit/util/throttle" - "github.com/moby/buildkit/util/winlayers" - "github.com/moby/buildkit/worker/base" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// NewWorkerOpt creates a WorkerOpt. -// But it does not set the following fields: -// - SessionManager -func NewWorkerOpt(root string, address, snapshotterName, ns string, labels map[string]string, opts ...containerd.ClientOpt) (base.WorkerOpt, error) { - opts = append(opts, containerd.WithDefaultNamespace(ns)) - client, err := containerd.New(address, opts...) 
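
The `cacheresult.go` storage deleted just above keys cache results by a composite ID of the form `<workerID>::<refID>`, produced by `WorkerRef.ID()` (see `result.go` further down) and split back apart by `parseWorkerRef`. A small round-trip illustration with made-up IDs:

    id := "worker-a::ref123"                          // shape produced by WorkerRef.ID()
    workerID, refID, err := parseWorkerRef(id)        // -> "worker-a", "ref123", nil
    _, _, err = parseWorkerRef("missing-separator")   // -> "invalid workerref id" error
    _, _, _ = workerID, refID, err
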
- if err != nil { - return base.WorkerOpt{}, errors.Wrapf(err, "failed to connect client to %q . make sure containerd is running", address) - } - return newContainerd(root, client, snapshotterName, ns, labels) -} - -func newContainerd(root string, client *containerd.Client, snapshotterName, ns string, labels map[string]string) (base.WorkerOpt, error) { - if strings.Contains(snapshotterName, "/") { - return base.WorkerOpt{}, errors.Errorf("bad snapshotter name: %q", snapshotterName) - } - name := "containerd-" + snapshotterName - root = filepath.Join(root, name) - if err := os.MkdirAll(root, 0700); err != nil { - return base.WorkerOpt{}, errors.Wrapf(err, "failed to create %s", root) - } - - md, err := metadata.NewStore(filepath.Join(root, "metadata.db")) - if err != nil { - return base.WorkerOpt{}, err - } - df := client.DiffService() - // TODO: should use containerd daemon instance ID (containerd/containerd#1862)? - id, err := base.ID(root) - if err != nil { - return base.WorkerOpt{}, err - } - xlabels := base.Labels("containerd", snapshotterName) - for k, v := range labels { - xlabels[k] = v - } - - throttledGC := throttle.Throttle(time.Second, func() { - // TODO: how to avoid this? - ctx := context.TODO() - snapshotter := client.SnapshotService(snapshotterName) - ctx = namespaces.WithNamespace(ctx, ns) - key := identity.NewID() - if _, err := snapshotter.Prepare(ctx, key, "", snapshots.WithLabels(map[string]string{ - "containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339Nano), - })); err != nil { - logrus.Errorf("GC error: %+v", err) - } - if err := snapshotter.Remove(ctx, key); err != nil { - logrus.Errorf("GC error: %+v", err) - } - }) - - gc := func(ctx context.Context) error { - throttledGC() - return nil - } - - cs := containerdsnapshot.NewContentStore(client.ContentStore(), ns, gc) - - resp, err := client.IntrospectionService().Plugins(context.TODO(), &introspection.PluginsRequest{Filters: []string{"type==io.containerd.runtime.v1"}}) - if err != nil { - return base.WorkerOpt{}, errors.Wrap(err, "failed to list runtime plugin") - } - if len(resp.Plugins) == 0 { - return base.WorkerOpt{}, errors.Wrap(err, "failed to get runtime plugin") - } - - var platforms []specs.Platform - for _, plugin := range resp.Plugins { - for _, p := range plugin.Platforms { - platforms = append(platforms, specs.Platform{ - OS: p.OS, - Architecture: p.Architecture, - Variant: p.Variant, - }) - } - } - - opt := base.WorkerOpt{ - ID: id, - Labels: xlabels, - MetadataStore: md, - Executor: containerdexecutor.New(client, root, "", network.Default()), - Snapshotter: containerdsnapshot.NewSnapshotter(client.SnapshotService(snapshotterName), cs, md, ns, gc), - ContentStore: cs, - Applier: winlayers.NewFileSystemApplierWithWindows(cs, df), - Differ: winlayers.NewWalkingDiffWithWindows(cs, df), - ImageStore: client.ImageService(), - Platforms: platforms, - } - return opt, nil -} diff --git a/vendor/github.com/moby/buildkit/worker/filter.go b/vendor/github.com/moby/buildkit/worker/filter.go deleted file mode 100644 index c94a6265f381..000000000000 --- a/vendor/github.com/moby/buildkit/worker/filter.go +++ /dev/null @@ -1,33 +0,0 @@ -package worker - -import ( - "strings" - - "github.com/containerd/containerd/filters" -) - -func adaptWorker(w Worker) filters.Adaptor { - return filters.AdapterFunc(func(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "id": - return w.ID(), len(w.ID()) > 0 - case "labels": - return checkMap(fieldpath[1:], 
w.Labels()) - } - - return "", false - }) -} - -func checkMap(fieldpath []string, m map[string]string) (string, bool) { - if len(m) == 0 { - return "", false - } - - value, ok := m[strings.Join(fieldpath, ".")] - return value, ok -} diff --git a/vendor/github.com/moby/buildkit/worker/result.go b/vendor/github.com/moby/buildkit/worker/result.go deleted file mode 100644 index 9aa6af416748..000000000000 --- a/vendor/github.com/moby/buildkit/worker/result.go +++ /dev/null @@ -1,40 +0,0 @@ -package worker - -import ( - "context" - - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/solver" -) - -func NewWorkerRefResult(ref cache.ImmutableRef, worker Worker) solver.Result { - return &workerRefResult{&WorkerRef{ImmutableRef: ref, Worker: worker}} -} - -type WorkerRef struct { - ImmutableRef cache.ImmutableRef - Worker Worker -} - -func (wr *WorkerRef) ID() string { - refID := "" - if wr.ImmutableRef != nil { - refID = wr.ImmutableRef.ID() - } - return wr.Worker.ID() + "::" + refID -} - -type workerRefResult struct { - *WorkerRef -} - -func (r *workerRefResult) Release(ctx context.Context) error { - if r.ImmutableRef == nil { - return nil - } - return r.ImmutableRef.Release(ctx) -} - -func (r *workerRefResult) Sys() interface{} { - return r.WorkerRef -} diff --git a/vendor/github.com/moby/buildkit/worker/runc/runc.go b/vendor/github.com/moby/buildkit/worker/runc/runc.go deleted file mode 100644 index 404c44df2830..000000000000 --- a/vendor/github.com/moby/buildkit/worker/runc/runc.go +++ /dev/null @@ -1,112 +0,0 @@ -package runc - -import ( - "context" - "os" - "path/filepath" - "time" - - "github.com/containerd/containerd/content/local" - "github.com/containerd/containerd/diff/apply" - "github.com/containerd/containerd/diff/walking" - ctdmetadata "github.com/containerd/containerd/metadata" - "github.com/containerd/containerd/platforms" - ctdsnapshot "github.com/containerd/containerd/snapshots" - "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/executor/runcexecutor" - containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" - "github.com/moby/buildkit/util/network" - "github.com/moby/buildkit/util/throttle" - "github.com/moby/buildkit/util/winlayers" - "github.com/moby/buildkit/worker/base" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" - bolt "go.etcd.io/bbolt" -) - -// SnapshotterFactory instantiates a snapshotter -type SnapshotterFactory struct { - Name string - New func(root string) (ctdsnapshot.Snapshotter, error) -} - -// NewWorkerOpt creates a WorkerOpt. 
-// But it does not set the following fields: -// - SessionManager -func NewWorkerOpt(root string, snFactory SnapshotterFactory, rootless bool, labels map[string]string) (base.WorkerOpt, error) { - var opt base.WorkerOpt - name := "runc-" + snFactory.Name - root = filepath.Join(root, name) - if err := os.MkdirAll(root, 0700); err != nil { - return opt, err - } - md, err := metadata.NewStore(filepath.Join(root, "metadata.db")) - if err != nil { - return opt, err - } - exe, err := runcexecutor.New(runcexecutor.Opt{ - // Root directory - Root: filepath.Join(root, "executor"), - // without root privileges - Rootless: rootless, - }, network.Default()) - if err != nil { - return opt, err - } - s, err := snFactory.New(filepath.Join(root, "snapshots")) - if err != nil { - return opt, err - } - - c, err := local.NewStore(filepath.Join(root, "content")) - if err != nil { - return opt, err - } - - db, err := bolt.Open(filepath.Join(root, "containerdmeta.db"), 0644, nil) - if err != nil { - return opt, err - } - - mdb := ctdmetadata.NewDB(db, c, map[string]ctdsnapshot.Snapshotter{ - snFactory.Name: s, - }) - if err := mdb.Init(context.TODO()); err != nil { - return opt, err - } - - throttledGC := throttle.Throttle(time.Second, func() { - if _, err := mdb.GarbageCollect(context.TODO()); err != nil { - logrus.Errorf("GC error: %+v", err) - } - }) - - gc := func(ctx context.Context) error { - throttledGC() - return nil - } - - c = containerdsnapshot.NewContentStore(mdb.ContentStore(), "buildkit", gc) - - id, err := base.ID(root) - if err != nil { - return opt, err - } - xlabels := base.Labels("oci", snFactory.Name) - for k, v := range labels { - xlabels[k] = v - } - opt = base.WorkerOpt{ - ID: id, - Labels: xlabels, - MetadataStore: md, - Executor: exe, - Snapshotter: containerdsnapshot.NewSnapshotter(mdb.Snapshotter(snFactory.Name), c, md, "buildkit", gc), - ContentStore: c, - Applier: winlayers.NewFileSystemApplierWithWindows(c, apply.NewFileSystemApplier(c)), - Differ: winlayers.NewWalkingDiffWithWindows(c, walking.NewWalkingDiff(c)), - ImageStore: nil, // explicitly - Platforms: []specs.Platform{platforms.Normalize(platforms.DefaultSpec())}, - } - return opt, nil -} diff --git a/vendor/github.com/moby/buildkit/worker/runc/runc_test.go b/vendor/github.com/moby/buildkit/worker/runc/runc_test.go deleted file mode 100644 index fd6e643b93d7..000000000000 --- a/vendor/github.com/moby/buildkit/worker/runc/runc_test.go +++ /dev/null @@ -1,167 +0,0 @@ -// +build linux,!no_runc_worker - -package runc - -import ( - "bytes" - "context" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "testing" - - "github.com/containerd/containerd/namespaces" - ctdsnapshot "github.com/containerd/containerd/snapshots" - "github.com/containerd/containerd/snapshots/overlay" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/executor" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/snapshot" - "github.com/moby/buildkit/source" - "github.com/moby/buildkit/worker/base" - "github.com/stretchr/testify/require" -) - -func TestRuncWorker(t *testing.T) { - t.Parallel() - if os.Getuid() != 0 { - t.Skip("requires root") - } - - if _, err := exec.LookPath("runc"); err != nil { - if _, err := exec.LookPath("buildkit-runc"); err != nil { - t.Skipf("no runc found: %s", err.Error()) - } - } - - ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - - // this should be an example or e2e test - tmpdir, err := ioutil.TempDir("", "workertest") - require.NoError(t, err) - defer 
os.RemoveAll(tmpdir) - - snFactory := SnapshotterFactory{ - Name: "overlayfs", - New: func(root string) (ctdsnapshot.Snapshotter, error) { - return overlay.NewSnapshotter(root) - }, - } - rootless := false - workerOpt, err := NewWorkerOpt(tmpdir, snFactory, rootless, nil) - require.NoError(t, err) - - workerOpt.SessionManager, err = session.NewManager() - require.NoError(t, err) - - w, err := base.NewWorker(workerOpt) - require.NoError(t, err) - - img, err := source.NewImageIdentifier("docker.io/library/busybox:latest") - require.NoError(t, err) - - src, err := w.SourceManager.Resolve(ctx, img) - require.NoError(t, err) - - snap, err := src.Snapshot(ctx) - require.NoError(t, err) - - mounts, err := snap.Mount(ctx, false) - require.NoError(t, err) - - lm := snapshot.LocalMounter(mounts) - - target, err := lm.Mount() - require.NoError(t, err) - - f, err := os.Open(target) - require.NoError(t, err) - - names, err := f.Readdirnames(-1) - require.NoError(t, err) - require.True(t, len(names) > 5) - - err = f.Close() - require.NoError(t, err) - - lm.Unmount() - require.NoError(t, err) - - du, err := w.CacheManager.DiskUsage(ctx, client.DiskUsageInfo{}) - require.NoError(t, err) - - // for _, d := range du { - // fmt.Printf("du: %+v\n", d) - // } - - for _, d := range du { - require.True(t, d.Size >= 8192) - } - - meta := executor.Meta{ - Args: []string{"/bin/sh", "-c", "mkdir /run && echo \"foo\" > /run/bar"}, - Cwd: "/", - } - - stderr := bytes.NewBuffer(nil) - err = w.Executor.Exec(ctx, meta, snap, nil, nil, nil, &nopCloser{stderr}) - require.Error(t, err) // Read-only root - // typical error is like `mkdir /.../rootfs/proc: read-only file system`. - // make sure the error is caused before running `echo foo > /bar`. - require.Contains(t, stderr.String(), "read-only file system") - - root, err := w.CacheManager.New(ctx, snap) - require.NoError(t, err) - - err = w.Executor.Exec(ctx, meta, root, nil, nil, nil, &nopCloser{stderr}) - require.NoError(t, err) - - meta = executor.Meta{ - Args: []string{"/bin/ls", "/etc/resolv.conf"}, - Cwd: "/", - } - - err = w.Executor.Exec(ctx, meta, root, nil, nil, nil, &nopCloser{stderr}) - require.NoError(t, err) - - rf, err := root.Commit(ctx) - require.NoError(t, err) - - mounts, err = rf.Mount(ctx, false) - require.NoError(t, err) - - lm = snapshot.LocalMounter(mounts) - - target, err = lm.Mount() - require.NoError(t, err) - - //Verifies fix for issue https://github.com/moby/buildkit/issues/429 - dt, err := ioutil.ReadFile(filepath.Join(target, "run", "bar")) - - require.NoError(t, err) - require.Equal(t, string(dt), "foo\n") - - lm.Unmount() - require.NoError(t, err) - - err = rf.Release(ctx) - require.NoError(t, err) - - err = snap.Release(ctx) - require.NoError(t, err) - - du2, err := w.CacheManager.DiskUsage(ctx, client.DiskUsageInfo{}) - require.NoError(t, err) - require.Equal(t, 1, len(du2)-len(du)) - -} - -type nopCloser struct { - io.Writer -} - -func (n *nopCloser) Close() error { - return nil -} diff --git a/vendor/github.com/moby/buildkit/worker/worker.go b/vendor/github.com/moby/buildkit/worker/worker.go deleted file mode 100644 index 47a5d4bf74eb..000000000000 --- a/vendor/github.com/moby/buildkit/worker/worker.go +++ /dev/null @@ -1,43 +0,0 @@ -package worker - -import ( - "context" - "io" - - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/executor" - "github.com/moby/buildkit/exporter" - "github.com/moby/buildkit/frontend" - gw "github.com/moby/buildkit/frontend/gateway/client" - 
"github.com/moby/buildkit/solver" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -type Worker interface { - // ID needs to be unique in the cluster - ID() string - Labels() map[string]string - Platforms() []specs.Platform - GCPolicy() []client.PruneInfo - LoadRef(id string, hidden bool) (cache.ImmutableRef, error) - // ResolveOp resolves Vertex.Sys() to Op implementation. - ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge) (solver.Op, error) - ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error) - // Exec is similar to executor.Exec but without []mount.Mount - Exec(ctx context.Context, meta executor.Meta, rootFS cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error - DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) - Exporter(name string) (exporter.Exporter, error) - Prune(ctx context.Context, ch chan client.UsageInfo, opt ...client.PruneInfo) error - GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool) (*solver.Remote, error) - FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) -} - -// Pre-defined label keys -const ( - labelPrefix = "org.mobyproject.buildkit.worker." - LabelExecutor = labelPrefix + "executor" // "oci" or "containerd" - LabelSnapshotter = labelPrefix + "snapshotter" // containerd snapshotter name ("overlay", "native", ...) - LabelHostname = labelPrefix + "hostname" -) diff --git a/vendor/github.com/moby/buildkit/worker/workercontroller.go b/vendor/github.com/moby/buildkit/worker/workercontroller.go deleted file mode 100644 index b07db8f599f4..000000000000 --- a/vendor/github.com/moby/buildkit/worker/workercontroller.go +++ /dev/null @@ -1,77 +0,0 @@ -package worker - -import ( - "sync" - - "github.com/containerd/containerd/filters" - "github.com/moby/buildkit/client" - "github.com/pkg/errors" -) - -// Controller holds worker instances. -// Currently, only local workers are supported. -type Controller struct { - // TODO: define worker interface and support remote ones - workers sync.Map - defaultID string -} - -// Add adds a local worker -func (c *Controller) Add(w Worker) error { - c.workers.Store(w.ID(), w) - if c.defaultID == "" { - c.defaultID = w.ID() - } - return nil -} - -// List lists workers -func (c *Controller) List(filterStrings ...string) ([]Worker, error) { - filter, err := filters.ParseAll(filterStrings...) 
- if err != nil { - return nil, err - } - var workers []Worker - c.workers.Range(func(k, v interface{}) bool { - w := v.(Worker) - if filter.Match(adaptWorker(w)) { - workers = append(workers, w) - } - return true - }) - return workers, nil -} - -// GetDefault returns the default local worker -func (c *Controller) GetDefault() (Worker, error) { - if c.defaultID == "" { - return nil, errors.Errorf("no default worker") - } - return c.Get(c.defaultID) -} - -func (c *Controller) Get(id string) (Worker, error) { - v, ok := c.workers.Load(id) - if !ok { - return nil, errors.Errorf("worker %s not found", id) - } - return v.(Worker), nil -} - -// TODO: add Get(Constraint) (*Worker, error) - -func (c *Controller) WorkerInfos() []client.WorkerInfo { - workers, err := c.List() - if err != nil { - return nil - } - out := make([]client.WorkerInfo, 0, len(workers)) - for _, w := range workers { - out = append(out, client.WorkerInfo{ - ID: w.ID(), - Labels: w.Labels(), - Platforms: w.Platforms(), - }) - } - return out -} diff --git a/vendor/github.com/mtrmac/gpgme/.gitignore b/vendor/github.com/mtrmac/gpgme/.gitignore deleted file mode 100644 index 0210b26e0387..000000000000 --- a/vendor/github.com/mtrmac/gpgme/.gitignore +++ /dev/null @@ -1 +0,0 @@ -testdata/gpghome/random_seed diff --git a/vendor/github.com/mtrmac/gpgme/LICENSE b/vendor/github.com/mtrmac/gpgme/LICENSE deleted file mode 100644 index 06d4ab77316f..000000000000 --- a/vendor/github.com/mtrmac/gpgme/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2015, James Fargher -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/mtrmac/gpgme/README.md b/vendor/github.com/mtrmac/gpgme/README.md deleted file mode 100644 index 4770b82a8e61..000000000000 --- a/vendor/github.com/mtrmac/gpgme/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# GPGME (golang) - -Go wrapper for the GPGME library. - -This library is intended for use with desktop applications. If you are looking to add OpenPGP support to a server application I suggest you first look at [golang.org/x/crypto/openpgp](https://godoc.org/golang.org/x/crypto/openpgp). 
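Before the Installation notes below, a shape-of-the-API sketch (hedged: assembled from the Context, Data, and Verify helpers deleted later in this listing, imported under the upstream path the README suggests; message.asc is a hypothetical normal-signed input):

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"

        "github.com/proglottis/gpgme"
    )

    func main() {
        signedMessage, err := ioutil.ReadFile("message.asc") // hypothetical input file
        if err != nil {
            log.Fatal(err)
        }
        ctx, err := gpgme.New()
        if err != nil {
            log.Fatal(err)
        }
        defer ctx.Release()
        sig, err := gpgme.NewDataBytes(signedMessage)
        if err != nil {
            log.Fatal(err)
        }
        plain, err := gpgme.NewData() // verified plaintext is written here
        if err != nil {
            log.Fatal(err)
        }
        // For a normal (non-detached) signature the text travels inside sig,
        // so signedText is nil.
        _, sigs, err := ctx.Verify(sig, nil, plain)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%d signature(s) checked\n", len(sigs))
    }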
- -## Installation - - go get -u github.com/proglottis/gpgme - -## Documentation - -* [godoc](https://godoc.org/github.com/proglottis/gpgme) diff --git a/vendor/github.com/mtrmac/gpgme/callbacks.go b/vendor/github.com/mtrmac/gpgme/callbacks.go deleted file mode 100644 index d1dc610d42a8..000000000000 --- a/vendor/github.com/mtrmac/gpgme/callbacks.go +++ /dev/null @@ -1,42 +0,0 @@ -package gpgme - -import ( - "sync" -) - -var callbacks struct { - sync.Mutex - m map[uintptr]interface{} - c uintptr -} - -func callbackAdd(v interface{}) uintptr { - callbacks.Lock() - defer callbacks.Unlock() - if callbacks.m == nil { - callbacks.m = make(map[uintptr]interface{}) - } - callbacks.c++ - ret := callbacks.c - callbacks.m[ret] = v - return ret -} - -func callbackLookup(c uintptr) interface{} { - callbacks.Lock() - defer callbacks.Unlock() - ret := callbacks.m[c] - if ret == nil { - panic("callback pointer not found") - } - return ret -} - -func callbackDelete(c uintptr) { - callbacks.Lock() - defer callbacks.Unlock() - if callbacks.m[c] == nil { - panic("callback pointer not found") - } - delete(callbacks.m, c) -} diff --git a/vendor/github.com/mtrmac/gpgme/data.go b/vendor/github.com/mtrmac/gpgme/data.go deleted file mode 100644 index eebc9726347d..000000000000 --- a/vendor/github.com/mtrmac/gpgme/data.go +++ /dev/null @@ -1,191 +0,0 @@ -package gpgme - -// #include <string.h> -// #include <gpgme.h> -// #include <errno.h> -// #include "go_gpgme.h" -import "C" - -import ( - "io" - "os" - "runtime" - "unsafe" -) - -const ( - SeekSet = C.SEEK_SET - SeekCur = C.SEEK_CUR - SeekEnd = C.SEEK_END -) - -//export gogpgme_readfunc -func gogpgme_readfunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t { - d := callbackLookup(uintptr(handle)).(*Data) - if len(d.buf) < int(size) { - d.buf = make([]byte, size) - } - n, err := d.r.Read(d.buf[:size]) - if err != nil && err != io.EOF { - C.gpgme_err_set_errno(C.EIO) - return -1 - } - C.memcpy(buffer, unsafe.Pointer(&d.buf[0]), C.size_t(n)) - return C.ssize_t(n) - } - -//export gogpgme_writefunc -func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t { - d := callbackLookup(uintptr(handle)).(*Data) - if len(d.buf) < int(size) { - d.buf = make([]byte, size) - } - C.memcpy(unsafe.Pointer(&d.buf[0]), buffer, C.size_t(size)) - n, err := d.w.Write(d.buf[:size]) - if err != nil && err != io.EOF { - C.gpgme_err_set_errno(C.EIO) - return -1 - } - return C.ssize_t(n) -} - -//export gogpgme_seekfunc -func gogpgme_seekfunc(handle unsafe.Pointer, offset C.off_t, whence C.int) C.off_t { - d := callbackLookup(uintptr(handle)).(*Data) - n, err := d.s.Seek(int64(offset), int(whence)) - if err != nil { - C.gpgme_err_set_errno(C.EIO) - return -1 - } - return C.off_t(n) -} - -// The Data buffer used to communicate with GPGME -type Data struct { - dh C.gpgme_data_t - buf []byte - cbs C.struct_gpgme_data_cbs - r io.Reader - w io.Writer - s io.Seeker - cbc uintptr -} - -func newData() *Data { - d := &Data{} - runtime.SetFinalizer(d, (*Data).Close) - return d -} - -// NewData returns a new memory based data buffer -func NewData() (*Data, error) { - d := newData() - return d, handleError(C.gpgme_data_new(&d.dh)) -} - -// NewDataFile returns a new file based data buffer -func NewDataFile(f *os.File) (*Data, error) { - d := newData() - return d, handleError(C.gpgme_data_new_from_fd(&d.dh, C.int(f.Fd()))) -} - -// NewDataBytes returns a new memory based data buffer that contains `b` bytes -func NewDataBytes(b []byte) (*Data, error) { - d := newData() - var cb *C.char - if len(b) != 0 { - cb =
(*C.char)(unsafe.Pointer(&b[0])) - } - return d, handleError(C.gpgme_data_new_from_mem(&d.dh, cb, C.size_t(len(b)), 1)) -} - -// NewDataReader returns a new callback based data buffer -func NewDataReader(r io.Reader) (*Data, error) { - d := newData() - d.r = r - d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) -} - -// NewDataWriter returns a new callback based data buffer -func NewDataWriter(w io.Writer) (*Data, error) { - d := newData() - d.w = w - d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) -} - -// NewDataReadWriter returns a new callback based data buffer -func NewDataReadWriter(rw io.ReadWriter) (*Data, error) { - d := newData() - d.r = rw - d.w = rw - d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) - d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) -} - -// NewDataReadWriteSeeker returns a new callback based data buffer -func NewDataReadWriteSeeker(rw io.ReadWriteSeeker) (*Data, error) { - d := newData() - d.r = rw - d.w = rw - d.s = rw - d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) - d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) - d.cbs.seek = C.gpgme_data_seek_cb_t(C.gogpgme_seekfunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) -} - -// Close releases any resources associated with the data buffer -func (d *Data) Close() error { - if d.dh == nil { - return nil - } - if d.cbc > 0 { - callbackDelete(d.cbc) - } - _, err := C.gpgme_data_release(d.dh) - d.dh = nil - return err -} - -func (d *Data) Write(p []byte) (int, error) { - n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p))) - if err != nil { - return 0, err - } - if n == 0 { - return 0, io.EOF - } - return int(n), nil -} - -func (d *Data) Read(p []byte) (int, error) { - n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p))) - if err != nil { - return 0, err - } - if n == 0 { - return 0, io.EOF - } - return int(n), nil -} - -func (d *Data) Seek(offset int64, whence int) (int64, error) { - n, err := C.gpgme_data_seek(d.dh, C.off_t(offset), C.int(whence)) - return int64(n), err -} - -// Name returns the associated filename if any -func (d *Data) Name() string { - return C.GoString(C.gpgme_data_get_file_name(d.dh)) -} diff --git a/vendor/github.com/mtrmac/gpgme/data_test.go b/vendor/github.com/mtrmac/gpgme/data_test.go deleted file mode 100644 index 76f94eda8996..000000000000 --- a/vendor/github.com/mtrmac/gpgme/data_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package gpgme - -import ( - "bytes" - "io" - "io/ioutil" - "os" - "testing" -) - -func TestNewData(t *testing.T) { - dh, err := NewData() - checkError(t, err) - for i := 0; i < 5; i++ { - _, err := dh.Write([]byte(testData)) - checkError(t, err) - } - _, err = dh.Seek(0, SeekSet) - checkError(t, err) - - var buf bytes.Buffer - _, err = io.Copy(&buf, dh) - checkError(t, err) - expected := bytes.Repeat([]byte(testData), 5) - diff(t, buf.Bytes(), expected) - - dh.Close() -} - -func TestNewDataBytes(t *testing.T) { - // Test ordinary data, and empty slices - for _, content := range [][]byte{[]byte("content"), []byte{}} { - dh, err := 
NewDataBytes(content) - checkError(t, err) - - _, err = dh.Seek(0, SeekSet) - checkError(t, err) - var buf bytes.Buffer - _, err = io.Copy(&buf, dh) - checkError(t, err) - diff(t, buf.Bytes(), content) - } -} - -func TestDataNewDataFile(t *testing.T) { - f, err := ioutil.TempFile("", "gpgme") - checkError(t, err) - defer func() { - f.Close() - os.Remove(f.Name()) - }() - dh, err := NewDataFile(f) - checkError(t, err) - defer dh.Close() - for i := 0; i < 5; i++ { - _, err := dh.Write([]byte(testData)) - checkError(t, err) - } - _, err = dh.Seek(0, SeekSet) - checkError(t, err) - var buf bytes.Buffer - _, err = io.Copy(&buf, dh) - checkError(t, err) - expected := bytes.Repeat([]byte(testData), 5) - diff(t, buf.Bytes(), expected) -} - -func TestDataNewDataReader(t *testing.T) { - r := bytes.NewReader([]byte(testData)) - dh, err := NewDataReader(r) - checkError(t, err) - var buf bytes.Buffer - _, err = io.Copy(&buf, dh) - checkError(t, err) - diff(t, buf.Bytes(), []byte(testData)) - - dh.Close() -} - -func TestDataNewDataWriter(t *testing.T) { - var buf bytes.Buffer - dh, err := NewDataWriter(&buf) - checkError(t, err) - for i := 0; i < 5; i++ { - _, err := dh.Write([]byte(testData)) - checkError(t, err) - } - expected := bytes.Repeat([]byte(testData), 5) - diff(t, buf.Bytes(), expected) - - dh.Close() -} diff --git a/vendor/github.com/mtrmac/gpgme/examples/decrypt.go b/vendor/github.com/mtrmac/gpgme/examples/decrypt.go deleted file mode 100644 index 2eba2bba23cf..000000000000 --- a/vendor/github.com/mtrmac/gpgme/examples/decrypt.go +++ /dev/null @@ -1,19 +0,0 @@ -package main - -import ( - "io" - "os" - - "github.com/proglottis/gpgme" -) - -func main() { - plain, err := gpgme.Decrypt(os.Stdin) - if err != nil { - panic(err) - } - defer plain.Close() - if _, err := io.Copy(os.Stdout, plain); err != nil { - panic(err) - } -} diff --git a/vendor/github.com/mtrmac/gpgme/examples/encrypt.go b/vendor/github.com/mtrmac/gpgme/examples/encrypt.go deleted file mode 100644 index 52f40b09f51b..000000000000 --- a/vendor/github.com/mtrmac/gpgme/examples/encrypt.go +++ /dev/null @@ -1,39 +0,0 @@ -package main - -import ( - "flag" - "os" - - "github.com/proglottis/gpgme" -) - -func main() { - flag.Parse() - filter := flag.Arg(0) - if filter == "" { - panic("must specify recipient filter") - } - recipients, err := gpgme.FindKeys(filter, false) - if err != nil { - panic(err) - } - if len(recipients) < 1 { - panic("no keys match") - } - plain, err := gpgme.NewDataReader(os.Stdin) - if err != nil { - panic(err) - } - cipher, err := gpgme.NewDataWriter(os.Stdout) - if err != nil { - panic(err) - } - ctx, err := gpgme.New() - if err != nil { - panic(err) - } - ctx.SetArmor(true) - if err := ctx.Encrypt(recipients, 0, plain, cipher); err != nil { - panic(err) - } -} diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.c b/vendor/github.com/mtrmac/gpgme/go_gpgme.c deleted file mode 100644 index b887574e0cb9..000000000000 --- a/vendor/github.com/mtrmac/gpgme/go_gpgme.c +++ /dev/null @@ -1,89 +0,0 @@ -#include "go_gpgme.h" - -gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle) { - return gpgme_data_new_from_cbs(dh, cbs, (void *)handle); -} - -void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle) { - gpgme_set_passphrase_cb(ctx, cb, (void *)handle); -} - -unsigned int key_revoked(gpgme_key_t k) { - return k->revoked; -} - -unsigned int key_expired(gpgme_key_t k) { - return k->expired; -} - -unsigned int key_disabled(gpgme_key_t k) { - 
return k->disabled; -} - -unsigned int key_invalid(gpgme_key_t k) { - return k->invalid; -} - -unsigned int key_can_encrypt(gpgme_key_t k) { - return k->can_encrypt; -} - -unsigned int key_can_sign(gpgme_key_t k) { - return k->can_sign; -} - -unsigned int key_can_certify(gpgme_key_t k) { - return k->can_certify; -} - -unsigned int key_secret(gpgme_key_t k) { - return k->secret; -} - -unsigned int key_can_authenticate(gpgme_key_t k) { - return k->can_authenticate; -} - -unsigned int key_is_qualified(gpgme_key_t k) { - return k->is_qualified; -} - -unsigned int signature_wrong_key_usage(gpgme_signature_t s) { - return s->wrong_key_usage; -} - -unsigned int signature_pka_trust(gpgme_signature_t s) { - return s->pka_trust; -} - -unsigned int signature_chain_model(gpgme_signature_t s) { - return s->chain_model; -} - -unsigned int subkey_revoked(gpgme_subkey_t k) { - return k->revoked; -} - -unsigned int subkey_expired(gpgme_subkey_t k) { - return k->expired; -} - -unsigned int subkey_disabled(gpgme_subkey_t k) { - return k->disabled; -} - -unsigned int subkey_invalid(gpgme_subkey_t k) { - return k->invalid; -} - -unsigned int subkey_secret(gpgme_subkey_t k) { - return k->secret; -} - -unsigned int uid_revoked(gpgme_user_id_t u) { - return u->revoked; -} - -unsigned int uid_invalid(gpgme_user_id_t u) { - return u->invalid; -} diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.h b/vendor/github.com/mtrmac/gpgme/go_gpgme.h deleted file mode 100644 index a3678b127ac7..000000000000 --- a/vendor/github.com/mtrmac/gpgme/go_gpgme.h +++ /dev/null @@ -1,37 +0,0 @@ -#ifndef GO_GPGME_H -#define GO_GPGME_H - -#define _FILE_OFFSET_BITS 64 -#include <gpgme.h> - -#include <stdlib.h> - -extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size); -extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size); -extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence); -extern gpgme_error_t gogpgme_passfunc(void *hook, char *uid_hint, char *passphrase_info, int prev_was_bad, int fd); -extern gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle); -extern void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle); - -extern unsigned int key_revoked(gpgme_key_t k); -extern unsigned int key_expired(gpgme_key_t k); -extern unsigned int key_disabled(gpgme_key_t k); -extern unsigned int key_invalid(gpgme_key_t k); -extern unsigned int key_can_encrypt(gpgme_key_t k); -extern unsigned int key_can_sign(gpgme_key_t k); -extern unsigned int key_can_certify(gpgme_key_t k); -extern unsigned int key_secret(gpgme_key_t k); -extern unsigned int key_can_authenticate(gpgme_key_t k); -extern unsigned int key_is_qualified(gpgme_key_t k); -extern unsigned int signature_wrong_key_usage(gpgme_signature_t s); -extern unsigned int signature_pka_trust(gpgme_signature_t s); -extern unsigned int signature_chain_model(gpgme_signature_t s); -extern unsigned int subkey_revoked(gpgme_subkey_t k); -extern unsigned int subkey_expired(gpgme_subkey_t k); -extern unsigned int subkey_disabled(gpgme_subkey_t k); -extern unsigned int subkey_invalid(gpgme_subkey_t k); -extern unsigned int subkey_secret(gpgme_subkey_t k); -extern unsigned int uid_revoked(gpgme_user_id_t u); -extern unsigned int uid_invalid(gpgme_user_id_t u); - -#endif diff --git a/vendor/github.com/mtrmac/gpgme/gpgme.go b/vendor/github.com/mtrmac/gpgme/gpgme.go deleted file mode 100644 index 20aad737c687..000000000000 --- a/vendor/github.com/mtrmac/gpgme/gpgme.go +++ /dev/null @@ -1,748 +0,0
@@ -// Package gpgme provides a Go wrapper for the GPGME library -package gpgme - -// #cgo LDFLAGS: -lgpgme -lassuan -lgpg-error -// #cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64 -// #include <stdlib.h> -// #include <gpgme.h> -// #include "go_gpgme.h" -import "C" - -import ( - "fmt" - "io" - "os" - "runtime" - "time" - "unsafe" -) - -var Version string - -func init() { - Version = C.GoString(C.gpgme_check_version(nil)) -} - -// Callback is the function that is called when a passphrase is required -type Callback func(uidHint string, prevWasBad bool, f *os.File) error - -//export gogpgme_passfunc -func gogpgme_passfunc(hook unsafe.Pointer, uid_hint, passphrase_info *C.char, prev_was_bad, fd C.int) C.gpgme_error_t { - c := callbackLookup(uintptr(hook)).(*Context) - go_uid_hint := C.GoString(uid_hint) - f := os.NewFile(uintptr(fd), go_uid_hint) - defer f.Close() - err := c.callback(go_uid_hint, prev_was_bad != 0, f) - if err != nil { - return C.GPG_ERR_CANCELED - } - return 0 -} - -type Protocol int - -const ( - ProtocolOpenPGP Protocol = C.GPGME_PROTOCOL_OpenPGP - ProtocolCMS Protocol = C.GPGME_PROTOCOL_CMS - ProtocolGPGConf Protocol = C.GPGME_PROTOCOL_GPGCONF - ProtocolAssuan Protocol = C.GPGME_PROTOCOL_ASSUAN - ProtocolG13 Protocol = C.GPGME_PROTOCOL_G13 - ProtocolUIServer Protocol = C.GPGME_PROTOCOL_UISERVER - // ProtocolSpawn Protocol = C.GPGME_PROTOCOL_SPAWN // Unavailable in 1.4.3 - ProtocolDefault Protocol = C.GPGME_PROTOCOL_DEFAULT - ProtocolUnknown Protocol = C.GPGME_PROTOCOL_UNKNOWN -) - -type PinEntryMode int - -// const ( // Unavailable in 1.3.2 -// PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT -// PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK -// PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL -// PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR -// PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK -// ) - -type EncryptFlag uint - -const ( - EncryptAlwaysTrust EncryptFlag = C.GPGME_ENCRYPT_ALWAYS_TRUST - EncryptNoEncryptTo EncryptFlag = C.GPGME_ENCRYPT_NO_ENCRYPT_TO - EncryptPrepare EncryptFlag = C.GPGME_ENCRYPT_PREPARE - EncryptExceptSign EncryptFlag = C.GPGME_ENCRYPT_EXPECT_SIGN - // EncryptNoCompress EncryptFlag = C.GPGME_ENCRYPT_NO_COMPRESS // Unavailable in 1.4.3 -) - -type HashAlgo int - -// const values for HashAlgo values should be added when necessary. - -type KeyListMode uint - -const ( - KeyListModeLocal KeyListMode = C.GPGME_KEYLIST_MODE_LOCAL - KeyListModeExtern KeyListMode = C.GPGME_KEYLIST_MODE_EXTERN - KeyListModeSigs KeyListMode = C.GPGME_KEYLIST_MODE_SIGS - KeyListModeSigNotations KeyListMode = C.GPGME_KEYLIST_MODE_SIG_NOTATIONS - // KeyListModeWithSecret KeyListMode = C.GPGME_KEYLIST_MODE_WITH_SECRET // Unavailable in 1.4.3 - KeyListModeEphemeral KeyListMode = C.GPGME_KEYLIST_MODE_EPHEMERAL - KeyListModeModeValidate KeyListMode = C.GPGME_KEYLIST_MODE_VALIDATE -) - -type PubkeyAlgo int - -// const values for PubkeyAlgo values should be added when necessary.
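To make the passphrase Callback above concrete, a minimal registration sketch (hedged: it leans on Context.SetCallback, which appears further down in this file, and uses the upstream import path; "secret" is a stand-in passphrase, not a real default):

    package main

    import (
        "fmt"
        "io"
        "log"
        "os"

        "github.com/proglottis/gpgme"
    )

    func main() {
        ctx, err := gpgme.New()
        if err != nil {
            log.Fatal(err)
        }
        defer ctx.Release()
        err = ctx.SetCallback(func(uidHint string, prevWasBad bool, f *os.File) error {
            // gpgme hands us an open pipe; write the passphrase plus a newline.
            if prevWasBad {
                // Any non-nil error is reported back to gpgme as GPG_ERR_CANCELED.
                return fmt.Errorf("passphrase for %s rejected, giving up", uidHint)
            }
            _, err := io.WriteString(f, "secret\n")
            return err
        })
        if err != nil {
            log.Fatal(err)
        }
    }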
- -type SigMode int - -const ( - SigModeNormal SigMode = C.GPGME_SIG_MODE_NORMAL - SigModeDetach SigMode = C.GPGME_SIG_MODE_DETACH - SigModeClear SigMode = C.GPGME_SIG_MODE_CLEAR -) - -type SigSum int - -const ( - SigSumValid SigSum = C.GPGME_SIGSUM_VALID - SigSumGreen SigSum = C.GPGME_SIGSUM_GREEN - SigSumRed SigSum = C.GPGME_SIGSUM_RED - SigSumKeyRevoked SigSum = C.GPGME_SIGSUM_KEY_REVOKED - SigSumKeyExpired SigSum = C.GPGME_SIGSUM_KEY_EXPIRED - SigSumSigExpired SigSum = C.GPGME_SIGSUM_SIG_EXPIRED - SigSumKeyMissing SigSum = C.GPGME_SIGSUM_KEY_MISSING - SigSumCRLMissing SigSum = C.GPGME_SIGSUM_CRL_MISSING - SigSumCRLTooOld SigSum = C.GPGME_SIGSUM_CRL_TOO_OLD - SigSumBadPolicy SigSum = C.GPGME_SIGSUM_BAD_POLICY - SigSumSysError SigSum = C.GPGME_SIGSUM_SYS_ERROR -) - -type Validity int - -const ( - ValidityUnknown Validity = C.GPGME_VALIDITY_UNKNOWN - ValidityUndefined Validity = C.GPGME_VALIDITY_UNDEFINED - ValidityNever Validity = C.GPGME_VALIDITY_NEVER - ValidityMarginal Validity = C.GPGME_VALIDITY_MARGINAL - ValidityFull Validity = C.GPGME_VALIDITY_FULL - ValidityUltimate Validity = C.GPGME_VALIDITY_ULTIMATE -) - -type ErrorCode int - -const ( - ErrorNoError ErrorCode = C.GPG_ERR_NO_ERROR - ErrorEOF ErrorCode = C.GPG_ERR_EOF -) - -// Error is a wrapper for GPGME errors -type Error struct { - err C.gpgme_error_t -} - -func (e Error) Code() ErrorCode { - return ErrorCode(C.gpgme_err_code(e.err)) -} - -func (e Error) Error() string { - return C.GoString(C.gpgme_strerror(e.err)) -} - -func handleError(err C.gpgme_error_t) error { - e := Error{err: err} - if e.Code() == ErrorNoError { - return nil - } - return e -} - -func cbool(b bool) C.int { - if b { - return 1 - } - return 0 -} - -func EngineCheckVersion(p Protocol) error { - return handleError(C.gpgme_engine_check_version(C.gpgme_protocol_t(p))) -} - -type EngineInfo struct { - info C.gpgme_engine_info_t -} - -func (e *EngineInfo) Next() *EngineInfo { - if e.info.next == nil { - return nil - } - return &EngineInfo{info: e.info.next} -} - -func (e *EngineInfo) Protocol() Protocol { - return Protocol(e.info.protocol) -} - -func (e *EngineInfo) FileName() string { - return C.GoString(e.info.file_name) -} - -func (e *EngineInfo) Version() string { - return C.GoString(e.info.version) -} - -func (e *EngineInfo) RequiredVersion() string { - return C.GoString(e.info.req_version) -} - -func (e *EngineInfo) HomeDir() string { - return C.GoString(e.info.home_dir) -} - -func GetEngineInfo() (*EngineInfo, error) { - info := &EngineInfo{} - return info, handleError(C.gpgme_get_engine_info(&info.info)) -} - -func SetEngineInfo(proto Protocol, fileName, homeDir string) error { - var cfn, chome *C.char - if fileName != "" { - cfn = C.CString(fileName) - defer C.free(unsafe.Pointer(cfn)) - } - if homeDir != "" { - chome = C.CString(homeDir) - defer C.free(unsafe.Pointer(chome)) - } - return handleError(C.gpgme_set_engine_info(C.gpgme_protocol_t(proto), cfn, chome)) -} - -func FindKeys(pattern string, secretOnly bool) ([]*Key, error) { - var keys []*Key - ctx, err := New() - if err != nil { - return keys, err - } - defer ctx.Release() - if err := ctx.KeyListStart(pattern, secretOnly); err != nil { - return keys, err - } - defer ctx.KeyListEnd() - for ctx.KeyListNext() { - keys = append(keys, ctx.Key) - } - if ctx.KeyError != nil { - return keys, ctx.KeyError - } - return keys, nil -} - -func Decrypt(r io.Reader) (*Data, error) { - ctx, err := New() - if err != nil { - return nil, err - } - defer ctx.Release() - cipher, err := NewDataReader(r) - if err != 
nil { - return nil, err - } - defer cipher.Close() - plain, err := NewData() - if err != nil { - return nil, err - } - err = ctx.Decrypt(cipher, plain) - plain.Seek(0, SeekSet) - return plain, err -} - -type Context struct { - Key *Key - KeyError error - - callback Callback - cbc uintptr - - ctx C.gpgme_ctx_t -} - -func New() (*Context, error) { - c := &Context{} - err := C.gpgme_new(&c.ctx) - runtime.SetFinalizer(c, (*Context).Release) - return c, handleError(err) -} - -func (c *Context) Release() { - if c.ctx == nil { - return - } - if c.cbc > 0 { - callbackDelete(c.cbc) - } - C.gpgme_release(c.ctx) - c.ctx = nil -} - -func (c *Context) SetArmor(yes bool) { - C.gpgme_set_armor(c.ctx, cbool(yes)) -} - -func (c *Context) Armor() bool { - return C.gpgme_get_armor(c.ctx) != 0 -} - -func (c *Context) SetTextMode(yes bool) { - C.gpgme_set_textmode(c.ctx, cbool(yes)) -} - -func (c *Context) TextMode() bool { - return C.gpgme_get_textmode(c.ctx) != 0 -} - -func (c *Context) SetProtocol(p Protocol) error { - return handleError(C.gpgme_set_protocol(c.ctx, C.gpgme_protocol_t(p))) -} - -func (c *Context) Protocol() Protocol { - return Protocol(C.gpgme_get_protocol(c.ctx)) -} - -func (c *Context) SetKeyListMode(m KeyListMode) error { - return handleError(C.gpgme_set_keylist_mode(c.ctx, C.gpgme_keylist_mode_t(m))) -} - -func (c *Context) KeyListMode() KeyListMode { - return KeyListMode(C.gpgme_get_keylist_mode(c.ctx)) -} - -// Unavailable in 1.3.2: -// func (c *Context) SetPinEntryMode(m PinEntryMode) error { -// return handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m))) -// } - -// Unavailable in 1.3.2: -// func (c *Context) PinEntryMode() PinEntryMode { -// return PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx)) -// } - -func (c *Context) SetCallback(callback Callback) error { - var err error - c.callback = callback - if c.cbc > 0 { - callbackDelete(c.cbc) - } - if callback != nil { - cbc := callbackAdd(c) - c.cbc = cbc - _, err = C.gogpgme_set_passphrase_cb(c.ctx, C.gpgme_passphrase_cb_t(C.gogpgme_passfunc), C.uintptr_t(cbc)) - } else { - c.cbc = 0 - _, err = C.gogpgme_set_passphrase_cb(c.ctx, nil, 0) - } - return err -} - -func (c *Context) EngineInfo() *EngineInfo { - return &EngineInfo{info: C.gpgme_ctx_get_engine_info(c.ctx)} -} - -func (c *Context) SetEngineInfo(proto Protocol, fileName, homeDir string) error { - var cfn, chome *C.char - if fileName != "" { - cfn = C.CString(fileName) - defer C.free(unsafe.Pointer(cfn)) - } - if homeDir != "" { - chome = C.CString(homeDir) - defer C.free(unsafe.Pointer(chome)) - } - return handleError(C.gpgme_ctx_set_engine_info(c.ctx, C.gpgme_protocol_t(proto), cfn, chome)) -} - -func (c *Context) KeyListStart(pattern string, secretOnly bool) error { - cpattern := C.CString(pattern) - defer C.free(unsafe.Pointer(cpattern)) - err := C.gpgme_op_keylist_start(c.ctx, cpattern, cbool(secretOnly)) - return handleError(err) -} - -func (c *Context) KeyListNext() bool { - c.Key = newKey() - err := handleError(C.gpgme_op_keylist_next(c.ctx, &c.Key.k)) - if err != nil { - if e, ok := err.(Error); ok && e.Code() == ErrorEOF { - c.KeyError = nil - } else { - c.KeyError = err - } - return false - } - c.KeyError = nil - return true -} - -func (c *Context) KeyListEnd() error { - return handleError(C.gpgme_op_keylist_end(c.ctx)) -} - -func (c *Context) GetKey(fingerprint string, secret bool) (*Key, error) { - key := newKey() - cfpr := C.CString(fingerprint) - defer C.free(unsafe.Pointer(cfpr)) - err := handleError(C.gpgme_get_key(c.ctx, cfpr, &key.k, 
cbool(secret))) - if e, ok := err.(Error); key.k == nil && ok && e.Code() == ErrorEOF { - return nil, fmt.Errorf("key %q not found", fingerprint) - } - if err != nil { - return nil, err - } - return key, nil -} - -func (c *Context) Decrypt(ciphertext, plaintext *Data) error { - return handleError(C.gpgme_op_decrypt(c.ctx, ciphertext.dh, plaintext.dh)) -} - -func (c *Context) DecryptVerify(ciphertext, plaintext *Data) error { - return handleError(C.gpgme_op_decrypt_verify(c.ctx, ciphertext.dh, plaintext.dh)) -} - -type Signature struct { - Summary SigSum - Fingerprint string - Status error - Timestamp time.Time - ExpTimestamp time.Time - WrongKeyUsage bool - PKATrust uint - ChainModel bool - Validity Validity - ValidityReason error - PubkeyAlgo PubkeyAlgo - HashAlgo HashAlgo -} - -func (c *Context) Verify(sig, signedText, plain *Data) (string, []Signature, error) { - var signedTextPtr, plainPtr C.gpgme_data_t = nil, nil - if signedText != nil { - signedTextPtr = signedText.dh - } - if plain != nil { - plainPtr = plain.dh - } - err := handleError(C.gpgme_op_verify(c.ctx, sig.dh, signedTextPtr, plainPtr)) - if err != nil { - return "", nil, err - } - res := C.gpgme_op_verify_result(c.ctx) - sigs := []Signature{} - for s := res.signatures; s != nil; s = s.next { - sig := Signature{ - Summary: SigSum(s.summary), - Fingerprint: C.GoString(s.fpr), - Status: handleError(s.status), - // s.notations not implemented - Timestamp: time.Unix(int64(s.timestamp), 0), - ExpTimestamp: time.Unix(int64(s.exp_timestamp), 0), - WrongKeyUsage: C.signature_wrong_key_usage(s) != 0, - PKATrust: uint(C.signature_pka_trust(s)), - ChainModel: C.signature_chain_model(s) != 0, - Validity: Validity(s.validity), - ValidityReason: handleError(s.validity_reason), - PubkeyAlgo: PubkeyAlgo(s.pubkey_algo), - HashAlgo: HashAlgo(s.hash_algo), - } - sigs = append(sigs, sig) - } - return C.GoString(res.file_name), sigs, nil -} - -func (c *Context) Encrypt(recipients []*Key, flags EncryptFlag, plaintext, ciphertext *Data) error { - size := unsafe.Sizeof(new(C.gpgme_key_t)) - recp := C.calloc(C.size_t(len(recipients)+1), C.size_t(size)) - defer C.free(recp) - for i := range recipients { - ptr := (*C.gpgme_key_t)(unsafe.Pointer(uintptr(recp) + size*uintptr(i))) - *ptr = recipients[i].k - } - err := C.gpgme_op_encrypt(c.ctx, (*C.gpgme_key_t)(recp), C.gpgme_encrypt_flags_t(flags), plaintext.dh, ciphertext.dh) - return handleError(err) -} - -func (c *Context) Sign(signers []*Key, plain, sig *Data, mode SigMode) error { - C.gpgme_signers_clear(c.ctx) - for _, k := range signers { - if err := handleError(C.gpgme_signers_add(c.ctx, k.k)); err != nil { - C.gpgme_signers_clear(c.ctx) - return err - } - } - return handleError(C.gpgme_op_sign(c.ctx, plain.dh, sig.dh, C.gpgme_sig_mode_t(mode))) -} - -// ImportStatusFlags describes the type of ImportStatus.Status. The C API in gpgme.h simply uses "unsigned". 
-type ImportStatusFlags uint - -const ( - ImportNew ImportStatusFlags = C.GPGME_IMPORT_NEW - ImportUID ImportStatusFlags = C.GPGME_IMPORT_UID - ImportSIG ImportStatusFlags = C.GPGME_IMPORT_SIG - ImportSubKey ImportStatusFlags = C.GPGME_IMPORT_SUBKEY - ImportSecret ImportStatusFlags = C.GPGME_IMPORT_SECRET -) - -type ImportStatus struct { - Fingerprint string - Result error - Status ImportStatusFlags -} - -type ImportResult struct { - Considered int - NoUserID int - Imported int - ImportedRSA int - Unchanged int - NewUserIDs int - NewSubKeys int - NewSignatures int - NewRevocations int - SecretRead int - SecretImported int - SecretUnchanged int - NotImported int - Imports []ImportStatus -} - -func (c *Context) Import(keyData *Data) (*ImportResult, error) { - err := handleError(C.gpgme_op_import(c.ctx, keyData.dh)) - if err != nil { - return nil, err - } - res := C.gpgme_op_import_result(c.ctx) - imports := []ImportStatus{} - for s := res.imports; s != nil; s = s.next { - imports = append(imports, ImportStatus{ - Fingerprint: C.GoString(s.fpr), - Result: handleError(s.result), - Status: ImportStatusFlags(s.status), - }) - } - return &ImportResult{ - Considered: int(res.considered), - NoUserID: int(res.no_user_id), - Imported: int(res.imported), - ImportedRSA: int(res.imported_rsa), - Unchanged: int(res.unchanged), - NewUserIDs: int(res.new_user_ids), - NewSubKeys: int(res.new_sub_keys), - NewSignatures: int(res.new_signatures), - NewRevocations: int(res.new_revocations), - SecretRead: int(res.secret_read), - SecretImported: int(res.secret_imported), - SecretUnchanged: int(res.secret_unchanged), - NotImported: int(res.not_imported), - Imports: imports, - }, nil -} - -type Key struct { - k C.gpgme_key_t -} - -func newKey() *Key { - k := &Key{} - runtime.SetFinalizer(k, (*Key).Release) - return k -} - -func (k *Key) Release() { - C.gpgme_key_release(k.k) - k.k = nil -} - -func (k *Key) Revoked() bool { - return C.key_revoked(k.k) != 0 -} - -func (k *Key) Expired() bool { - return C.key_expired(k.k) != 0 -} - -func (k *Key) Disabled() bool { - return C.key_disabled(k.k) != 0 -} - -func (k *Key) Invalid() bool { - return C.key_invalid(k.k) != 0 -} - -func (k *Key) CanEncrypt() bool { - return C.key_can_encrypt(k.k) != 0 -} - -func (k *Key) CanSign() bool { - return C.key_can_sign(k.k) != 0 -} - -func (k *Key) CanCertify() bool { - return C.key_can_certify(k.k) != 0 -} - -func (k *Key) Secret() bool { - return C.key_secret(k.k) != 0 -} - -func (k *Key) CanAuthenticate() bool { - return C.key_can_authenticate(k.k) != 0 -} - -func (k *Key) IsQualified() bool { - return C.key_is_qualified(k.k) != 0 -} - -func (k *Key) Protocol() Protocol { - return Protocol(k.k.protocol) -} - -func (k *Key) IssuerSerial() string { - return C.GoString(k.k.issuer_serial) -} - -func (k *Key) IssuerName() string { - return C.GoString(k.k.issuer_name) -} - -func (k *Key) ChainID() string { - return C.GoString(k.k.chain_id) -} - -func (k *Key) OwnerTrust() Validity { - return Validity(k.k.owner_trust) -} - -func (k *Key) SubKeys() *SubKey { - if k.k.subkeys == nil { - return nil - } - return &SubKey{k: k.k.subkeys, parent: k} -} - -func (k *Key) UserIDs() *UserID { - if k.k.uids == nil { - return nil - } - return &UserID{u: k.k.uids, parent: k} -} - -func (k *Key) KeyListMode() KeyListMode { - return KeyListMode(k.k.keylist_mode) -} - -type SubKey struct { - k C.gpgme_subkey_t - parent *Key // make sure the key is not released when we have a reference to a subkey -} - -func (k *SubKey) Next() *SubKey { - if k.k.next == nil 
{ - return nil - } - return &SubKey{k: k.k.next, parent: k.parent} -} - -func (k *SubKey) Revoked() bool { - return C.subkey_revoked(k.k) != 0 -} - -func (k *SubKey) Expired() bool { - return C.subkey_expired(k.k) != 0 -} - -func (k *SubKey) Disabled() bool { - return C.subkey_disabled(k.k) != 0 -} - -func (k *SubKey) Invalid() bool { - return C.subkey_invalid(k.k) != 0 -} - -func (k *SubKey) Secret() bool { - return C.subkey_secret(k.k) != 0 -} - -func (k *SubKey) KeyID() string { - return C.GoString(k.k.keyid) -} - -func (k *SubKey) Fingerprint() string { - return C.GoString(k.k.fpr) -} - -func (k *SubKey) Created() time.Time { - if k.k.timestamp <= 0 { - return time.Time{} - } - return time.Unix(int64(k.k.timestamp), 0) -} - -func (k *SubKey) Expires() time.Time { - if k.k.expires <= 0 { - return time.Time{} - } - return time.Unix(int64(k.k.expires), 0) -} - -func (k *SubKey) CardNumber() string { - return C.GoString(k.k.card_number) -} - -type UserID struct { - u C.gpgme_user_id_t - parent *Key // make sure the key is not released when we have a reference to a user ID -} - -func (u *UserID) Next() *UserID { - if u.u.next == nil { - return nil - } - return &UserID{u: u.u.next, parent: u.parent} -} - -func (u *UserID) Revoked() bool { - return C.uid_revoked(u.u) != 0 -} - -func (u *UserID) Invalid() bool { - return C.uid_invalid(u.u) != 0 -} - -func (u *UserID) Validity() Validity { - return Validity(u.u.validity) -} - -func (u *UserID) UID() string { - return C.GoString(u.u.uid) -} - -func (u *UserID) Name() string { - return C.GoString(u.u.name) -} - -func (u *UserID) Comment() string { - return C.GoString(u.u.comment) -} - -func (u *UserID) Email() string { - return C.GoString(u.u.email) -} - -// This is somewhat of a horrible hack. We need to unset GPG_AGENT_INFO so that gpgme does not pass --use-agent to GPG. -// os.Unsetenv should be enough, but that only calls the underlying C library (which gpgme uses) if cgo is involved -// - and cgo can't be used in tests. So, provide this helper for test initialization. 
-func unsetenvGPGAgentInfo() { - v := C.CString("GPG_AGENT_INFO") - defer C.free(unsafe.Pointer(v)) - C.unsetenv(v) -} diff --git a/vendor/github.com/mtrmac/gpgme/gpgme_test.go b/vendor/github.com/mtrmac/gpgme/gpgme_test.go deleted file mode 100644 index 7343201b6da8..000000000000 --- a/vendor/github.com/mtrmac/gpgme/gpgme_test.go +++ /dev/null @@ -1,406 +0,0 @@ -package gpgme - -import ( - "bytes" - "flag" - "io" - "io/ioutil" - "os" - "os/exec" - "runtime" - "strings" - "testing" -) - -const ( - testGPGHome = "./testdata/gpghome" - testData = "data\n" - testCipherText = `-----BEGIN PGP MESSAGE----- -Version: GnuPG v1 - -hQEMAw4698hF4WUhAQf/SdkbF/zUE6YjBxscDurrUZunSnt87kipLXypSxTDIdgj -O9huAaQwBz4uAJf2DuEN/7iAFGhi/v45NTujrG+7ocfjM3m/A2T80g4RVF5kKXBr -pFFgH7bMRY6VdZt1GKI9izSO/uFkoKXG8M31tCX3hWntQUJ9p+n1avGpu3wo4Ru3 -CJhpL+ChDzXuZv4IK81ahrixEz4fJH0vd0TbsHpTXx4WPkTGXelM0R9PwiY7TovZ -onGZUIplrfA1HUUbQfzExyFw3oAo1/almzD5CBIfq5EnU8Siy5BNulDXm0/44h8A -lOUy6xqx7ITnWMYYf4a1cFoW80Yk+x6SYQqbcqHFQdJIAVr00V4pPV4ppFcXgdw/ -BxKERzDupyqS0tcfVFCYLRmvtQp7ceOS6jRW3geXPPIz1U/VYBvKlvFu4XTMCS6z -4qY4SzZlFEsU -=IQOA ------END PGP MESSAGE-----` - textSignedCipherText = `-----BEGIN PGP MESSAGE----- -Version: GnuPG v1 - -hQEMAw4698hF4WUhAQf9FaI8zSltIMcDxqGtpcTfcBtC30rcKqE/oa1bacxuZQKW -uT671N7J2TjU8tLIsAO9lDSRHCsf6DbB+O6qOJsu6eJownyMUEmJ2oBON9Z2x+Ci -aS0KeRqcRxNJYbIth9/ffa2I4sSBA0GS93yCfGKHNL4JvNo/dgVjJwOPWZ5TKu49 -n9h81LMwTQ8qpLGPeo7nsgBtSKNTi5s7pC0bd9HTii7gQcLUEeeqk5U7oG5CRN48 -zKPrVud8pIDvSoYuymZmjWI1MmF4xQ+6IHK7hWKMyA2RkS4CpyBPERMifc+y+K2+ -LnKLt2ul9jxgqv3fd/dU7UhyqdvJdj83tUvhN4Iru9LAuQHfVsP23keH8CHpUzCj -PkuUYvpgmRdMSC+l3yBCqnaaAI51rRdyJ+HX4DrrinOFst2JfXEICiQAg5UkO4i/ -YYHsfD3xfk7MKMOY/DfV25OyAU6Yeq/5SMisONhtlzcosVU7HULudbIKSG6q/tCx -jRGu+oMIBdiQ+eipnaLVetyGxSEDzxHH367NbxC9ffxK7I+Srmwh+oS/yMNuZA8r -bUp6NbykK8Si7VNgVG1+yOIULLuR0VBO5+e2PKQU8FP0wABTn7h3zE8z4rCrcaZl -43bc/TBUyTdWY79e2P1otaCbhQGAjfF8ClPbAdsFppykf5I2ZDDf8c67d0Nqui74 -Yry4/fF/2sAzyoyzdP6ktBw6pCA1MbwDqTGg7aEI8Es3X+jfh3qIaLnGmZ7jIyUl -D4jgGAEpFU+mpPH8eukKuT+OJ3P6gdmSiAJH7+C96tlcEg9BNxjYNkCTil/3yygQ -EV/5zFTEpzm4CtYHHdmY5uCaEJq/4hhE8BY8 -=8mxI ------END PGP MESSAGE-----` - testSignedText = `-----BEGIN PGP MESSAGE----- -Version: GnuPG v1 - -owEBQwG8/pANAwACAQMn/7Ain2E2AcsTYgBW47+PVGVzdCBtZXNzYWdlCokBHAQA -AQIABgUCVuO/jwAKCRADJ/+wIp9hNh9lCACPiDkY0CqI9ss4EBcpToqnF/8NmV99 -2wi6FmbQnUmY98OMM2VJXrX6PvfD/X+FsiLog0CZU4heMEAI3Dd3qELgTfaTFqNc -bbDkenzA0kO734WLsEU/z1F9iWAcfeF3crKqd3fBw5kZ1PkhuJFdcqQEOUQALvXY -8VAtQmQWzf3sn2KIQ7R1wyAJVoUZaN5Xwc9Y0F1l4Xxifax8nkFBl35X6gmHRxZ7 -jlfmWMcAkXASNXl9/Yso2XGJMs85JPhZPJ3KJRuuurnhZSxAbDJMNBFJ+HbQv3y6 -pupeR7ut6pWJxr6MND793yoFGoRYwKklQdfP4xzFCatYRU4RkPBp95KJ -=RMUj ------END PGP MESSAGE----- -` -) - -func TestMain(m *testing.M) { - flag.Parse() - os.Setenv("GNUPGHOME", testGPGHome) - unsetenvGPGAgentInfo() - os.Exit(m.Run()) -} - -func compareEngineInfo(t *testing.T, info *EngineInfo, proto Protocol, fileName, homeDir string) { - for info != nil && info.Protocol() != proto { - info = info.Next() - } - if info == nil { - t.Errorf("Expected engine info %d not found", proto) - return - } - if info.FileName() != fileName { - t.Errorf("Testing file name %s does not match %s", info.FileName(), fileName) - } - if info.HomeDir() != homeDir { - t.Errorf("Testing home directory %s does not match %s", info.HomeDir(), homeDir) - } -} - -func TestEngineInfo(t *testing.T) { - testProto := ProtocolOpenPGP // Careful, this is global state! - defer func() { - SetEngineInfo(testProto, "", "") // Try to reset to defaults after we are done. 
- }() - - testFN := "testFN" - testHomeDir := "testHomeDir" - checkError(t, SetEngineInfo(testProto, testFN, testHomeDir)) - - info, err := GetEngineInfo() - checkError(t, err) - compareEngineInfo(t, info, testProto, testFN, testHomeDir) - - // SetEngineInfo with empty strings works, using defaults which we don't know, - // so just test that it doesn't fail. - checkError(t, SetEngineInfo(testProto, testFN, "")) - checkError(t, SetEngineInfo(testProto, "", testHomeDir)) -} - -func ctxWithCallback(t *testing.T) *Context { - ensureVersion(t, "1.", "can only set password callback for GPG v1.x") - - ctx, err := New() - checkError(t, err) - - checkError(t, ctx.SetCallback(func(uid_hint string, prev_was_bad bool, f *os.File) error { - if prev_was_bad { - t.Fatal("Bad passphrase") - } - _, err := io.WriteString(f, "password\n") - return err - })) - return ctx -} - -func TestContext_Armor(t *testing.T) { - ctx, err := New() - checkError(t, err) - - ctx.SetArmor(true) - if !ctx.Armor() { - t.Error("expected armor set") - } - ctx.SetArmor(false) - if ctx.Armor() { - t.Error("expected armor not set") - } -} - -func TestContext_TextMode(t *testing.T) { - ctx, err := New() - checkError(t, err) - - ctx.SetTextMode(true) - if !ctx.TextMode() { - t.Error("expected textmode set") - } - ctx.SetTextMode(false) - if ctx.TextMode() { - t.Error("expected textmode not set") - } -} - -func TestContext_EngineInfo(t *testing.T) { - ctx, err := New() - checkError(t, err) - - testProto := ProtocolOpenPGP - testFN := "testFN" - testHomeDir := "testHomeDir" - checkError(t, ctx.SetEngineInfo(testProto, testFN, testHomeDir)) - - info := ctx.EngineInfo() - compareEngineInfo(t, info, testProto, testFN, testHomeDir) - - // SetEngineInfo with empty strings works, using defaults which we don't know, - // so just test that it doesn't fail. 
- checkError(t, ctx.SetEngineInfo(testProto, testFN, "")) - checkError(t, ctx.SetEngineInfo(testProto, "", testHomeDir)) -} - -func TestContext_Encrypt(t *testing.T) { - ctx, err := New() - checkError(t, err) - - keys, err := FindKeys("test@example.com", true) - checkError(t, err) - - plain, err := NewDataBytes([]byte(testData)) - checkError(t, err) - - var buf bytes.Buffer - cipher, err := NewDataWriter(&buf) - checkError(t, err) - - checkError(t, ctx.Encrypt(keys, 0, plain, cipher)) - if buf.Len() < 1 { - t.Error("Expected encrypted bytes, got empty buffer") - } -} - -func TestContext_Decrypt(t *testing.T) { - ctx := ctxWithCallback(t) - - cipher, err := NewDataBytes([]byte(testCipherText)) - checkError(t, err) - var buf bytes.Buffer - plain, err := NewDataWriter(&buf) - checkError(t, err) - checkError(t, ctx.Decrypt(cipher, plain)) - diff(t, buf.Bytes(), []byte("Test message\n")) -} - -func TestContext_DecryptVerify(t *testing.T) { - ctx := ctxWithCallback(t) - - cipher, err := NewDataBytes([]byte(textSignedCipherText)) - checkError(t, err) - var buf bytes.Buffer - plain, err := NewDataWriter(&buf) - checkError(t, err) - checkError(t, ctx.DecryptVerify(cipher, plain)) - diff(t, buf.Bytes(), []byte("Test message\n")) -} - -func TestContext_Sign(t *testing.T) { - ctx := ctxWithCallback(t) - - key, err := ctx.GetKey("test@example.com", true) - checkError(t, err) - - plain, err := NewDataBytes([]byte(testData)) - checkError(t, err) - - var buf bytes.Buffer - signed, err := NewDataWriter(&buf) - checkError(t, err) - - checkError(t, ctx.Sign([]*Key{key}, plain, signed, SigModeNormal)) - if buf.Len() < 1 { - t.Error("Expected signed bytes, got empty buffer") - } -} - -func TestContext_Verify(t *testing.T) { - ctx, err := New() - checkError(t, err) - - _, err = ctx.GetKey("test@example.com", false) - checkError(t, err) - - signed, err := NewDataBytes([]byte(testSignedText)) - checkError(t, err) - - var buf bytes.Buffer - plain, err := NewDataWriter(&buf) - checkError(t, err) - - _, sigs, err := ctx.Verify(signed, nil, plain) - checkError(t, err) - - if len(sigs) != 1 { - t.Error("Expected 1 signature") - } - sig := sigs[0] - // Normalize - expectedSig := Signature{ - Summary: SigSumValid | SigSumGreen, - Fingerprint: "44B646DC347C31E867FF4F450327FFB0229F6136", - Status: nil, - Timestamp: sig.Timestamp, // Ignore in comparison - ExpTimestamp: sig.ExpTimestamp, // Ignore in comparison - WrongKeyUsage: false, - PKATrust: 0, - ChainModel: false, - Validity: ValidityFull, - ValidityReason: nil, - PubkeyAlgo: sig.PubkeyAlgo, // Ignore in comparison - HashAlgo: sig.HashAlgo, // Ignore in comparison - } - if sig != expectedSig { - t.Errorf("Signature verification does not match: %#v vs. %#v", sig, expectedSig) - } - - diff(t, buf.Bytes(), []byte("Test message\n")) -} - -func TestContext_Import(t *testing.T) { - homeDir, err := ioutil.TempDir("", "gpgme-import-test") - checkError(t, err) - defer os.RemoveAll(homeDir) - - ctx, err := New() - checkError(t, err) - checkError(t, ctx.SetEngineInfo(ProtocolOpenPGP, "", homeDir)) - - f, err := os.Open("./testdata/pubkeys.gpg") - checkError(t, err) - defer f.Close() - dh, err := NewDataFile(f) - checkError(t, err) - defer dh.Close() - - res, err := ctx.Import(dh) - checkError(t, err) - - for _, v := range []struct { - Name string - Value int - Expected int - }{ - {"Considered", res.Considered, 1}, - {"NoUserID", res.NoUserID, 0}, - {"Imported", res.Imported, 1}, - // Do not test ImportedRSA, as of gnupg 2.1.11 the value is always 0. 
- {"Unchanged", res.Unchanged, 0}, - {"NewUserIDs", res.NewUserIDs, 0}, - {"NewSubKeys", res.NewSubKeys, 0}, - {"NewSignatures", res.NewSignatures, 0}, - {"NewRevocations", res.NewRevocations, 0}, - {"SecretRead", res.SecretRead, 0}, - {"SecretImported", res.SecretImported, 0}, - {"SecretUnchanged", res.SecretUnchanged, 0}, - {"NotImported", res.NotImported, 0}, - } { - if v.Value != v.Expected { - t.Errorf("Unexpected import result field %s, value %d, expected %d", v.Name, v.Value, v.Expected) - } - } - expectedStatus := ImportStatus{ - Fingerprint: "44B646DC347C31E867FF4F450327FFB0229F6136", - Result: nil, - Status: ImportNew, - } - if len(res.Imports) != 1 { - t.Errorf("Unexpected number of import status values %d", len(res.Imports)) - } else if res.Imports[0] != expectedStatus { - t.Errorf("Import status does not match: %#v vs. %#v", res.Imports[0], expectedStatus) - } -} - -func isVersion(t testing.TB, version string) bool { - var info *EngineInfo - info, err := GetEngineInfo() - checkError(t, err) - for info != nil { - if info.Protocol() == ProtocolOpenPGP { - if strings.Contains(info.FileName(), "gpg") && strings.HasPrefix(info.Version(), version) { - return true - } - return false - } - info = info.Next() - } - return false -} - -var gpgBins = []string{"gpg2", "gpg1", "gpg"} - -// ensureVersion tries to setup gpgme with a specific version or skip -func ensureVersion(t testing.TB, version, msg string) { - if isVersion(t, version) { - return - } - for _, bin := range gpgBins { - path, err := exec.LookPath(bin) - if err != nil { - continue - } - if err := SetEngineInfo(ProtocolOpenPGP, path, testGPGHome); err != nil { - continue - } - if isVersion(t, version) { - return - } - } - t.Skip(msg) -} - -func diff(t *testing.T, dst, src []byte) { - line := 1 - offs := 0 // line offset - for i := 0; i < len(dst) && i < len(src); i++ { - d := dst[i] - s := src[i] - if d != s { - t.Errorf("dst:%d: %s\n", line, dst[offs:i+1]) - t.Errorf("src:%d: %s\n", line, src[offs:i+1]) - return - } - if s == '\n' { - line++ - offs = i + 1 - } - } - if len(dst) != len(src) { - t.Errorf("len(dst) = %d, len(src) = %d\nsrc = %q", len(dst), len(src), src) - } -} - -func checkError(t testing.TB, err error) { - if err == nil { - return - } - _, file, line, ok := runtime.Caller(1) - if ok { - // Truncate file name at last file name separator. - if index := strings.LastIndex(file, "/"); index >= 0 { - file = file[index+1:] - } else if index = strings.LastIndex(file, "\\"); index >= 0 { - file = file[index+1:] - } - } else { - file = "???" 
- line = 1 - } - t.Fatalf("%s:%d: %s", file, line, err) -} diff --git a/vendor/github.com/mtrmac/gpgme/testdata/gpghome/gpg-agent.conf b/vendor/github.com/mtrmac/gpgme/testdata/gpghome/gpg-agent.conf deleted file mode 100644 index d1b6ae315769..000000000000 --- a/vendor/github.com/mtrmac/gpgme/testdata/gpghome/gpg-agent.conf +++ /dev/null @@ -1 +0,0 @@ -allow-loopback-pinentry diff --git a/vendor/github.com/mtrmac/gpgme/testdata/gpghome/gpg.conf b/vendor/github.com/mtrmac/gpgme/testdata/gpghome/gpg.conf deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/vendor/github.com/mtrmac/gpgme/testdata/gpghome/pubring.gpg b/vendor/github.com/mtrmac/gpgme/testdata/gpghome/pubring.gpg deleted file mode 100644 index f1efe5369b09..000000000000 Binary files a/vendor/github.com/mtrmac/gpgme/testdata/gpghome/pubring.gpg and /dev/null differ diff --git a/vendor/github.com/mtrmac/gpgme/testdata/gpghome/secring.gpg b/vendor/github.com/mtrmac/gpgme/testdata/gpghome/secring.gpg deleted file mode 100644 index f37122d28861..000000000000 Binary files a/vendor/github.com/mtrmac/gpgme/testdata/gpghome/secring.gpg and /dev/null differ diff --git a/vendor/github.com/mtrmac/gpgme/testdata/gpghome/trustdb.gpg b/vendor/github.com/mtrmac/gpgme/testdata/gpghome/trustdb.gpg deleted file mode 100644 index 9c17a6328c66..000000000000 Binary files a/vendor/github.com/mtrmac/gpgme/testdata/gpghome/trustdb.gpg and /dev/null differ diff --git a/vendor/github.com/mtrmac/gpgme/testdata/pubkeys.gpg b/vendor/github.com/mtrmac/gpgme/testdata/pubkeys.gpg deleted file mode 100644 index 453e4b17c29b..000000000000 --- a/vendor/github.com/mtrmac/gpgme/testdata/pubkeys.gpg +++ /dev/null @@ -1,30 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mQENBFQnVRMBCADJCrvBzeItL55ZQ1jgYV5NOPo1q0O/aQzUxQ9gOMBiKkFIDWBj -z6PrehVyYByKNkpMMVwF2VtvqA3+pW8e+0EV633y3UQvSVLNlWmYerk683HsAeyI -GZS7oOigrX0NYOw8RsqKlz3rTXC4NbMB5m7CA+L7XdsCzH9njy9D/48RuDczywHb -/QWCcXO+/xMt32KqZh+/wOi5bO6v+i0HuzijoKtPGRjHg3+Ar7EIlEgU4T1TYYsO -uzB+OmNF7EAALg2Qt9BBzbTnQQl7rNWH9i+kWq13BVf9Ug+CerCB05IwNSeIU71K -gQlIzZfY2C+040AG5GDYDPaHB1tKaPd1Yf8JABEBAAG0G1Rlc3QgS2V5IDx0ZXN0 -QGV4YW1wbGUuY29tPokBOQQTAQIAIwUCVCdVEwIbAwcLCQgHAwIBBhUIAgkKCwQW -AgMBAh4BAheAAAoJEAMn/7Ain2E2IesH/2s61KJAyow+ZuX6ryRFlhm0irEi0m9t -LOWa7X2qNLt31XsHSc2ZL+jRj5nnQYv7y3Fbd+gIgHb03Kap10i//ukwPbX0h+oj -xAEKH9tTpRzYm+MKOrdfrB+OUXZBX63oYooLMTcSKxk6C6881Es8yFIDOVGCSUuu -JUNrojPVMdRVD8MHlDPpg0SFrC4s7tLQaQ7L2sFipCPUwLuPV91ionPv7sv8q3LR -D3O5CoaC/nl1XNIDVmXHACwyRWw/iCbQ9L7xbeiEgJkZLCQHanhbvFPMA4hFdQQX -6qcYh6vRzOJv+UyAORfKpIu5avnqo7GziADhf4qJVxQ0sSV/DutG9765AQ0EVCdV -EwEIALyTAnCvrjPJUeFJMR9m6cSFrTVzMrZDbyydBdNtwgZM9DNUCPe+os6p6/fq -UVTceICHlI8ogDAIct983mfu5xb0YPvOHIc9PnPoRrRHlVcYsT4fIHeeR2YLjg3e -6AxS/8D9Hx/FrTu9tTF1wxOuN0I8ViPKu90c8v9neCGgHkYptTj40IHXoXdlwjI7 -vkls6aXb3T4hsvRZHBeGeW2vMKXYHpw7VO3+MJWiOb7s5g2KFfKzXmf3Ir3MIOXV -MTGMcCRfnyElv+aCaWISByNChb3xVWx2u03fJYG3/JoGg5yPieEjnSeFP1sUklSs -dNjf/k87FsfcEvMW+63LxrIzHIcAEQEAAYkBHwQYAQIACQUCVCdVEwIbDAAKCRAD -J/+wIp9hNmiiB/9v2CpbNuiSxpe8YSWzwsyll0OwZCespQUr9kzGRlrtYKy7Geuj -DeE7WJS9C4ULMIS2wCpWcbeHucwrB+QylONQ5yxZeivFxR8yyZev+axGyKr3qMYI -pPbkhlilpwWJyJV6HzIUjTfRUKVt7ySqAo4VzNjwu7r2E5IdSjecm3ywLB5hxZjZ -dgTxUghdQYS+U6JMod55dcMzMvGm8WiZqp7CaxmfW56bgfJYxfGqL+MXEYiKB/Yl -/S8773XlVA7g9JOPnmkuG7NpZb3onKkcmwmq0JcOQyUuClLx26DMW6wn0XBZOe1X -WzpclcM/UTuDk7w5/PVCorGZAONowX5zB0GQ -=TA2b ------END PGP PUBLIC KEY BLOCK----- diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go 
b/vendor/github.com/openshift/api/config/v1/types_ingress.go index 484a1af0bc24..d161eb8476cb 100644 --- a/vendor/github.com/openshift/api/config/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go @@ -6,8 +6,8 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Ingress holds cluster-wide information about Ingress. The canonical name is `cluster` -// TODO this object is an example of a possible grouping and is subject to change or removal +// Ingress holds cluster-wide information about ingress, including the default ingress domain +// used for routes. The canonical name is `cluster`. type Ingress struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -24,8 +24,13 @@ type Ingress struct { type IngressSpec struct { // domain is used to generate a default host name for a route when the - // route's host name is empty. The generated host name will follow this + // route's host name is empty. The generated host name will follow this // pattern: "<route name>.<route namespace>.<domain>". + // + // It is also used as the default wildcard domain suffix for ingress. The + // default ingresscontroller domain will follow this pattern: "*.<domain>". + // + // Once set, changing domain is not currently supported. Domain string `json:"domain"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go index cf821f9e376a..31291dec2f60 100644 --- a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go +++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go @@ -6,10 +6,19 @@ import ( // OperatorHubSpec defines the desired state of OperatorHub type OperatorHubSpec struct { + // disableAllDefaultSources allows you to disable all the default hub + // sources. If this is true, a specific entry in sources can be used to + // enable a default source. If this is false, a specific entry in + // sources can be used to disable or enable a default source. + // +optional + DisableAllDefaultSources bool `json:"disableAllDefaultSources,omitempty"` // sources is the list of default hub sources and their configuration. - // If the list is empty, it indicates that the default hub sources are - // enabled on the cluster. The list of default hub sources and their - // current state will always be reflected in the status block. + // If the list is empty, it implies that the default hub sources are + // enabled on the cluster unless disableAllDefaultSources is true. + // If disableAllDefaultSources is true and sources is not empty, + // the configuration present in sources will take precedence. The list of + // default hub sources and their current state will always be reflected in + // the status block.
// +optional Sources []HubSource `json:"sources,omitempty"` } @@ -61,9 +70,9 @@ type HubSource struct { // HubSourceStatus is used to reflect the current state of applying the // configuration to a default source type HubSourceStatus struct { - HubSource + HubSource `json:"",omitempty` // status indicates success or failure in applying the configuration - Status string `json:"status"` + Status string `json:"status,omitempty"` // message provides more information regarding failures Message string `json:"message,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go index 398470148292..1413a48caa4e 100644 --- a/vendor/github.com/openshift/api/config/v1/types_proxy.go +++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go @@ -42,17 +42,17 @@ type ProxySpec struct { // trustedCA is a reference to a ConfigMap containing a CA certificate bundle used // for client egress HTTPS connections. The certificate bundle must be from the CA - // that signed the proxy's certificate and be signed for everything. trustedCA should - // only be consumed by a proxy validator. The validator is responsible for reading - // ConfigMapNameReference, validating the certificate and copying "ca-bundle.crt" - // from data to a ConfigMap in the namespace of an operator configured for proxy. - // The namespace for this ConfigMap is "openshift-config". Here is an example - // ConfigMap (in yaml): + // that signed the proxy's certificate and be signed for everything. The trustedCA + // field should only be consumed by a proxy validator. The validator is responsible + // for reading the certificate bundle from required key "ca-bundle.crt" and copying + // it to a ConfigMap named "trusted-ca-bundle" in the "openshift-config-managed" + // namespace. The namespace for the ConfigMap referenced by trustedCA is + // "openshift-config". Here is an example ConfigMap (in yaml): // // apiVersion: v1 // kind: ConfigMap // metadata: - // name: trusted-ca-bundle + // name: user-ca-bundle // namespace: openshift-config // data: // ca-bundle.crt: | diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 537fb82005e3..7cd8d829717a 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -828,7 +828,7 @@ func (PlatformStatus) SwaggerDoc() map[string]string { } var map_Ingress = map[string]string{ - "": "Ingress holds cluster-wide information about Ingress. The canonical name is `cluster`", + "": "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`.", "metadata": "Standard object's metadata.", "spec": "spec holds user settable values for configuration", "status": "status holds observed values from the cluster. They may not be overridden.", @@ -847,7 +847,7 @@ func (IngressList) SwaggerDoc() map[string]string { } var map_IngressSpec = map[string]string{ - "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"<route name>.<route namespace>.<domain>\".", + "domain": "domain is used to generate a default host name for a route when the route's host name is empty.
The generated host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".\n\nIt is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.<domain>\".\n\nOnce set, changing domain is not currently supported.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -1183,8 +1183,9 @@ func (OperatorHubList) SwaggerDoc() map[string]string { } var map_OperatorHubSpec = map[string]string{ - "": "OperatorHubSpec defines the desired state of OperatorHub", - "sources": "sources is the list of default hub sources and their configuration. If the list is empty, it indicates that the default hub sources are enabled on the cluster. The list of default hub sources and their current state will always be reflected in the status block.", + "": "OperatorHubSpec defines the desired state of OperatorHub", + "disableAllDefaultSources": "disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source.", + "sources": "sources is the list of default hub sources and their configuration. If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. The list of default hub sources and their current state will always be reflected in the status block.", } func (OperatorHubSpec) SwaggerDoc() map[string]string { @@ -1262,7 +1263,7 @@ var map_ProxySpec = map[string]string{ "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.", "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in an env var.", "readinessEndpoints": "readinessEndpoints is a list of endpoints used to verify readiness of the proxy.", - "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle used for client egress HTTPS connections. The certificate bundle must be from the CA that signed the proxy's certificate and be signed for everything. trustedCA should only be consumed by a proxy validator. The validator is responsible for reading ConfigMapNameReference, validating the certificate and copying \"ca-bundle.crt\" from data to a ConfigMap in the namespace of an operator configured for proxy. The namespace for this ConfigMap is \"openshift-config\". Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: trusted-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |", + "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle used for client egress HTTPS connections. The certificate bundle must be from the CA that signed the proxy's certificate and be signed for everything. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from required key \"ca-bundle.crt\" and copying it to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". 
Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: user-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |", } func (ProxySpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/client-go/glide.lock b/vendor/github.com/openshift/client-go/glide.lock index f5c8c1538d6e..1608395ba753 100644 --- a/vendor/github.com/openshift/client-go/glide.lock +++ b/vendor/github.com/openshift/client-go/glide.lock @@ -1,5 +1,5 @@ hash: 595563cffda70c75833adcf07415011d115db7218cbbddc4c14f1684ad39638a -updated: 2019-08-06T10:39:29.760053178-04:00 +updated: 2019-08-13T12:05:19.648281606-04:00 imports: - name: github.com/davecgh/go-spew version: 782f4967f2dc4564575ca782fe2d04090b5faca8 @@ -41,7 +41,7 @@ imports: - name: github.com/modern-go/reflect2 version: 94122c33edd36123c84d5368cfb2b69df93a0ec8 - name: github.com/openshift/api - version: 9ef0612c775d8571388e16d1bc68c4b3df83e7fb + version: b5570061b31fed3b06c24077c534b1a1bf7ecf8b subpackages: - apps/v1 - authorization/v1 diff --git a/vendor/github.com/openshift/library-go/Makefile b/vendor/github.com/openshift/library-go/Makefile index c7f0ce5dfcfc..2704d89e4752 100644 --- a/vendor/github.com/openshift/library-go/Makefile +++ b/vendor/github.com/openshift/library-go/Makefile @@ -2,7 +2,7 @@ all: build .PHONY: all # All the go packages (e.g. for verfy) -GO_PACKAGES :=./pkg/... ./cmd/... +GO_PACKAGES :=./pkg/... # Packages to be compiled GO_BUILD_PACKAGES :=$(GO_PACKAGES) # Do not auto-expand packages for libraries or it would compile them separately diff --git a/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go b/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go deleted file mode 100755 index cb37958a2337..000000000000 --- a/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go +++ /dev/null @@ -1,391 +0,0 @@ -package generator - -import ( - "flag" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "reflect" - "strings" - - "github.com/evanphx/json-patch" - "gopkg.in/yaml.v2" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" - utilyaml "k8s.io/apimachinery/pkg/util/yaml" - crdgenerator "sigs.k8s.io/controller-tools/pkg/crd/generator" -) - -var ( - scheme = runtime.NewScheme() - codecs = serializer.NewCodecFactory(scheme) -) - -func init() { - v1beta1.AddToScheme(scheme) -} - -func Run() error { - apisDir := flag.String("apis-dir", "pkg/apis", "the (relative) path to the package with API definitions") - apis := flag.String("apis", "*", "the apis to generate from the apis-dir, in bash glob syntax") - manifestDir := flag.String("manifests-dir", "manifests", "the directory with existing CRD manifests") - outputDir := flag.String("output-dir", "", "optional directory to output the kubebuilder CRDs. 
By default a temporary directory is used.") - verifyOnly := flag.Bool("verify-only", false, "do not write files, only compare and return with return code 1 if dirty") - domain := flag.String("domain", "", "the domain appended to group names.") - repo := flag.String("repo", "", "the repository package name (optional).") - - flag.Parse() - - // load existing manifests from manifests/ dir - existing, err := crdsFromDirectory(*manifestDir) - if err != nil { - return err - } - - // create temp dir - pwd, err := os.Getwd() - if err != nil { - return err - } - tmpDir, err := ioutil.TempDir(pwd, "") - if err != nil { - return fmt.Errorf("error creating temp directory: %v\n", err) - } - defer os.RemoveAll(tmpDir) - relTmpDir := tmpDir[len(pwd)+1:] - - // find repo in GOPATH - sep := string([]rune{os.PathSeparator}) - GOPATH := strings.TrimRight(os.Getenv("GOPATH"), sep) - if len(*repo) == 0 && len(GOPATH) > 0 && strings.HasPrefix(pwd, filepath.Join(GOPATH, "src")+sep) { - *repo = pwd[len(filepath.Join(GOPATH, "src")+sep):] - fmt.Printf("Derived repo %q from GOPATH and working directory.\n", *repo) - } - - // validate params - if len(*repo) == 0 { - return fmt.Errorf("repo cannot be empty. Run crd-schema-gen in GOPATH or specify repo explicitly.") - } - if len(*domain) == 0 { - return fmt.Errorf("domain cannot be empty.") - } - - // copy APIs to temp dir - fmt.Printf("Copying vendor/github.com/openshift/api/config to temporary pkg/apis...\n") - if err := os.MkdirAll(filepath.Join(tmpDir, "pkg/apis"), 0755); err != nil { - return err - } - cmd := fmt.Sprintf("cp -av \"%s/\"%s \"%s\"", *apisDir, *apis, filepath.Join(tmpDir, "pkg/apis")) - out, err := exec.Command("/bin/bash", "-c", cmd).CombinedOutput() - if err != nil { - fmt.Print(string(out)) - return err - } - if err := ioutil.WriteFile(filepath.Join(tmpDir, "PROJECT"), []byte(fmt.Sprintf(` -domain: %s -repo: %s/%s -`, *domain, *repo, relTmpDir)), 0644); err != nil { - return err - } - - // generate kubebuilder KindGroupYaml manifests into temp dir - g := crdgenerator.Generator{ - RootPath: tmpDir, - OutputDir: filepath.Join(tmpDir, "manifests"), - SkipMapValidation: true, - } - - if len(*outputDir) != 0 { - g.OutputDir = *outputDir - fmt.Printf("Creating kubebuilder manifests %q ...\n", *outputDir) - } else { - fmt.Printf("Creating kubebuilder manifests ...\n") - } - - if err := g.ValidateAndInitFields(); err != nil { - return err - } - if err := g.Do(); err != nil { - return err - } - - // the generator changes the directory for some reason - os.Chdir(pwd) - - // load kubebuilder manifests from temp dir - fromKubebuilder, err := crdsFromDirectory(g.OutputDir) - if err != nil { - return err - } - - existingFileNames := map[string]string{} - for fn, crd := range existing { - existingFileNames[crd.KindGroup] = fn - } - - // update existing manifests with validations of kubebuilder output - dirty := false - noneFound := true - for fn, withValidation := range fromKubebuilder { - existingFileName, ok := existingFileNames[withValidation.KindGroup] - if !ok { - continue - } - noneFound = false - - crd := existing[existingFileName] - - // TODO: support multiple versions - validation, _, err := nested(withValidation.Yaml, "spec", "validation") - if err != nil { - return fmt.Errorf("failed to access spec.validation in %s: %v", fn, err) - } - - // yaml merge patch exists? 
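// Illustrative sketch only (hypothetical manifest name and schema keys): a
// CRD manifest "foo.crd.yaml" can ship a sibling "foo.crd.yaml-merge-patch"
// file that is restricted to spec.validation, e.g. (in yaml):
//
//   spec:
//     validation:
//       openAPIV3Schema:
//         properties:
//           spec:
//             nullable: true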
- patchFileName := existingFileName + "-merge-patch" - if _, err := os.Stat(patchFileName); err == nil { - fmt.Printf("Applying patch %q ...\n", patchFileName) - - yamlPatch, err := ioutil.ReadFile(patchFileName) - if err != nil { - return fmt.Errorf("failed to read yaml-merge-patch %q: %v", patchFileName, err) - } - var patch yaml.MapSlice - if err := yaml.Unmarshal(yamlPatch, &patch); err != nil { - return fmt.Errorf("failed to unmarshal yaml merge patch %q: %v", patchFileName, err) - } - if !onlyHasNoneOr(patch, "spec", "validation") { - return fmt.Errorf("patch in %q can only have spec.validation", patchFileName) - } - validationPatch, _, err := nested(patch, "spec", "validation") - if err != nil { - return fmt.Errorf("failed to get spec.validation from %q: %v", patchFileName, err) - } - if yamlPatch, err = yaml.Marshal(validationPatch); err != nil { - return fmt.Errorf("failed to marshal spec.validation of %q: %v", patchFileName, err) - } - jsonPatch, err := utilyaml.ToJSON(yamlPatch) - if err != nil { - return fmt.Errorf("failed to convert yaml of %q to json: %v", patchFileName, err) - } - yamlValidation, err := yaml.Marshal(validation) - if err != nil { - return fmt.Errorf("failed to marshal generated validation schema of %q: %v", existingFileName, err) - } - jsonValidation, err := utilyaml.ToJSON(yamlValidation) - if err != nil { - return fmt.Errorf("failed to convert yaml validation of %q to json: %v", existingFileName, err) - } - if jsonValidation, err = jsonpatch.MergePatch(jsonValidation, jsonPatch); err != nil { - return fmt.Errorf("failed to patch %q with %q: %v", existingFileName, patchFileName, err) - } - if err := yaml.Unmarshal(jsonValidation, &validation); err != nil { - return fmt.Errorf("failed to unmarshal patched validation schema of %q: %v", existingFileName, err) - } - } - - if validation == nil { - continue - } - - updated, err := set(crd.Yaml, validation, "spec", "validation") - if err != nil { - return fmt.Errorf("failed to set spec.validation in %s: %v", existingFileName, err) - } - if reflect.DeepEqual(updated, crd.Yaml) { - fmt.Printf("Validation of %s in %s did not change.\n", crd.KindGroup, existingFileName) - continue - } - - bs, err := yaml.Marshal(updated) - if err != nil { - return err - } - - // write updated file, either to old location, or to temp dir in verify mode - newFn := existingFileName - if *verifyOnly { - newFn = filepath.Join(tmpDir, filepath.Base(existingFileName)) - } else { - fmt.Printf("Updating validation of %s in %s.\n", crd.KindGroup, existingFileName) - } - if err := ioutil.WriteFile(newFn, bs, 0644); err != nil { - return err - } - - // compare old and new file - if *verifyOnly { - out, err := exec.Command("diff", "-u", existingFileName, newFn).CombinedOutput() - if err != nil { - fmt.Println(string(out)) - dirty = true - } - } - } - - if noneFound { - fmt.Printf("None of the found API types has a corresponding CRD manifest. 
These API types where found:\n\n") - for _, withValidation := range fromKubebuilder { - fmt.Printf(" %s\n", withValidation.KindGroup) - } - fmt.Printf("These CRDs were found:\n\n") - for existingKindGroup := range existingFileNames { - fmt.Printf(" %s\n", existingKindGroup) - } - return fmt.Errorf("no API type for found CRD manifests") - } - - if *verifyOnly && dirty { - return fmt.Errorf("verification failed") - } - - return nil -} - -func nested(x interface{}, pth ...string) (interface{}, bool, error) { - if len(pth) == 0 { - return x, true, nil - } - m, ok := x.(yaml.MapSlice) - if !ok { - return nil, false, fmt.Errorf("%s is not an object, but %T", strings.Join(pth, "."), x) - } - for _, item := range m { - s, ok := item.Key.(string) - if !ok { - continue - } - if s == pth[0] { - ret, found, err := nested(item.Value, pth[1:]...) - if err != nil { - return ret, found, fmt.Errorf("%s.%s", pth[0], err) - } - return ret, found, nil - } - } - return nil, false, nil -} - -func set(x interface{}, v interface{}, pth ...string) (interface{}, error) { - if len(pth) == 0 { - return v, nil - } - - if x == nil { - result, err := set(nil, v, pth[1:]...) - if err != nil { - return nil, fmt.Errorf("%s.%s", pth[0], err) - } - return yaml.MapSlice{yaml.MapItem{Key: pth[0], Value: result}}, nil - } - - m, ok := x.(yaml.MapSlice) - if !ok { - return nil, fmt.Errorf("%s is not an object", strings.Join(pth, ".")) - } - - foundAt := -1 - for i, item := range m { - s, ok := item.Key.(string) - if !ok { - continue - } - if s == pth[0] { - foundAt = i - break - } - } - - if foundAt < 0 { - ret := make(yaml.MapSlice, len(m), len(m)+1) - copy(ret, m) - result, err := set(nil, v, pth[1:]...) - if err != nil { - return nil, fmt.Errorf("%s.%s", pth[0], err) - } - return append(ret, yaml.MapItem{Key: pth[0], Value: result}), nil - } - - result, err := set(m[foundAt].Value, v, pth[1:]...) - ret := make(yaml.MapSlice, len(m)) - copy(ret, m) - if err != nil { - return nil, fmt.Errorf("%s.%s", pth[0], err) - } - ret[foundAt].Value = result - return ret, nil -} - -// onlyHasNoneOr checks for existance of the given path, but nothing next to it is allowed -func onlyHasNoneOr(x interface{}, pth ...string) bool { - if len(pth) == 0 { - return true - } - m, ok := x.(yaml.MapSlice) - if !ok { - return false - } - switch len(m) { - case 0: - return true - case 1: - s, ok := m[0].Key.(string) - if !ok || s != pth[0] { - return false - } - return onlyHasNoneOr(m[0].Value, pth[1:]...) - default: - return false - } -} - -type KindGroupYaml struct { - KindGroup string - Yaml interface{} -} - -// crdsFromDirectory returns CRDs by file path -func crdsFromDirectory(dir string) (map[string]KindGroupYaml, error) { - ret := map[string]KindGroupYaml{} - infos, err := ioutil.ReadDir(dir) - if err != nil { - return nil, err - } - for _, info := range infos { - if info.IsDir() { - continue - } - if !strings.HasSuffix(info.Name(), ".yaml") { - continue - } - bs, err := ioutil.ReadFile(filepath.Join(dir, info.Name())) - if err != nil { - return nil, err - } - - obj, _, err := codecs.UniversalDeserializer().Decode(bs, nil, nil) - if err != nil { - continue - } - crd, ok := obj.(*v1beta1.CustomResourceDefinition) - if !ok { - continue - } - - var y yaml.MapSlice - if err := yaml.Unmarshal(bs, &y); err != nil { - fmt.Printf("Warning: failed to unmarshal %q, skipping\n", info.Name()) - continue - } - key := crd.Spec.Names.Kind + "." 
+ crd.Spec.Group - ret[filepath.Join(dir, info.Name())] = KindGroupYaml{key, y} - } - if err != nil { - return nil, err - } - return ret, err -} diff --git a/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/main.go b/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/main.go deleted file mode 100755 index 228a800cca18..000000000000 --- a/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/main.go +++ /dev/null @@ -1,15 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/openshift/library-go/cmd/crd-schema-gen/generator" -) - -func main() { - if err := generator.Run(); err != nil { - fmt.Println(err) - os.Exit(1) - } -} diff --git a/vendor/github.com/openshift/library-go/glide.lock b/vendor/github.com/openshift/library-go/glide.lock index 2c67dcfaf1ad..f448210d46bf 100644 --- a/vendor/github.com/openshift/library-go/glide.lock +++ b/vendor/github.com/openshift/library-go/glide.lock @@ -1,5 +1,5 @@ -hash: 14182a87b2489ea8cd2db705bf09aad592752d9c9f7cc6cc840a76bcb179a2e8 -updated: 2019-07-14T22:28:29.452706+02:00 +hash: 4812b0cc8114a9f73471b786f4f760b3c363b7fa56a0f8fd83b56d263bc2e616 +updated: 2019-08-15T14:58:38.31621537-04:00 imports: - name: bitbucket.org/ww/goautoneg version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675 @@ -17,7 +17,7 @@ imports: - name: github.com/certifi/gocertifi version: ee1a9a0726d2ae45f54118cac878c990d4016ded - name: github.com/containerd/continuity - version: aaeac12a7ffcd198ae25440a9dff125c2e2703a7 + version: f2a389ac0a02ce21c09edd7344677a601970f41c subpackages: - pathdriver - name: github.com/coreos/etcd @@ -159,7 +159,7 @@ imports: - name: github.com/docker/go-units version: 519db1ee28dcc9fd2474ae59fca29a810482bfb1 - name: github.com/docker/libnetwork - version: 14f9d751adc2d51b38d14b4e14419b76466d3b94 + version: 7f13a5c99f4bb76a4122035d495984b6a09739bb subpackages: - ipamutils - name: github.com/docker/libtrust @@ -175,7 +175,7 @@ imports: - name: github.com/getsentry/raven-go version: c977f96e109525a5d8fa10a19165341f601f38b0 - name: github.com/ghodss/yaml - version: c7ce16629ff4cd059ed96ed06419dd3856fd3577 + version: 25d852aebe32c875e9c044af3eef9c7dc6bc777f - name: github.com/go-openapi/jsonpointer version: ef5f0afec364d3b9396b7b77b43dbe26bf1f8004 - name: github.com/go-openapi/jsonreference @@ -184,8 +184,6 @@ imports: version: 5bae59e25b21498baea7f9d46e9c147ec106a42e - name: github.com/go-openapi/swag version: 5899d5c5e619fda5fa86e14795a835f473ca284c -- name: github.com/gobuffalo/envy - version: 043cb4b8af871b49563291e32c66bb84378a60ac - name: github.com/gogo/protobuf version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 subpackages: @@ -200,12 +198,10 @@ imports: - name: github.com/golang/protobuf version: b4deda0973fb4c70b50d226b1af49f3da59f5265 subpackages: - - jsonpb - proto - ptypes - ptypes/any - ptypes/duration - - ptypes/struct - ptypes/timestamp - name: github.com/gonum/blas version: f22b278b28ac9805aadd613a754a60c35b24ae69 @@ -251,7 +247,7 @@ imports: - compiler - extensions - name: github.com/gorilla/mux - version: d83b6ffe499a29cc05fc977988d0392851779620 + version: e67b3c02c7195c052acff13261f0c9fd1ba53011 - name: github.com/grpc-ecosystem/go-grpc-prometheus version: 2500245aa6110c562d17020fb31a2c133d737799 - name: github.com/hashicorp/golang-lru @@ -262,8 +258,6 @@ imports: version: 9316a62528ac99aaecb4e47eadd6dc8aa6533d58 - name: github.com/inconshreveable/mousetrap version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -- name: github.com/joho/godotenv - version: 5c0e6c6ab1a0a9ef0a8822cba3a05d62f7dad941 - name: 
github.com/json-iterator/go version: ab8a2e0c74be9d3be70b3184d9acc634935ded82 - name: github.com/jteeuwen/go-bindata @@ -274,14 +268,12 @@ imports: - buffer - jlexer - jwriter -- name: github.com/markbates/inflect - version: d582c680dc4d29c2279628ae00e743005bfcd4fe - name: github.com/matttproud/golang_protobuf_extensions version: c12348ce28de40eed0136aa2b644d0ee0650e56c subpackages: - pbutil - name: github.com/Microsoft/go-winio - version: 881e3d46423d592d11da9873ff6581dc577a1d0f + version: 6c72808b55902eae4c5943626030429ff20f3b63 subpackages: - pkg/guid - name: github.com/modern-go/concurrent @@ -302,12 +294,12 @@ imports: - specs-go - specs-go/v1 - name: github.com/opencontainers/runc - version: 6cccc1760d57d9e1bc856b96eeb7ee02b7b8101d + version: 2e94378464ae22b92e1335c200edb37ebc94a1b7 subpackages: - libcontainer/system - libcontainer/user - name: github.com/openshift/api - version: f15120709e0ac8de84e11616d8f0cac54e8f52e3 + version: a94e914914f4228d0bcba6fc8a22614c5f5e2dad subpackages: - apps - apps/v1 @@ -354,7 +346,7 @@ imports: - webconsole - webconsole/v1 - name: github.com/openshift/client-go - version: c44a8b61b9f46cd9e802384dfeda0bc9942db68a + version: 5a5508328169b8a6992ea4ef711add89ddce3c6d subpackages: - apps/clientset/versioned/scheme - apps/clientset/versioned/typed/apps/v1 @@ -411,20 +403,10 @@ imports: version: 8a290539e2e8629dbc4e6bad948158f790ec31f4 - name: github.com/PuerkitoBio/urlesc version: 5bd2802263f21d8788851d5305584c82a5c75d7e -- name: github.com/rogpeppe/go-internal - version: 6f68bf1e81f8552c7dbd47f3bc4371c2db0941a6 - subpackages: - - modfile - - module - - semver - name: github.com/sigma/go-inotify version: c87b6cf5033d2c6486046f045eeebdc3d910fd38 - name: github.com/sirupsen/logrus version: 89742aefa4b206dcf400792f3bd35b542998eb3b -- name: github.com/spf13/afero - version: 588a75ec4f32903aa5e39a2619ba6a4631e28424 - subpackages: - - mem - name: github.com/spf13/cobra version: c439c4fa093711d42e1b01acb1235b52004753c1 - name: github.com/spf13/pflag @@ -434,12 +416,22 @@ imports: subpackages: - bcrypt - blowfish + - cryptobyte + - cryptobyte/asn1 + - ed25519 + - ed25519/internal/edwards25519 + - internal/subtle + - nacl/secretbox + - poly1305 + - salsa20/salsa - ssh/terminal - name: golang.org/x/net version: 65e2d4e15006aab9813ff8769e768bbf4bb667a0 subpackages: - context - context/ctxhttp + - html + - html/atom - http/httpguts - http2 - http2/hpack @@ -478,10 +470,9 @@ imports: version: 2382e3994d48b1d22acc2c86bcad0a2aff028e32 subpackages: - container/intsets - - go/ast/astutil - imports - name: google.golang.org/appengine - version: b2f4a3cf3c67576a2ee09e1fe62656a5086ce880 + version: 54a98f90d1c46b7731eb8fb305d2a321c30ef610 subpackages: - internal - internal/base @@ -532,7 +523,7 @@ imports: - name: gopkg.in/natefinch/lumberjack.v2 version: 20b71e5b60d756d3d2f80def009790325acc2b23 - name: gopkg.in/yaml.v2 - version: 51d6538a90f86fe93ac480b35f37b2be17fef232 + version: 5420a8b6744d3b0345ab293f6fcba19c978f1183 - name: k8s.io/api version: 40a48860b5abbba9aa891b02b32da429b08d96a0 subpackages: @@ -737,6 +728,8 @@ imports: - discovery - discovery/fake - dynamic + - dynamic/dynamicinformer + - dynamic/dynamiclister - dynamic/fake - informers - informers/admissionregistration @@ -942,14 +935,6 @@ imports: subpackages: - cli/flag - logs -- name: k8s.io/gengo - version: e17681d19d3ac4837a019ece36c2a0ec31ffe985 - subpackages: - - args - - generator - - namer - - parser - - types - name: k8s.io/klog version: 8e90cee79f823779174776412c13478955131846 - name: 
k8s.io/kube-aggregator @@ -973,18 +958,11 @@ imports: version: c2654d5206da6b7b6ace12841e8f359bb89b443c subpackages: - buffer + - diff + - field - integer + - pointer - trace -- name: sigs.k8s.io/controller-tools - version: 72ae52c08b9dd626cfb64ebef0fbf40ce667939b - repo: https://github.com/openshift/kubernetes-sigs-controller-tools - subpackages: - - pkg/crd/generator - - pkg/crd/util - - pkg/internal/codegen - - pkg/internal/codegen/parse - - pkg/internal/general - - pkg/util - name: sigs.k8s.io/structured-merge-diff version: e85c7b244fd2cc57bb829d73a061f93a441e63ce subpackages: diff --git a/vendor/github.com/openshift/library-go/glide.yaml b/vendor/github.com/openshift/library-go/glide.yaml index 920b7ca6e8e7..6d08aa413c91 100644 --- a/vendor/github.com/openshift/library-go/glide.yaml +++ b/vendor/github.com/openshift/library-go/glide.yaml @@ -17,14 +17,6 @@ import: - package: github.com/openshift/client-go version: master -# crd-schema-gen - # TODO: we need to this to get nullable patch, but we will replace this with new repo soon. -- package: sigs.k8s.io/controller-tools - repo: https://github.com/openshift/kubernetes-sigs-controller-tools - version: origin-4.1-kubernetes-1.13.4 -- package: k8s.io/gengo - version: e17681d19d3ac4837a019ece36c2a0ec31ffe985 - # sig-master - needed for file observer - package: github.com/sigma/go-inotify version: c87b6cf5033d2c6486046f045eeebdc3d910fd38 diff --git a/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client.go b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client.go index e15a691a4959..e93572cdc1d8 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client.go @@ -1,10 +1,10 @@ package genericoperatorclient import ( + "reflect" + "strings" "time" - "github.com/imdario/mergo" - "k8s.io/apimachinery/pkg/runtime" operatorv1 "github.com/openshift/api/operator/v1" @@ -65,6 +65,9 @@ func (c dynamicOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *op return spec, status, instance.GetResourceVersion(), nil } +// UpdateOperatorSpec overwrites the operator object spec with the values given +// in operatorv1.OperatorSpec while preserving pre-existing spec fields that have +// no correspondence in operatorv1.OperatorSpec. func (c dynamicOperatorClient) UpdateOperatorSpec(resourceVersion string, spec *operatorv1.OperatorSpec) (*operatorv1.OperatorSpec, string, error) { uncastOriginal, err := c.informer.Lister().Get(globalConfigName) if err != nil { @@ -90,6 +93,9 @@ func (c dynamicOperatorClient) UpdateOperatorSpec(resourceVersion string, spec * return retSpec, ret.GetResourceVersion(), nil } +// UpdateOperatorStatus overwrites the operator object status with the values given +// in operatorv1.OperatorStatus while preserving pre-existing status fields that have +// no correspondence in operatorv1.OperatorStatus. 
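// For illustration (hypothetical field values, not from this change): if the
// stored object carries status {"readyReplicas": 3, "customField": "x"} and
// the given OperatorStatus only sets readyReplicas, the update overwrites
// readyReplicas but carries "customField" over unchanged, because it has no
// corresponding field in operatorv1.OperatorStatus.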
func (c dynamicOperatorClient) UpdateOperatorStatus(resourceVersion string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) { uncastOriginal, err := c.informer.Lister().Get(globalConfigName) if err != nil { @@ -132,24 +138,28 @@ func getOperatorSpecFromUnstructured(obj map[string]interface{}) (*operatorv1.Op } func setOperatorSpecFromUnstructured(obj map[string]interface{}, spec *operatorv1.OperatorSpec) error { - // we cannot simply set the entire map because doing so would stomp unknown fields, like say a static pod operator spec when cast as an operator spec - newUnstructuredSpec, err := runtime.DefaultUnstructuredConverter.ToUnstructured(spec) + // we cannot simply set the entire map because doing so would stomp unknown fields, + // like say a static pod operator spec when cast as an operator spec + newSpec, err := runtime.DefaultUnstructuredConverter.ToUnstructured(spec) if err != nil { return err } - originalUnstructuredSpec, exists, err := unstructured.NestedMap(obj, "spec") - if !exists { - return unstructured.SetNestedMap(obj, newUnstructuredSpec, "spec") - } + origSpec, preExistingSpec, err := unstructured.NestedMap(obj, "spec") if err != nil { return err } - if err := mergo.Merge(&originalUnstructuredSpec, newUnstructuredSpec, mergo.WithOverride); err != nil { - return err - } - - return unstructured.SetNestedMap(obj, originalUnstructuredSpec, "spec") + if preExistingSpec { + flds := topLevelFields(*spec) + for k, v := range origSpec { + if !flds[k] { + if err := unstructured.SetNestedField(newSpec, v, k); err != nil { + return err + } + } + } + } + return unstructured.SetNestedMap(obj, newSpec, "spec") } func getOperatorStatusFromUnstructured(obj map[string]interface{}) (*operatorv1.OperatorStatus, error) { @@ -168,23 +178,48 @@ func getOperatorStatusFromUnstructured(obj map[string]interface{}) (*operatorv1. 
return ret, nil } -func setOperatorStatusFromUnstructured(obj map[string]interface{}, spec *operatorv1.OperatorStatus) error { - // we cannot simply set the entire map because doing so would stomp unknown fields, like say a static pod operator spec when cast as an operator spec - newUnstructuredStatus, err := runtime.DefaultUnstructuredConverter.ToUnstructured(spec) +func setOperatorStatusFromUnstructured(obj map[string]interface{}, status *operatorv1.OperatorStatus) error { + // we cannot simply set the entire map because doing so would stomp unknown fields, + // like say a static pod operator status when cast as an operator status + newStatus, err := runtime.DefaultUnstructuredConverter.ToUnstructured(status) if err != nil { return err } - originalUnstructuredStatus, exists, err := unstructured.NestedMap(obj, "status") - if !exists { - return unstructured.SetNestedMap(obj, newUnstructuredStatus, "status") - } + origStatus, preExistingStatus, err := unstructured.NestedMap(obj, "status") if err != nil { return err } - if err := mergo.Merge(&originalUnstructuredStatus, newUnstructuredStatus, mergo.WithOverride); err != nil { - return err - } + if preExistingStatus { + flds := topLevelFields(*status) + for k, v := range origStatus { + if !flds[k] { + if err := unstructured.SetNestedField(newStatus, v, k); err != nil { + return err + } + } + } + } + return unstructured.SetNestedMap(obj, newStatus, "status") +} - return unstructured.SetNestedMap(obj, originalUnstructuredStatus, "status") +func topLevelFields(obj interface{}) map[string]bool { + ret := map[string]bool{} + t := reflect.TypeOf(obj) + for i := 0; i < t.NumField(); i++ { + fld := t.Field(i) + fieldName := fld.Name + if jsonTag := fld.Tag.Get("json"); jsonTag == "-" { + continue + } else if jsonTag != "" { + // check for possible comma as in "...,omitempty" + var commaIdx int + if commaIdx = strings.Index(jsonTag, ","); commaIdx < 0 { + commaIdx = len(jsonTag) + } + fieldName = jsonTag[:commaIdx] + } + ret[fieldName] = true + } + return ret } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client_test.go b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client_test.go index 15e39e8962a3..df2550573479 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client_test.go @@ -4,6 +4,7 @@ import ( "reflect" "testing" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/diff" operatorv1 "github.com/openshift/api/operator/v1" @@ -29,10 +30,74 @@ func TestSetOperatorSpecFromUnstructured(t *testing.T) { }, expected: map[string]interface{}{ "spec": map[string]interface{}{ - "non-standard-field": "value", - "logLevel": "Trace", - "managementState": "", - "operatorLogLevel": "", + "non-standard-field": "value", + "logLevel": "Trace", + "managementState": "", + "operatorLogLevel": "", + "unsupportedConfigOverrides": nil, + "observedConfig": nil, + }, + }, + }, + { + name: "keep-everything-outside-of-spec", + in: map[string]interface{}{ + "kind": "Foo", + "apiVersion": "bar/v1", + "status": map[string]interface{}{"foo": "bar"}, + "spec": map[string]interface{}{}, + }, + spec: &operatorv1.OperatorSpec{}, + expected: map[string]interface{}{ + "kind": "Foo", + "apiVersion": "bar/v1", + "status": map[string]interface{}{"foo": "bar"}, + "spec": map[string]interface{}{ + "logLevel": "", + 
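// The zero-valued known fields expected below appear because the full
// OperatorSpec is converted with ToUnstructured, which materializes every
// top-level field; only keys absent from topLevelFields(spec) are carried
// over from the original object.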
"managementState": "", + "operatorLogLevel": "", + "unsupportedConfigOverrides": nil, + "observedConfig": nil, + }, + }, + }, + { + name: "replace-rawextensions", + in: map[string]interface{}{ + "spec": map[string]interface{}{ + "unsupportedConfigOverrides": map[string]interface{}{"foo": "bar"}, + }, + }, + spec: &operatorv1.OperatorSpec{ + LogLevel: operatorv1.Trace, + }, + expected: map[string]interface{}{ + "spec": map[string]interface{}{ + "logLevel": "Trace", + "managementState": "", + "operatorLogLevel": "", + "unsupportedConfigOverrides": nil, + "observedConfig": nil, + }, + }, + }, + { + name: "remove-observed-fields", + in: map[string]interface{}{ + "spec": map[string]interface{}{ + "observedConfig": map[string]interface{}{"a": "1", "b": "2"}, + }, + }, + spec: &operatorv1.OperatorSpec{ + ObservedConfig: runtime.RawExtension{Raw: []byte(`{"a":1}`)}, + }, + expected: map[string]interface{}{ + "spec": map[string]interface{}{ + "logLevel": "", + "managementState": "", + "operatorLogLevel": "", + "unsupportedConfigOverrides": nil, + "observedConfig": map[string]interface{}{"a": int64(1)}, }, }, }, @@ -87,6 +152,24 @@ func TestSetOperatorStatusFromUnstructured(t *testing.T) { }, }, }, + { + name: "keep-everything-outside-of-status", + in: map[string]interface{}{ + "kind": "Foo", + "apiVersion": "bar/v1", + "spec": map[string]interface{}{"foo": "bar"}, + "status": map[string]interface{}{}, + }, + status: &operatorv1.OperatorStatus{}, + expected: map[string]interface{}{ + "kind": "Foo", + "apiVersion": "bar/v1", + "spec": map[string]interface{}{"foo": "bar"}, + "status": map[string]interface{}{ + "readyReplicas": int64(0), + }, + }, + }, { name: "replace-condition", in: map[string]interface{}{ diff --git a/vendor/github.com/openshift/oc/.gitignore b/vendor/github.com/openshift/oc/.gitignore deleted file mode 100644 index da9d64d49738..000000000000 --- a/vendor/github.com/openshift/oc/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -/oc -/clicheck -/gendocs -/genman -/_output diff --git a/vendor/github.com/openshift/oc/LICENSE b/vendor/github.com/openshift/oc/LICENSE deleted file mode 100644 index c4ea8b6f9d88..000000000000 --- a/vendor/github.com/openshift/oc/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Red Hat, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/openshift/oc/Makefile b/vendor/github.com/openshift/oc/Makefile deleted file mode 100644 index 84e1743f9351..000000000000 --- a/vendor/github.com/openshift/oc/Makefile +++ /dev/null @@ -1,89 +0,0 @@ -all: build -.PHONY: all - -# Include the library makefile -include $(addprefix ./vendor/github.com/openshift/library-go/alpha-build-machinery/make/, \ - golang.mk \ - targets/openshift/deps.mk \ - targets/openshift/images.mk \ - targets/openshift/rpm.mk \ -) - -GO_LD_EXTRAFLAGS :=-X github.com/openshift/oc/vendor/k8s.io/kubernetes/pkg/version.gitMajor="1" \ - -X github.com/openshift/oc/vendor/k8s.io/kubernetes/pkg/version.gitMinor="14" \ - -X github.com/openshift/oc/vendor/k8s.io/kubernetes/pkg/version.gitVersion="v1.14.0+724e12f93f" \ - -X github.com/openshift/oc/vendor/k8s.io/kubernetes/pkg/version.gitCommit="$(SOURCE_GIT_COMMIT)" \ - -X github.com/openshift/oc/vendor/k8s.io/kubernetes/pkg/version.buildDate="$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')" \ - -X github.com/openshift/oc/vendor/k8s.io/kubernetes/pkg/version.gitTreeState="clean" - -GO_BUILD_PACKAGES :=$(strip \ - ./cmd/... 
\ - $(wildcard ./tools/*) \ -) -# These tags make sure we can statically link and avoid shared dependencies -GO_BUILD_FLAGS :=-tags 'include_gcs include_oss containers_image_openpgp gssapi' -GO_BUILD_FLAGS_DARWIN :=-tags 'include_gcs include_oss containers_image_openpgp' -GO_BUILD_FLAGS_WINDOWS :=-tags 'include_gcs include_oss containers_image_openpgp' - -OUTPUT_DIR :=_output -CROSS_BUILD_BINDIR :=$(OUTPUT_DIR)/bin -RPM_VERSION :=$(shell set -o pipefail && echo '$(SOURCE_GIT_TAG)' | sed -E 's/v([0-9]+\.[0-9]+\.[0-9]+)-.*/\1/') -RPM_EXTRAFLAGS := \ - --define 'version $(RPM_VERSION)' \ - --define 'dist .el7' \ - --define 'release 1' - -IMAGE_REGISTRY :=registry.svc.ci.openshift.org - -# This will call a macro called "build-image" which will generate image specific targets based on the parameters: -# $1 - target name -# $2 - image ref -# $3 - Dockerfile path -# $4 - context -$(call build-image,ocp-cli,$(IMAGE_REGISTRY)/ocp/4.2:cli,./images/cli/Dockerfile.rhel,.) - -$(call build-image,ocp-cli-artifacts,$(IMAGE_REGISTRY)/ocp/4.2:cli-artifacts,./images/cli-artifacts/Dockerfile.rhel,.) -image-ocp-cli-artifacts: image-ocp-cli - -$(call build-image,ocp-deployer,$(IMAGE_REGISTRY)/ocp/4.2:deployer,./images/deployer/Dockerfile.rhel,.) -image-ocp-deployer: image-ocp-cli - -$(call build-image,ocp-recycler,$(IMAGE_REGISTRY)/ocp/4.2:recycler,./images/recycler/Dockerfile.rhel,.) -image-ocp-recycler: image-ocp-cli - -update: update-generated-completions -.PHONY: update - -verify: verify-cli-conventions verify-generated-completions -.PHONY: verify - -verify-cli-conventions: - go run ./tools/clicheck -.PHONY: verify-cli-conventions - -update-generated-completions: build - hack/update-generated-completions.sh -.PHONY: update-generated-completions - -verify-generated-completions: build - hack/verify-generated-completions.sh -.PHONY: verify-generated-completions - - -cross-build-darwin-amd64: - +@GOOS=darwin GOARCH=amd64 $(MAKE) --no-print-directory build GO_BUILD_PACKAGES:=./cmd/oc GO_BUILD_FLAGS:="$(GO_BUILD_FLAGS_DARWIN)" GO_BUILD_BINDIR:=$(CROSS_BUILD_BINDIR)/darwin_amd64 -.PHONY: cross-build-darwin-amd64 - -cross-build-windows-amd64: - +@GOOS=windows GOARCH=amd64 $(MAKE) --no-print-directory build GO_BUILD_PACKAGES:=./cmd/oc GO_BUILD_FLAGS:="$(GO_BUILD_FLAGS_WINDOWS)" GO_BUILD_BINDIR:=$(CROSS_BUILD_BINDIR)/windows_amd64 -.PHONY: cross-build-windows-amd64 - -cross-build: cross-build-darwin-amd64 cross-build-windows-amd64 -.PHONY: cross-build - -clean-cross-build: - $(RM) -r '$(CROSS_BUILD_BINDIR)' - if [ -d '$(OUTPUT_DIR)' ]; then rmdir --ignore-fail-on-non-empty '$(OUTPUT_DIR)'; fi -.PHONY: clean-cross-build - -clean: clean-cross-build diff --git a/vendor/github.com/openshift/oc/OWNERS b/vendor/github.com/openshift/oc/OWNERS deleted file mode 100644 index 88a6b481dcf2..000000000000 --- a/vendor/github.com/openshift/oc/OWNERS +++ /dev/null @@ -1,12 +0,0 @@ -reviewers: - - deads2k - - mfojtik - - smarterclayton - - soltysh - - tnozicka -approvers: - - deads2k - - mfojtik - - smarterclayton - - soltysh - - tnozicka diff --git a/vendor/github.com/openshift/oc/README.md b/vendor/github.com/openshift/oc/README.md deleted file mode 100644 index 875b41d96781..000000000000 --- a/vendor/github.com/openshift/oc/README.md +++ /dev/null @@ -1 +0,0 @@ -### TODO: ADD README.md here diff --git a/vendor/github.com/openshift/oc/cmd/oc/oc.go b/vendor/github.com/openshift/oc/cmd/oc/oc.go deleted file mode 100644 index bc0905b30e23..000000000000 --- a/vendor/github.com/openshift/oc/cmd/oc/oc.go +++ /dev/null @@ -1,121 +0,0 @@ 
-package main - -import ( - goflag "flag" - "math/rand" - "os" - "path/filepath" - "runtime" - "time" - - "github.com/spf13/pflag" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - apimachineryruntime "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - utilflag "k8s.io/component-base/cli/flag" - "k8s.io/component-base/logs" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/kubectl/scheme" - - "github.com/openshift/api/apps" - "github.com/openshift/api/authorization" - "github.com/openshift/api/build" - "github.com/openshift/api/image" - "github.com/openshift/api/network" - "github.com/openshift/api/oauth" - "github.com/openshift/api/project" - "github.com/openshift/api/quota" - "github.com/openshift/api/route" - securityv1 "github.com/openshift/api/security/v1" - "github.com/openshift/api/template" - "github.com/openshift/api/user" - "github.com/openshift/library-go/pkg/serviceability" - - "github.com/openshift/oc/pkg/cli" - "github.com/openshift/oc/pkg/helpers/legacy" - "github.com/openshift/oc/pkg/version" -) - -func injectLoglevelFlag(flags *pflag.FlagSet) { - from := goflag.CommandLine - if flag := from.Lookup("v"); flag != nil { - level := flag.Value.(*klog.Level) - levelPtr := (*int32)(level) - flags.Int32Var(levelPtr, "loglevel", 0, "Set the level of log output (0-10)") - if flags.Lookup("v") == nil { - flags.Int32Var(levelPtr, "v", 0, "Set the level of log output (0-10)") - } - } -} - -func main() { - logs.InitLogs() - defer logs.FlushLogs() - defer serviceability.BehaviorOnPanic(os.Getenv("OPENSHIFT_ON_PANIC"), version.Get())() - defer serviceability.Profile(os.Getenv("OPENSHIFT_PROFILE")).Stop() - - rand.Seed(time.Now().UTC().UnixNano()) - if len(os.Getenv("GOMAXPROCS")) == 0 { - runtime.GOMAXPROCS(runtime.NumCPU()) - } - - pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) - pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) - injectLoglevelFlag(pflag.CommandLine) - - // the kubectl scheme expects to have all the recognizable external types it needs to consume. Install those here. 
- // We can't use the "normal" scheme because apply will use that to build stategic merge patches on CustomResources - utilruntime.Must(apps.Install(scheme.Scheme)) - utilruntime.Must(authorization.Install(scheme.Scheme)) - utilruntime.Must(build.Install(scheme.Scheme)) - utilruntime.Must(image.Install(scheme.Scheme)) - utilruntime.Must(network.Install(scheme.Scheme)) - utilruntime.Must(oauth.Install(scheme.Scheme)) - utilruntime.Must(project.Install(scheme.Scheme)) - utilruntime.Must(quota.Install(scheme.Scheme)) - utilruntime.Must(route.Install(scheme.Scheme)) - utilruntime.Must(installNonCRDSecurity(scheme.Scheme)) - utilruntime.Must(template.Install(scheme.Scheme)) - utilruntime.Must(user.Install(scheme.Scheme)) - legacy.InstallExternalLegacyAll(scheme.Scheme) - - // the legacyscheme is used in kubectl convert and get, so we need to - // explicitly install all types there too - utilruntime.Must(apps.Install(legacyscheme.Scheme)) - utilruntime.Must(authorization.Install(legacyscheme.Scheme)) - utilruntime.Must(build.Install(legacyscheme.Scheme)) - utilruntime.Must(image.Install(legacyscheme.Scheme)) - utilruntime.Must(network.Install(legacyscheme.Scheme)) - utilruntime.Must(oauth.Install(legacyscheme.Scheme)) - utilruntime.Must(project.Install(legacyscheme.Scheme)) - utilruntime.Must(quota.Install(legacyscheme.Scheme)) - utilruntime.Must(route.Install(legacyscheme.Scheme)) - utilruntime.Must(installNonCRDSecurity(legacyscheme.Scheme)) - utilruntime.Must(template.Install(legacyscheme.Scheme)) - utilruntime.Must(user.Install(legacyscheme.Scheme)) - legacy.InstallExternalLegacyAll(legacyscheme.Scheme) - - basename := filepath.Base(os.Args[0]) - command := cli.CommandFor(basename) - if err := command.Execute(); err != nil { - os.Exit(1) - } -} - -func installNonCRDSecurity(scheme *apimachineryruntime.Scheme) error { - scheme.AddKnownTypes(securityv1.GroupVersion, - &securityv1.PodSecurityPolicySubjectReview{}, - &securityv1.PodSecurityPolicySelfSubjectReview{}, - &securityv1.PodSecurityPolicyReview{}, - &securityv1.RangeAllocation{}, - &securityv1.RangeAllocationList{}, - ) - if err := corev1.AddToScheme(scheme); err != nil { - return err - } - metav1.AddToGroupVersion(scheme, securityv1.GroupVersion) - return nil -} diff --git a/vendor/github.com/openshift/oc/cmd/oc/oc_test.go b/vendor/github.com/openshift/oc/cmd/oc/oc_test.go deleted file mode 100644 index a56716ad73eb..000000000000 --- a/vendor/github.com/openshift/oc/cmd/oc/oc_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package main - -import ( - "reflect" - "testing" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/sets" - - "github.com/openshift/api/security" -) - -func TestInstallNonCRDSecurity(t *testing.T) { - withoutCRDs := runtime.NewScheme() - utilruntime.Must(installNonCRDSecurity(withoutCRDs)) - nonCRDTypes := gvks(withoutCRDs.AllKnownTypes()) - - complete := runtime.NewScheme() - utilruntime.Must(security.Install(complete)) - expected := gvks(complete.AllKnownTypes()) - expected.Delete("security.openshift.io/v1, Kind=SecurityContextConstraints") - expected.Delete("security.openshift.io/v1, Kind=SecurityContextConstraintsList") - - if !reflect.DeepEqual(expected, nonCRDTypes) { - t.Errorf("unexpected security/v1 scheme without CRD types\nunexpected: %v\nmissing: %v", nonCRDTypes.Difference(expected).List(), expected.Difference(nonCRDTypes).List()) - } -} - -func gvks(types map[schema.GroupVersionKind]reflect.Type) 
sets.String { - ret := sets.NewString() - for k := range types { - ret.Insert(k.String()) - } - return ret -} diff --git a/vendor/github.com/openshift/oc/contrib/completions/bash/.files_generated b/vendor/github.com/openshift/oc/contrib/completions/bash/.files_generated deleted file mode 100644 index 415f1771963f..000000000000 --- a/vendor/github.com/openshift/oc/contrib/completions/bash/.files_generated +++ /dev/null @@ -1 +0,0 @@ -oc diff --git a/vendor/github.com/openshift/oc/contrib/completions/bash/oc b/vendor/github.com/openshift/oc/contrib/completions/bash/oc deleted file mode 100644 index f913b3ad26dd..000000000000 --- a/vendor/github.com/openshift/oc/contrib/completions/bash/oc +++ /dev/null @@ -1,14870 +0,0 @@ - -# bash completion for oc -*- shell-script -*- - -__oc_debug() -{ - if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then - echo "$*" >> "${BASH_COMP_DEBUG_FILE}" - fi -} - -# Homebrew on Macs have version 1.3 of bash-completion which doesn't include -# _init_completion. This is a very minimal version of that function. -__oc_init_completion() -{ - COMPREPLY=() - _get_comp_words_by_ref "$@" cur prev words cword -} - -__oc_index_of_word() -{ - local w word=$1 - shift - index=0 - for w in "$@"; do - [[ $w = "$word" ]] && return - index=$((index+1)) - done - index=-1 -} - -__oc_contains_word() -{ - local w word=$1; shift - for w in "$@"; do - [[ $w = "$word" ]] && return - done - return 1 -} - -__oc_handle_reply() -{ - __oc_debug "${FUNCNAME[0]}" - case $cur in - -*) - if [[ $(type -t compopt) = "builtin" ]]; then - compopt -o nospace - fi - local allflags - if [ ${#must_have_one_flag[@]} -ne 0 ]; then - allflags=("${must_have_one_flag[@]}") - else - allflags=("${flags[*]} ${two_word_flags[*]}") - fi - COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) - if [[ $(type -t compopt) = "builtin" ]]; then - [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace - fi - - # complete after --flag=abc - if [[ $cur == *=* ]]; then - if [[ $(type -t compopt) = "builtin" ]]; then - compopt +o nospace - fi - - local index flag - flag="${cur%=*}" - __oc_index_of_word "${flag}" "${flags_with_completion[@]}" - COMPREPLY=() - if [[ ${index} -ge 0 ]]; then - PREFIX="" - cur="${cur#*=}" - ${flags_completion[${index}]} - if [ -n "${ZSH_VERSION}" ]; then - # zsh completion needs --flag= prefix - eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" - fi - fi - fi - return 0; - ;; - esac - - # check if we are handling a flag with special work handling - local index - __oc_index_of_word "${prev}" "${flags_with_completion[@]}" - if [[ ${index} -ge 0 ]]; then - ${flags_completion[${index}]} - return - fi - - # we are parsing a flag and don't have a special handler, no completion - if [[ ${cur} != "${words[cword]}" ]]; then - return - fi - - local completions - completions=("${commands[@]}") - if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then - completions=("${must_have_one_noun[@]}") - fi - if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then - completions+=("${must_have_one_flag[@]}") - fi - COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") ) - - if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then - COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") ) - fi - - if [[ ${#COMPREPLY[@]} -eq 0 ]]; then - declare -F __custom_func >/dev/null && __custom_func - fi - - # available in bash-completion >= 2, not always present on macOS - if declare -F __ltrim_colon_completions >/dev/null; then - __ltrim_colon_completions "$cur" - fi - - # If there is only 1 completion and 
it is a flag with an = it will be completed - # but we don't want a space after the = - if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then - compopt -o nospace - fi -} - -# The arguments should be in the form "ext1|ext2|extn" -__oc_handle_filename_extension_flag() -{ - local ext="$1" - _filedir "@(${ext})" -} - -__oc_handle_subdirs_in_dir_flag() -{ - local dir="$1" - pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 -} - -__oc_handle_flag() -{ - __oc_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - # if a command required a flag, and we found it, unset must_have_one_flag() - local flagname=${words[c]} - local flagvalue - # if the word contained an = - if [[ ${words[c]} == *"="* ]]; then - flagvalue=${flagname#*=} # take in as flagvalue after the = - flagname=${flagname%=*} # strip everything after the = - flagname="${flagname}=" # but put the = back - fi - __oc_debug "${FUNCNAME[0]}: looking for ${flagname}" - if __oc_contains_word "${flagname}" "${must_have_one_flag[@]}"; then - must_have_one_flag=() - fi - - # if you set a flag which only applies to this command, don't show subcommands - if __oc_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then - commands=() - fi - - # keep flag value with flagname as flaghash - # flaghash variable is an associative array which is only supported in bash > 3. - if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then - if [ -n "${flagvalue}" ] ; then - flaghash[${flagname}]=${flagvalue} - elif [ -n "${words[ $((c+1)) ]}" ] ; then - flaghash[${flagname}]=${words[ $((c+1)) ]} - else - flaghash[${flagname}]="true" # pad "true" for bool flag - fi - fi - - # skip the argument to a two word flag - if __oc_contains_word "${words[c]}" "${two_word_flags[@]}"; then - c=$((c+1)) - # if we are looking for a flags value, don't show commands - if [[ $c -eq $cword ]]; then - commands=() - fi - fi - - c=$((c+1)) - -} - -__oc_handle_noun() -{ - __oc_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - if __oc_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then - must_have_one_noun=() - elif __oc_contains_word "${words[c]}" "${noun_aliases[@]}"; then - must_have_one_noun=() - fi - - nouns+=("${words[c]}") - c=$((c+1)) -} - -__oc_handle_command() -{ - __oc_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - local next_command - if [[ -n ${last_command} ]]; then - next_command="_${last_command}_${words[c]//:/__}" - else - if [[ $c -eq 0 ]]; then - next_command="_oc_root_command" - else - next_command="_${words[c]//:/__}" - fi - fi - c=$((c+1)) - __oc_debug "${FUNCNAME[0]}: looking for ${next_command}" - declare -F "$next_command" >/dev/null && $next_command -} - -__oc_handle_word() -{ - if [[ $c -ge $cword ]]; then - __oc_handle_reply - return - fi - __oc_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - if [[ "${words[c]}" == -* ]]; then - __oc_handle_flag - elif __oc_contains_word "${words[c]}" "${commands[@]}"; then - __oc_handle_command - elif [[ $c -eq 0 ]]; then - __oc_handle_command - else - __oc_handle_noun - fi - __oc_handle_word -} - -# call oc get $1, -__oc_override_flag_list=(config cluster user context namespace server) -__oc_override_flags() -{ - local ${__oc_override_flag_list[*]} two_word_of of - for w in "${words[@]}"; do - if [ -n "${two_word_of}" ]; then - eval "${two_word_of}=\"--${two_word_of}=\${w}\"" - two_word_of= - continue - fi - for of in "${__oc_override_flag_list[@]}"; do - case "${w}" in - 
--${of}=*) - eval "${of}=\"${w}\"" - ;; - --${of}) - two_word_of="${of}" - ;; - esac - done - done - for of in "${__oc_override_flag_list[@]}"; do - if eval "test -n \"\$${of}\""; then - eval "echo \${${of}}" - fi - done -} -__oc_parse_get() -{ - - local template - template="{{ range .items }}{{ .metadata.name }} {{ end }}" - local oc_out - if oc_out=$(oc get $(__oc_override_flags) -o template --template="${template}" "$1" 2>/dev/null); then - COMPREPLY=( $( compgen -W "${oc_out[*]}" -- "$cur" ) ) - fi -} - -__oc_get_namespaces() -{ - local template oc_out - template="{{ range .items }}{{ .metadata.name }} {{ end }}" - if oc_out=$(oc get -o template --template="${template}" namespace 2>/dev/null); then - COMPREPLY=( $( compgen -W "${oc_out[*]}" -- "$cur" ) ) - fi -} - -__oc_get_resource() -{ - if [[ ${#nouns[@]} -eq 0 ]]; then - local oc_out - if oc_out=$(oc api-resources $(__oc_override_flags) -o name --cached --request-timeout=5s --verbs=get 2>/dev/null); then - COMPREPLY=( $( compgen -W "${oc_out[*]}" -- "$cur" ) ) - return 0 - fi - return 1 - fi - __oc_parse_get "${nouns[${#nouns[@]} -1]}" -} - -# $1 is the name of the pod we want to get the list of containers inside -__oc_get_containers() -{ - local template - template="{{ range .spec.containers }}{{ .name }} {{ end }}" - __oc_debug "${FUNCNAME} nouns are ${nouns[@]}" - - local len="${#nouns[@]}" - if [[ ${len} -ne 1 ]]; then - return - fi - local last=${nouns[${len} -1]} - local oc_out - if oc_out=$(oc get -o template --template="${template}" pods "${last}" 2>/dev/null); then - COMPREPLY=( $( compgen -W "${oc_out[*]}" -- "$cur" ) ) - fi -} - -# Require both a pod and a container to be specified -__oc_require_pod_and_container() -{ - if [[ ${#nouns[@]} -eq 0 ]]; then - __oc_parse_get pods - return 0 - fi; - __oc_get_containers - return 0 -} - -__custom_func() { - case ${last_command} in - - # first arg is the kind according to ValidArgs, second is resource name - oc_get | oc_describe | oc_delete | oc_label | oc_expose | oc_export | oc_patch | oc_annotate | oc_edit | oc_scale | oc_autoscale | oc_observe ) - __oc_get_resource - return - ;; - - # first arg is a pod name - oc_rsh | oc_exec | oc_port-forward | oc_attach) - if [[ ${#nouns[@]} -eq 0 ]]; then - __oc_parse_get pods - fi; - return - ;; - - # first arg is a pod name, second is a container name - oc_logs) - __oc_require_pod_and_container - return - ;; - - # first arg is a build config name - oc_start-build | oc_cancel-build) - if [[ ${#nouns[@]} -eq 0 ]]; then - __oc_parse_get buildconfigs - fi; - return - ;; - - # first arg is a deployment config OR deployment - oc_rollback) - if [[ ${#nouns[@]} -eq 0 ]]; then - __oc_parse_get deploymentconfigs,replicationcontrollers - fi; - return - ;; - - # first arg is a project name - oc_project) - if [[ ${#nouns[@]} -eq 0 ]]; then - __oc_parse_get projects - fi; - return - ;; - - # first arg is an image stream - oc_import-image) - if [[ ${#nouns[@]} -eq 0 ]]; then - __oc_parse_get imagestreams - fi; - return - ;; - - *) - ;; - esac -} - -_oc_adm_build-chain() -{ - last_command="oc_adm_build-chain" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--reverse") - local_nonpersistent_flags+=("--reverse") - flags+=("--trigger-only") - local_nonpersistent_flags+=("--trigger-only") - flags+=("--as=") - 
flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_certificate_approve() -{ - last_command="oc_adm_certificate_approve" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--force") - local_nonpersistent_flags+=("--force") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_certificate_deny() -{ - last_command="oc_adm_certificate_deny" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--force") - local_nonpersistent_flags+=("--force") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - 
flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_certificate() -{ - last_command="oc_adm_certificate" - commands=() - commands+=("approve") - commands+=("deny") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_completion() -{ - last_command="oc_adm_completion" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("bash") - must_have_one_noun+=("zsh") - noun_aliases=() -} - -_oc_adm_config_current-context() -{ - last_command="oc_adm_config_current-context" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - 
two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_delete-cluster() -{ - last_command="oc_adm_config_delete-cluster" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_delete-context() -{ - last_command="oc_adm_config_delete-context" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_get-clusters() -{ - last_command="oc_adm_config_get-clusters" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_get-contexts() -{ - last_command="oc_adm_config_get-contexts" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - 
flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_rename-context() -{ - last_command="oc_adm_config_rename-context" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_set() -{ - last_command="oc_adm_config_set" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--set-raw-bytes") - local_nonpersistent_flags+=("--set-raw-bytes") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_set-cluster() -{ - last_command="oc_adm_config_set-cluster" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--embed-certs") - local_nonpersistent_flags+=("--embed-certs") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - 
flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_set-context() -{ - last_command="oc_adm_config_set-context" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--current") - local_nonpersistent_flags+=("--current") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_set-credentials() -{ - last_command="oc_adm_config_set-credentials" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--auth-provider=") - local_nonpersistent_flags+=("--auth-provider=") - flags+=("--auth-provider-arg=") - local_nonpersistent_flags+=("--auth-provider-arg=") - flags+=("--embed-certs") - local_nonpersistent_flags+=("--embed-certs") - flags+=("--password=") - local_nonpersistent_flags+=("--password=") - flags+=("--username=") - local_nonpersistent_flags+=("--username=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_unset() -{ - last_command="oc_adm_config_unset" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - 
-_oc_adm_config_use-context() -{ - last_command="oc_adm_config_use-context" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_view() -{ - last_command="oc_adm_config_view" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--flatten") - local_nonpersistent_flags+=("--flatten") - flags+=("--merge") - local_nonpersistent_flags+=("--merge") - flags+=("--minify") - local_nonpersistent_flags+=("--minify") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--raw") - local_nonpersistent_flags+=("--raw") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config() -{ - last_command="oc_adm_config" - commands=() - commands+=("current-context") - commands+=("delete-cluster") - commands+=("delete-context") - commands+=("get-clusters") - commands+=("get-contexts") - commands+=("rename-context") - commands+=("set") - commands+=("set-cluster") - commands+=("set-context") - commands+=("set-credentials") - commands+=("unset") - commands+=("use-context") - commands+=("view") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - 
flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_cordon() -{ - last_command="oc_adm_cordon" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_create-bootstrap-project-template() -{ - last_command="oc_adm_create-bootstrap-project-template" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--name=") - local_nonpersistent_flags+=("--name=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_create-error-template() -{ - last_command="oc_adm_create-error-template" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - 
two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_create-kubeconfig() -{ - last_command="oc_adm_create-kubeconfig" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--master=") - local_nonpersistent_flags+=("--master=") - flags+=("--public-master=") - local_nonpersistent_flags+=("--public-master=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_create-login-template() -{ - last_command="oc_adm_create-login-template" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_create-provider-selection-template() -{ - last_command="oc_adm_create-provider-selection-template" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_drain() -{ - last_command="oc_adm_drain" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--delete-local-data") - local_nonpersistent_flags+=("--delete-local-data") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--force") - 
local_nonpersistent_flags+=("--force") - flags+=("--grace-period=") - local_nonpersistent_flags+=("--grace-period=") - flags+=("--ignore-daemonsets") - local_nonpersistent_flags+=("--ignore-daemonsets") - flags+=("--pod-selector=") - local_nonpersistent_flags+=("--pod-selector=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_groups_add-users() -{ - last_command="oc_adm_groups_add-users" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_groups_new() -{ - last_command="oc_adm_groups_new" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - 
flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_groups_prune() -{ - last_command="oc_adm_groups_prune" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--blacklist=") - flags_with_completion+=("--blacklist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--blacklist=") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--sync-config=") - flags_with_completion+=("--sync-config") - flags_completion+=("__oc_handle_filename_extension_flag yaml|yml") - local_nonpersistent_flags+=("--sync-config=") - flags+=("--whitelist=") - flags_with_completion+=("--whitelist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--whitelist=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_groups_remove-users() -{ - last_command="oc_adm_groups_remove-users" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_groups_sync() -{ - last_command="oc_adm_groups_sync" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--blacklist=") - 
flags_with_completion+=("--blacklist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--blacklist=") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--sync-config=") - flags_with_completion+=("--sync-config") - flags_completion+=("__oc_handle_filename_extension_flag yaml|yml") - local_nonpersistent_flags+=("--sync-config=") - flags+=("--type=") - local_nonpersistent_flags+=("--type=") - flags+=("--whitelist=") - flags_with_completion+=("--whitelist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--whitelist=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_groups() -{ - last_command="oc_adm_groups" - commands=() - commands+=("add-users") - commands+=("new") - commands+=("prune") - commands+=("remove-users") - commands+=("sync") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_migrate_etcd-ttl() -{ - last_command="oc_adm_migrate_etcd-ttl" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--cacert=") - local_nonpersistent_flags+=("--cacert=") - flags+=("--cert=") - local_nonpersistent_flags+=("--cert=") - flags+=("--etcd-address=") - local_nonpersistent_flags+=("--etcd-address=") - flags+=("--key=") - local_nonpersistent_flags+=("--key=") - flags+=("--lease-duration=") - local_nonpersistent_flags+=("--lease-duration=") - flags+=("--ttl-keys-prefix=") - local_nonpersistent_flags+=("--ttl-keys-prefix=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - 
two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_migrate_image-references() -{ - last_command="oc_adm_migrate_image-references" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--from-key=") - local_nonpersistent_flags+=("--from-key=") - flags+=("--include=") - local_nonpersistent_flags+=("--include=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--to-key=") - local_nonpersistent_flags+=("--to-key=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_migrate_legacy-hpa() -{ - last_command="oc_adm_migrate_legacy-hpa" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--from-key=") - local_nonpersistent_flags+=("--from-key=") - flags+=("--include=") - local_nonpersistent_flags+=("--include=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--to-key=") - local_nonpersistent_flags+=("--to-key=") - flags+=("--as=") - flags+=("--as-group=") 
- flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_migrate_storage() -{ - last_command="oc_adm_migrate_storage" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--bandwidth=") - local_nonpersistent_flags+=("--bandwidth=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--from-key=") - local_nonpersistent_flags+=("--from-key=") - flags+=("--include=") - local_nonpersistent_flags+=("--include=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--to-key=") - local_nonpersistent_flags+=("--to-key=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_migrate_template-instances() -{ - last_command="oc_adm_migrate_template-instances" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--from-key=") - local_nonpersistent_flags+=("--from-key=") - flags+=("--include=") - local_nonpersistent_flags+=("--include=") - 
flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--to-key=") - local_nonpersistent_flags+=("--to-key=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_migrate() -{ - last_command="oc_adm_migrate" - commands=() - commands+=("etcd-ttl") - commands+=("image-references") - commands+=("legacy-hpa") - commands+=("storage") - commands+=("template-instances") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_must-gather() -{ - last_command="oc_adm_must-gather" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--dest-dir=") - local_nonpersistent_flags+=("--dest-dir=") - flags+=("--image=") - local_nonpersistent_flags+=("--image=") - flags+=("--node-name=") - local_nonpersistent_flags+=("--node-name=") - flags+=("--source-dir=") - local_nonpersistent_flags+=("--source-dir=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_new-project() -{ - last_command="oc_adm_new-project" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - 
flags+=("--admin=") - local_nonpersistent_flags+=("--admin=") - flags+=("--admin-role=") - local_nonpersistent_flags+=("--admin-role=") - flags+=("--description=") - local_nonpersistent_flags+=("--description=") - flags+=("--display-name=") - local_nonpersistent_flags+=("--display-name=") - flags+=("--node-selector=") - local_nonpersistent_flags+=("--node-selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_node-logs() -{ - last_command="oc_adm_node-logs" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--case-sensitive") - local_nonpersistent_flags+=("--case-sensitive") - flags+=("--grep=") - two_word_flags+=("-g") - local_nonpersistent_flags+=("--grep=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--path=") - local_nonpersistent_flags+=("--path=") - flags+=("--raw") - local_nonpersistent_flags+=("--raw") - flags+=("--role=") - local_nonpersistent_flags+=("--role=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--since=") - local_nonpersistent_flags+=("--since=") - flags+=("--tail=") - local_nonpersistent_flags+=("--tail=") - flags+=("--unify") - local_nonpersistent_flags+=("--unify") - flags+=("--unit=") - two_word_flags+=("-u") - local_nonpersistent_flags+=("--unit=") - flags+=("--until=") - local_nonpersistent_flags+=("--until=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_options() -{ - last_command="oc_adm_options" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - 
flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_pod-network_isolate-projects() -{ - last_command="oc_adm_pod-network_isolate-projects" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--selector=") - local_nonpersistent_flags+=("--selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_pod-network_join-projects() -{ - last_command="oc_adm_pod-network_join-projects" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--selector=") - local_nonpersistent_flags+=("--selector=") - flags+=("--to=") - local_nonpersistent_flags+=("--to=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_pod-network_make-projects-global() -{ - last_command="oc_adm_pod-network_make-projects-global" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--selector=") - local_nonpersistent_flags+=("--selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - 
-_oc_adm_pod-network()
-{
-    last_command="oc_adm_pod-network"
-    commands=()
-    commands+=("isolate-projects")
-    commands+=("join-projects")
-    commands+=("make-projects-global")
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_policy_add-cluster-role-to-group()
-{
-    last_command="oc_adm_policy_add-cluster-role-to-group"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--rolebinding-name=")
-    local_nonpersistent_flags+=("--rolebinding-name=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_policy_add-cluster-role-to-user()
-{
-    last_command="oc_adm_policy_add-cluster-role-to-user"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--rolebinding-name=")
-    local_nonpersistent_flags+=("--rolebinding-name=")
-    flags+=("--serviceaccount=")
-    two_word_flags+=("-z")
-    local_nonpersistent_flags+=("--serviceaccount=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_add-role-to-group() -{ - last_command="oc_adm_policy_add-role-to-group" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--role-namespace=") - local_nonpersistent_flags+=("--role-namespace=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_add-role-to-user() -{ - last_command="oc_adm_policy_add-role-to-user" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--role-namespace=") - local_nonpersistent_flags+=("--role-namespace=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - 
flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_add-scc-to-group() -{ - last_command="oc_adm_policy_add-scc-to-group" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_add-scc-to-user() -{ - last_command="oc_adm_policy_add-scc-to-user" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_remove-cluster-role-from-group() -{ - last_command="oc_adm_policy_remove-cluster-role-from-group" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - 
flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_remove-cluster-role-from-user() -{ - last_command="oc_adm_policy_remove-cluster-role-from-user" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_remove-group() -{ - last_command="oc_adm_policy_remove-group" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") 
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_policy_remove-role-from-group()
-{
-    last_command="oc_adm_policy_remove-role-from-group"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--role-namespace=")
-    local_nonpersistent_flags+=("--role-namespace=")
-    flags+=("--rolebinding-name=")
-    local_nonpersistent_flags+=("--rolebinding-name=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_policy_remove-role-from-user()
-{
-    last_command="oc_adm_policy_remove-role-from-user"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--role-namespace=")
-    local_nonpersistent_flags+=("--role-namespace=")
-    flags+=("--rolebinding-name=")
-    local_nonpersistent_flags+=("--rolebinding-name=")
-    flags+=("--serviceaccount=")
-    two_word_flags+=("-z")
-    local_nonpersistent_flags+=("--serviceaccount=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_policy_remove-scc-from-group()
-{
-    last_command="oc_adm_policy_remove-scc-from-group"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_policy_remove-scc-from-user()
-{
-    last_command="oc_adm_policy_remove-scc-from-user"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--serviceaccount=")
-    two_word_flags+=("-z")
-    local_nonpersistent_flags+=("--serviceaccount=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_policy_remove-user()
-{
-    last_command="oc_adm_policy_remove-user"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_scc-review() -{ - last_command="oc_adm_policy_scc-review" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_scc-subject-review() -{ - last_command="oc_adm_policy_scc-subject-review" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--groups=") - two_word_flags+=("-g") - local_nonpersistent_flags+=("--groups=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - 
flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_who-can() -{ - last_command="oc_adm_policy_who-can" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy() -{ - last_command="oc_adm_policy" - commands=() - commands+=("add-cluster-role-to-group") - commands+=("add-cluster-role-to-user") - commands+=("add-role-to-group") - commands+=("add-role-to-user") - commands+=("add-scc-to-group") - commands+=("add-scc-to-user") - commands+=("remove-cluster-role-from-group") - commands+=("remove-cluster-role-from-user") - commands+=("remove-group") - commands+=("remove-role-from-group") - commands+=("remove-role-from-user") - commands+=("remove-scc-from-group") - commands+=("remove-scc-from-user") - commands+=("remove-user") - commands+=("scc-review") - commands+=("scc-subject-review") - commands+=("who-can") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - 
flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_prune_auth() -{ - last_command="oc_adm_prune_auth" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_prune_builds() -{ - last_command="oc_adm_prune_builds" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--keep-complete=") - local_nonpersistent_flags+=("--keep-complete=") - flags+=("--keep-failed=") - local_nonpersistent_flags+=("--keep-failed=") - flags+=("--keep-younger-than=") - local_nonpersistent_flags+=("--keep-younger-than=") - flags+=("--orphans") - local_nonpersistent_flags+=("--orphans") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - 
-_oc_adm_prune_deployments()
-{
-    last_command="oc_adm_prune_deployments"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--confirm")
-    local_nonpersistent_flags+=("--confirm")
-    flags+=("--keep-complete=")
-    local_nonpersistent_flags+=("--keep-complete=")
-    flags+=("--keep-failed=")
-    local_nonpersistent_flags+=("--keep-failed=")
-    flags+=("--keep-younger-than=")
-    local_nonpersistent_flags+=("--keep-younger-than=")
-    flags+=("--orphans")
-    local_nonpersistent_flags+=("--orphans")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_prune_groups()
-{
-    last_command="oc_adm_prune_groups"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--blacklist=")
-    flags_with_completion+=("--blacklist")
-    flags_completion+=("__oc_handle_filename_extension_flag txt")
-    local_nonpersistent_flags+=("--blacklist=")
-    flags+=("--confirm")
-    local_nonpersistent_flags+=("--confirm")
-    flags+=("--sync-config=")
-    flags_with_completion+=("--sync-config")
-    flags_completion+=("__oc_handle_filename_extension_flag yaml|yml")
-    local_nonpersistent_flags+=("--sync-config=")
-    flags+=("--whitelist=")
-    flags_with_completion+=("--whitelist")
-    flags_completion+=("__oc_handle_filename_extension_flag txt")
-    local_nonpersistent_flags+=("--whitelist=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_prune_images()
-{
-    last_command="oc_adm_prune_images"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--all")
-    local_nonpersistent_flags+=("--all")
-    flags+=("--confirm")
-    local_nonpersistent_flags+=("--confirm")
-    flags+=("--force-insecure")
-    local_nonpersistent_flags+=("--force-insecure")
-    flags+=("--ignore-invalid-refs")
-    local_nonpersistent_flags+=("--ignore-invalid-refs")
-    flags+=("--keep-tag-revisions=")
-    local_nonpersistent_flags+=("--keep-tag-revisions=")
-    flags+=("--keep-younger-than=")
-    local_nonpersistent_flags+=("--keep-younger-than=")
flags+=("--prune-over-size-limit") - local_nonpersistent_flags+=("--prune-over-size-limit") - flags+=("--prune-registry") - local_nonpersistent_flags+=("--prune-registry") - flags+=("--registry-url=") - local_nonpersistent_flags+=("--registry-url=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_prune() -{ - last_command="oc_adm_prune" - commands=() - commands+=("auth") - commands+=("builds") - commands+=("deployments") - commands+=("groups") - commands+=("images") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_release_extract() -{ - last_command="oc_adm_release_extract" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--command=") - local_nonpersistent_flags+=("--command=") - flags+=("--command-os=") - local_nonpersistent_flags+=("--command-os=") - flags+=("--file=") - local_nonpersistent_flags+=("--file=") - flags+=("--from=") - local_nonpersistent_flags+=("--from=") - flags+=("--git=") - local_nonpersistent_flags+=("--git=") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--max-per-registry=") - local_nonpersistent_flags+=("--max-per-registry=") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--signing-key=") - local_nonpersistent_flags+=("--signing-key=") - flags+=("--skip-verification") - local_nonpersistent_flags+=("--skip-verification") - flags+=("--to=") - local_nonpersistent_flags+=("--to=") - flags+=("--tools") - local_nonpersistent_flags+=("--tools") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - 
flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_release_info() -{ - last_command="oc_adm_release_info" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--bugs=") - local_nonpersistent_flags+=("--bugs=") - flags+=("--changelog=") - local_nonpersistent_flags+=("--changelog=") - flags+=("--changes-from=") - local_nonpersistent_flags+=("--changes-from=") - flags+=("--commits") - local_nonpersistent_flags+=("--commits") - flags+=("--contents") - local_nonpersistent_flags+=("--contents") - flags+=("--image-for=") - local_nonpersistent_flags+=("--image-for=") - flags+=("--include-images") - local_nonpersistent_flags+=("--include-images") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--max-per-registry=") - local_nonpersistent_flags+=("--max-per-registry=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--pullspecs") - local_nonpersistent_flags+=("--pullspecs") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--size") - local_nonpersistent_flags+=("--size") - flags+=("--skip-verification") - local_nonpersistent_flags+=("--skip-verification") - flags+=("--verify") - local_nonpersistent_flags+=("--verify") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_release_mirror() -{ - last_command="oc_adm_release_mirror" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--from=") - local_nonpersistent_flags+=("--from=") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--max-per-registry=") - local_nonpersistent_flags+=("--max-per-registry=") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--skip-release-image") - local_nonpersistent_flags+=("--skip-release-image") - flags+=("--skip-verification") - local_nonpersistent_flags+=("--skip-verification") - flags+=("--to=") - local_nonpersistent_flags+=("--to=") - flags+=("--to-image-stream=") - local_nonpersistent_flags+=("--to-image-stream=") - flags+=("--to-release-image=") - local_nonpersistent_flags+=("--to-release-image=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - 
flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_release_new() -{ - last_command="oc_adm_release_new" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-images") - local_nonpersistent_flags+=("--allow-missing-images") - flags+=("--component-versions=") - local_nonpersistent_flags+=("--component-versions=") - flags+=("--dir=") - local_nonpersistent_flags+=("--dir=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--exclude=") - local_nonpersistent_flags+=("--exclude=") - flags+=("--from-dir=") - local_nonpersistent_flags+=("--from-dir=") - flags+=("--from-image-stream=") - local_nonpersistent_flags+=("--from-image-stream=") - flags+=("--from-image-stream-file=") - two_word_flags+=("-f") - local_nonpersistent_flags+=("--from-image-stream-file=") - flags+=("--from-release=") - local_nonpersistent_flags+=("--from-release=") - flags+=("--include=") - local_nonpersistent_flags+=("--include=") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--mapping-file=") - local_nonpersistent_flags+=("--mapping-file=") - flags+=("--max-per-registry=") - local_nonpersistent_flags+=("--max-per-registry=") - flags+=("--metadata=") - local_nonpersistent_flags+=("--metadata=") - flags+=("--mirror=") - local_nonpersistent_flags+=("--mirror=") - flags+=("--name=") - local_nonpersistent_flags+=("--name=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--previous=") - local_nonpersistent_flags+=("--previous=") - flags+=("--reference-mode=") - local_nonpersistent_flags+=("--reference-mode=") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--release-manifest") - local_nonpersistent_flags+=("--release-manifest") - flags+=("--skip-manifest-check") - local_nonpersistent_flags+=("--skip-manifest-check") - flags+=("--skip-verification") - local_nonpersistent_flags+=("--skip-verification") - flags+=("--to-dir=") - local_nonpersistent_flags+=("--to-dir=") - flags+=("--to-file=") - local_nonpersistent_flags+=("--to-file=") - flags+=("--to-image=") - local_nonpersistent_flags+=("--to-image=") - flags+=("--to-image-base=") - local_nonpersistent_flags+=("--to-image-base=") - flags+=("--to-image-base-tag=") - local_nonpersistent_flags+=("--to-image-base-tag=") - flags+=("--to-signature=") - local_nonpersistent_flags+=("--to-signature=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - 
two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_release() -{ - last_command="oc_adm_release" - commands=() - commands+=("extract") - commands+=("info") - commands+=("mirror") - commands+=("new") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_taint() -{ - last_command="oc_adm_taint" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--overwrite") - local_nonpersistent_flags+=("--overwrite") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("node") - noun_aliases=() -} - -_oc_adm_top_images() -{ - last_command="oc_adm_top_images" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") 
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_top_imagestreams()
-{
-    last_command="oc_adm_top_imagestreams"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_top_node()
-{
-    last_command="oc_adm_top_node"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--heapster-namespace=")
-    local_nonpersistent_flags+=("--heapster-namespace=")
-    flags+=("--heapster-port=")
-    local_nonpersistent_flags+=("--heapster-port=")
-    flags+=("--heapster-scheme=")
-    local_nonpersistent_flags+=("--heapster-scheme=")
-    flags+=("--heapster-service=")
-    local_nonpersistent_flags+=("--heapster-service=")
-    flags+=("--no-headers")
-    local_nonpersistent_flags+=("--no-headers")
-    flags+=("--selector=")
-    two_word_flags+=("-l")
-    local_nonpersistent_flags+=("--selector=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_top_pod()
-{
-    last_command="oc_adm_top_pod"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--all-namespaces")
-    flags+=("-A")
-    local_nonpersistent_flags+=("--all-namespaces")
-    flags+=("--containers")
-    local_nonpersistent_flags+=("--containers")
-    flags+=("--heapster-namespace=")
-    local_nonpersistent_flags+=("--heapster-namespace=")
-    flags+=("--heapster-port=")
-    local_nonpersistent_flags+=("--heapster-port=")
-    flags+=("--heapster-scheme=")
-    local_nonpersistent_flags+=("--heapster-scheme=")
-    flags+=("--heapster-service=")
-    local_nonpersistent_flags+=("--heapster-service=")
-    flags+=("--no-headers")
-    local_nonpersistent_flags+=("--no-headers")
-    flags+=("--selector=")
-    two_word_flags+=("-l")
-    local_nonpersistent_flags+=("--selector=")
flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_top() -{ - last_command="oc_adm_top" - commands=() - commands+=("images") - commands+=("imagestreams") - commands+=("node") - commands+=("pod") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_uncordon() -{ - last_command="oc_adm_uncordon" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_upgrade() -{ - last_command="oc_adm_upgrade" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--clear") - local_nonpersistent_flags+=("--clear") - flags+=("--force") - local_nonpersistent_flags+=("--force") - flags+=("--to=") - local_nonpersistent_flags+=("--to=") - flags+=("--to-image=") - local_nonpersistent_flags+=("--to-image=") - flags+=("--to-latest") - local_nonpersistent_flags+=("--to-latest") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - 
flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_verify-image-signature() -{ - last_command="oc_adm_verify-image-signature" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--expected-identity=") - local_nonpersistent_flags+=("--expected-identity=") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--public-key=") - local_nonpersistent_flags+=("--public-key=") - flags+=("--registry-url=") - local_nonpersistent_flags+=("--registry-url=") - flags+=("--remove-all") - local_nonpersistent_flags+=("--remove-all") - flags+=("--save") - local_nonpersistent_flags+=("--save") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm() -{ - last_command="oc_adm" - commands=() - commands+=("build-chain") - commands+=("certificate") - commands+=("completion") - commands+=("config") - commands+=("cordon") - commands+=("create-bootstrap-project-template") - commands+=("create-error-template") - commands+=("create-kubeconfig") - commands+=("create-login-template") - commands+=("create-provider-selection-template") - commands+=("drain") - commands+=("groups") - commands+=("migrate") - commands+=("must-gather") - commands+=("new-project") - commands+=("node-logs") - commands+=("options") - commands+=("pod-network") - commands+=("policy") - commands+=("prune") - commands+=("release") - commands+=("taint") - commands+=("top") - commands+=("uncordon") - commands+=("upgrade") - commands+=("verify-image-signature") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - 
flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_annotate() -{ - last_command="oc_annotate" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--field-selector=") - local_nonpersistent_flags+=("--field-selector=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--overwrite") - local_nonpersistent_flags+=("--overwrite") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--resource-version=") - local_nonpersistent_flags+=("--resource-version=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_api-resources() -{ - last_command="oc_api-resources" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--api-group=") - local_nonpersistent_flags+=("--api-group=") - flags+=("--cached") - local_nonpersistent_flags+=("--cached") - flags+=("--namespaced") - local_nonpersistent_flags+=("--namespaced") - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--verbs=") - local_nonpersistent_flags+=("--verbs=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - 
flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_api-versions() -{ - last_command="oc_api-versions" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_apply_edit-last-applied() -{ - last_command="oc_apply_edit-last-applied" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--windows-line-endings") - local_nonpersistent_flags+=("--windows-line-endings") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_apply_set-last-applied() -{ - last_command="oc_apply_set-last-applied" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--create-annotation") - 
local_nonpersistent_flags+=("--create-annotation") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_apply_view-last-applied() -{ - last_command="oc_apply_view-last-applied" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_apply() -{ - last_command="oc_apply" - commands=() - commands+=("edit-last-applied") - commands+=("set-last-applied") - commands+=("view-last-applied") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") 
- flags+=("--cascade") - local_nonpersistent_flags+=("--cascade") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--experimental-field-manager=") - local_nonpersistent_flags+=("--experimental-field-manager=") - flags+=("--experimental-force-conflicts") - local_nonpersistent_flags+=("--experimental-force-conflicts") - flags+=("--experimental-server-side") - local_nonpersistent_flags+=("--experimental-server-side") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--force") - local_nonpersistent_flags+=("--force") - flags+=("--grace-period=") - local_nonpersistent_flags+=("--grace-period=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--openapi-patch") - local_nonpersistent_flags+=("--openapi-patch") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--overwrite") - local_nonpersistent_flags+=("--overwrite") - flags+=("--prune") - local_nonpersistent_flags+=("--prune") - flags+=("--prune-whitelist=") - local_nonpersistent_flags+=("--prune-whitelist=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--server-dry-run") - local_nonpersistent_flags+=("--server-dry-run") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--wait") - local_nonpersistent_flags+=("--wait") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_attach() -{ - last_command="oc_attach" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--container=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--container=") - flags+=("--pod-running-timeout=") - local_nonpersistent_flags+=("--pod-running-timeout=") - flags+=("--stdin") - flags+=("-i") - local_nonpersistent_flags+=("--stdin") - flags+=("--tty") - flags+=("-t") - local_nonpersistent_flags+=("--tty") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") 
- flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_auth_can-i() -{ - last_command="oc_auth_can-i" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--list") - local_nonpersistent_flags+=("--list") - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--quiet") - flags+=("-q") - local_nonpersistent_flags+=("--quiet") - flags+=("--subresource=") - local_nonpersistent_flags+=("--subresource=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_auth_reconcile() -{ - last_command="oc_auth_reconcile" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--remove-extra-permissions") - local_nonpersistent_flags+=("--remove-extra-permissions") - flags+=("--remove-extra-subjects") - local_nonpersistent_flags+=("--remove-extra-subjects") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - 
flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_auth() -{ - last_command="oc_auth" - commands=() - commands+=("can-i") - commands+=("reconcile") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_autoscale() -{ - last_command="oc_autoscale" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--cpu-percent=") - local_nonpersistent_flags+=("--cpu-percent=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--max=") - local_nonpersistent_flags+=("--max=") - flags+=("--min=") - local_nonpersistent_flags+=("--min=") - flags+=("--name=") - local_nonpersistent_flags+=("--name=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - 
must_have_one_flag=() - must_have_one_flag+=("--max=") - must_have_one_noun=() - must_have_one_noun+=("deployment") - must_have_one_noun+=("deploymentconfig") - must_have_one_noun+=("replicaset") - must_have_one_noun+=("replicationcontroller") - noun_aliases=() -} - -_oc_cancel-build() -{ - last_command="oc_cancel-build" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--dump-logs") - local_nonpersistent_flags+=("--dump-logs") - flags+=("--restart") - local_nonpersistent_flags+=("--restart") - flags+=("--state=") - local_nonpersistent_flags+=("--state=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_cluster-info_dump() -{ - last_command="oc_cluster-info_dump" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--namespaces=") - local_nonpersistent_flags+=("--namespaces=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--output-directory=") - local_nonpersistent_flags+=("--output-directory=") - flags+=("--pod-running-timeout=") - local_nonpersistent_flags+=("--pod-running-timeout=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_cluster-info() -{ - last_command="oc_cluster-info" - commands=() - commands+=("dump") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - 
flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_completion() -{ - last_command="oc_completion" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--help") - flags+=("-h") - local_nonpersistent_flags+=("--help") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("bash") - must_have_one_noun+=("zsh") - noun_aliases=() -} - -_oc_config_current-context() -{ - last_command="oc_config_current-context" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_delete-cluster() -{ - last_command="oc_config_delete-cluster" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_delete-context() -{ - last_command="oc_config_delete-context" - commands=() - - flags=() - two_word_flags=() - 
local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_get-clusters() -{ - last_command="oc_config_get-clusters" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_get-contexts() -{ - last_command="oc_config_get-contexts" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_rename-context() -{ - last_command="oc_config_rename-context" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") 
- two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_set() -{ - last_command="oc_config_set" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--set-raw-bytes") - local_nonpersistent_flags+=("--set-raw-bytes") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_set-cluster() -{ - last_command="oc_config_set-cluster" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--embed-certs") - local_nonpersistent_flags+=("--embed-certs") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_set-context() -{ - last_command="oc_config_set-context" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--current") - local_nonpersistent_flags+=("--current") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_set-credentials() -{ - last_command="oc_config_set-credentials" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - 
flags+=("--auth-provider=") - local_nonpersistent_flags+=("--auth-provider=") - flags+=("--auth-provider-arg=") - local_nonpersistent_flags+=("--auth-provider-arg=") - flags+=("--embed-certs") - local_nonpersistent_flags+=("--embed-certs") - flags+=("--password=") - local_nonpersistent_flags+=("--password=") - flags+=("--username=") - local_nonpersistent_flags+=("--username=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_unset() -{ - last_command="oc_config_unset" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_use-context() -{ - last_command="oc_config_use-context" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_view() -{ - last_command="oc_config_view" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--flatten") - local_nonpersistent_flags+=("--flatten") - flags+=("--merge") - local_nonpersistent_flags+=("--merge") - flags+=("--minify") - local_nonpersistent_flags+=("--minify") - flags+=("--output=") - two_word_flags+=("-o") - 
local_nonpersistent_flags+=("--output=") - flags+=("--raw") - local_nonpersistent_flags+=("--raw") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config() -{ - last_command="oc_config" - commands=() - commands+=("current-context") - commands+=("delete-cluster") - commands+=("delete-context") - commands+=("get-clusters") - commands+=("get-contexts") - commands+=("rename-context") - commands+=("set") - commands+=("set-cluster") - commands+=("set-context") - commands+=("set-credentials") - commands+=("unset") - commands+=("use-context") - commands+=("view") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_convert() -{ - last_command="oc_convert" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--output-version=") - local_nonpersistent_flags+=("--output-version=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - 
flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_cp() -{ - last_command="oc_cp" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--container=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--container=") - flags+=("--no-preserve") - local_nonpersistent_flags+=("--no-preserve") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_clusterresourcequota() -{ - last_command="oc_create_clusterresourcequota" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--hard=") - local_nonpersistent_flags+=("--hard=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--project-annotation-selector=") - local_nonpersistent_flags+=("--project-annotation-selector=") - flags+=("--project-label-selector=") - local_nonpersistent_flags+=("--project-label-selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_clusterrole() -{ - last_command="oc_create_clusterrole" - commands=() - - flags=() - two_word_flags=() - 
local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--aggregation-rule=") - local_nonpersistent_flags+=("--aggregation-rule=") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--non-resource-url=") - local_nonpersistent_flags+=("--non-resource-url=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--resource=") - local_nonpersistent_flags+=("--resource=") - flags+=("--resource-name=") - local_nonpersistent_flags+=("--resource-name=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--verb=") - local_nonpersistent_flags+=("--verb=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_clusterrolebinding() -{ - last_command="oc_create_clusterrolebinding" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--clusterrole=") - flags_with_completion+=("--clusterrole") - flags_completion+=("__kubectl_get_resource_clusterrole") - local_nonpersistent_flags+=("--clusterrole=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--group=") - local_nonpersistent_flags+=("--group=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--serviceaccount=") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") 
- flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_configmap() -{ - last_command="oc_create_configmap" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--append-hash") - local_nonpersistent_flags+=("--append-hash") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--from-env-file=") - local_nonpersistent_flags+=("--from-env-file=") - flags+=("--from-file=") - local_nonpersistent_flags+=("--from-file=") - flags+=("--from-literal=") - local_nonpersistent_flags+=("--from-literal=") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_cronjob() -{ - last_command="oc_create_cronjob" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--image=") - local_nonpersistent_flags+=("--image=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--restart=") - local_nonpersistent_flags+=("--restart=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--schedule=") - local_nonpersistent_flags+=("--schedule=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - 
two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_deployment() -{ - last_command="oc_create_deployment" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--image=") - local_nonpersistent_flags+=("--image=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_flag+=("--image=") - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_deploymentconfig() -{ - last_command="oc_create_deploymentconfig" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--image=") - local_nonpersistent_flags+=("--image=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_flag+=("--image=") - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_identity() -{ - last_command="oc_create_identity" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - 
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_create_imagestream()
-{
-    last_command="oc_create_imagestream"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--lookup-local")
-    local_nonpersistent_flags+=("--lookup-local")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_create_imagestreamtag()
-{
-    last_command="oc_create_imagestreamtag"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--annotation=")
-    two_word_flags+=("-A")
-    local_nonpersistent_flags+=("--annotation=")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--from=")
-    local_nonpersistent_flags+=("--from=")
-    flags+=("--from-image=")
-    local_nonpersistent_flags+=("--from-image=")
-    flags+=("--insecure")
-    local_nonpersistent_flags+=("--insecure")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--reference")
-    local_nonpersistent_flags+=("--reference")
-    flags+=("--reference-policy=")
-    local_nonpersistent_flags+=("--reference-policy=")
-    flags+=("--scheduled")
-    local_nonpersistent_flags+=("--scheduled")
flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_job() -{ - last_command="oc_create_job" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--from=") - local_nonpersistent_flags+=("--from=") - flags+=("--image=") - local_nonpersistent_flags+=("--image=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_namespace() -{ - last_command="oc_create_namespace" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") 
- flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_poddisruptionbudget() -{ - last_command="oc_create_poddisruptionbudget" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--max-unavailable=") - local_nonpersistent_flags+=("--max-unavailable=") - flags+=("--min-available=") - local_nonpersistent_flags+=("--min-available=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--selector=") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_priorityclass() -{ - last_command="oc_create_priorityclass" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--description=") - local_nonpersistent_flags+=("--description=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--global-default") - local_nonpersistent_flags+=("--global-default") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--value=") - local_nonpersistent_flags+=("--value=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - 
flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_quota() -{ - last_command="oc_create_quota" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--hard=") - local_nonpersistent_flags+=("--hard=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--scopes=") - local_nonpersistent_flags+=("--scopes=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_role() -{ - last_command="oc_create_role" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--resource=") - local_nonpersistent_flags+=("--resource=") - flags+=("--resource-name=") - local_nonpersistent_flags+=("--resource-name=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--verb=") - local_nonpersistent_flags+=("--verb=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - 
flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_rolebinding() -{ - last_command="oc_create_rolebinding" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--clusterrole=") - local_nonpersistent_flags+=("--clusterrole=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--group=") - local_nonpersistent_flags+=("--group=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--role=") - local_nonpersistent_flags+=("--role=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--serviceaccount=") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_route_edge() -{ - last_command="oc_create_route_edge" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--ca-cert=") - flags_with_completion+=("--ca-cert") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--ca-cert=") - flags+=("--cert=") - flags_with_completion+=("--cert") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--cert=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--hostname=") - local_nonpersistent_flags+=("--hostname=") - flags+=("--insecure-policy=") - local_nonpersistent_flags+=("--insecure-policy=") - flags+=("--key=") - flags_with_completion+=("--key") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--key=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--path=") - local_nonpersistent_flags+=("--path=") - flags+=("--port=") - local_nonpersistent_flags+=("--port=") - flags+=("--service=") - local_nonpersistent_flags+=("--service=") - flags+=("--template=") - flags_with_completion+=("--template") - 
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--validate")
-    local_nonpersistent_flags+=("--validate")
-    flags+=("--wildcard-policy=")
-    local_nonpersistent_flags+=("--wildcard-policy=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_flag+=("--service=")
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_create_route_passthrough()
-{
-    last_command="oc_create_route_passthrough"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--hostname=")
-    local_nonpersistent_flags+=("--hostname=")
-    flags+=("--insecure-policy=")
-    local_nonpersistent_flags+=("--insecure-policy=")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--port=")
-    local_nonpersistent_flags+=("--port=")
-    flags+=("--service=")
-    local_nonpersistent_flags+=("--service=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--validate")
-    local_nonpersistent_flags+=("--validate")
-    flags+=("--wildcard-policy=")
-    local_nonpersistent_flags+=("--wildcard-policy=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_flag+=("--service=")
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_create_route_reencrypt()
-{
-    last_command="oc_create_route_reencrypt"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--ca-cert=")
-    flags_with_completion+=("--ca-cert")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--ca-cert=")
-    flags+=("--cert=")
-    flags_with_completion+=("--cert")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--cert=")
-    flags+=("--dest-ca-cert=")
-    flags_with_completion+=("--dest-ca-cert")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--dest-ca-cert=")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--hostname=")
-    local_nonpersistent_flags+=("--hostname=")
-    flags+=("--insecure-policy=")
-    local_nonpersistent_flags+=("--insecure-policy=")
-    flags+=("--key=")
-    flags_with_completion+=("--key")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--key=")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--path=")
-    local_nonpersistent_flags+=("--path=")
-    flags+=("--port=")
-    local_nonpersistent_flags+=("--port=")
-    flags+=("--service=")
-    local_nonpersistent_flags+=("--service=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--validate")
-    local_nonpersistent_flags+=("--validate")
-    flags+=("--wildcard-policy=")
-    local_nonpersistent_flags+=("--wildcard-policy=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_flag+=("--service=")
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_create_route()
-{
-    last_command="oc_create_route"
-    commands=()
-    commands+=("edge")
-    commands+=("passthrough")
-    commands+=("reencrypt")
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_create_secret_docker-registry()
-{
-    last_command="oc_create_secret_docker-registry"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--append-hash")
-    local_nonpersistent_flags+=("--append-hash")
-    flags+=("--docker-email=")
-    local_nonpersistent_flags+=("--docker-email=")
-    flags+=("--docker-password=")
-    local_nonpersistent_flags+=("--docker-password=")
-    flags+=("--docker-server=")
-    local_nonpersistent_flags+=("--docker-server=")
-    flags+=("--docker-username=")
-    local_nonpersistent_flags+=("--docker-username=")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--from-file=")
-    local_nonpersistent_flags+=("--from-file=")
-    flags+=("--generator=")
-    local_nonpersistent_flags+=("--generator=")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--save-config")
-    local_nonpersistent_flags+=("--save-config")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--validate")
-    local_nonpersistent_flags+=("--validate")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_flag+=("--docker-password=")
-    must_have_one_flag+=("--docker-username=")
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_create_secret_generic()
-{
-    last_command="oc_create_secret_generic"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--append-hash")
-    local_nonpersistent_flags+=("--append-hash")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--from-env-file=")
-    local_nonpersistent_flags+=("--from-env-file=")
-    flags+=("--from-file=")
-    local_nonpersistent_flags+=("--from-file=")
-    flags+=("--from-literal=")
-    local_nonpersistent_flags+=("--from-literal=")
-    flags+=("--generator=")
-    local_nonpersistent_flags+=("--generator=")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--save-config")
-    local_nonpersistent_flags+=("--save-config")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--type=")
-    local_nonpersistent_flags+=("--type=")
-    flags+=("--validate")
-    local_nonpersistent_flags+=("--validate")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_create_secret_tls()
-{
-    last_command="oc_create_secret_tls"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--append-hash") - local_nonpersistent_flags+=("--append-hash") - flags+=("--cert=") - local_nonpersistent_flags+=("--cert=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--key=") - local_nonpersistent_flags+=("--key=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_secret() -{ - last_command="oc_create_secret" - commands=() - commands+=("docker-registry") - commands+=("generic") - commands+=("tls") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_service_clusterip() -{ - last_command="oc_create_service_clusterip" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--clusterip=") - local_nonpersistent_flags+=("--clusterip=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--tcp=") - local_nonpersistent_flags+=("--tcp=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - 
flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_service_externalname() -{ - last_command="oc_create_service_externalname" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--external-name=") - local_nonpersistent_flags+=("--external-name=") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--tcp=") - local_nonpersistent_flags+=("--tcp=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_flag+=("--external-name=") - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_service_loadbalancer() -{ - last_command="oc_create_service_loadbalancer" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--tcp=") - local_nonpersistent_flags+=("--tcp=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - 
flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_service_nodeport() -{ - last_command="oc_create_service_nodeport" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--node-port=") - local_nonpersistent_flags+=("--node-port=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--tcp=") - local_nonpersistent_flags+=("--tcp=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_service() -{ - last_command="oc_create_service" - commands=() - commands+=("clusterip") - commands+=("externalname") - commands+=("loadbalancer") - commands+=("nodeport") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_serviceaccount() -{ - last_command="oc_create_serviceaccount" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() 
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--generator=")
-    local_nonpersistent_flags+=("--generator=")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--save-config")
-    local_nonpersistent_flags+=("--save-config")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--validate")
-    local_nonpersistent_flags+=("--validate")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_create_user()
-{
-    last_command="oc_create_user"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--full-name=")
-    local_nonpersistent_flags+=("--full-name=")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_create_useridentitymapping()
-{
-    last_command="oc_create_useridentitymapping"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create() -{ - last_command="oc_create" - commands=() - commands+=("clusterresourcequota") - commands+=("clusterrole") - commands+=("clusterrolebinding") - commands+=("configmap") - commands+=("cronjob") - commands+=("deployment") - commands+=("deploymentconfig") - commands+=("identity") - commands+=("imagestream") - commands+=("imagestreamtag") - commands+=("job") - commands+=("namespace") - commands+=("poddisruptionbudget") - commands+=("priorityclass") - commands+=("quota") - commands+=("role") - commands+=("rolebinding") - commands+=("route") - commands+=("secret") - commands+=("service") - commands+=("serviceaccount") - commands+=("user") - commands+=("useridentitymapping") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--edit") - local_nonpersistent_flags+=("--edit") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--raw=") - local_nonpersistent_flags+=("--raw=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--windows-line-endings") - local_nonpersistent_flags+=("--windows-line-endings") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - 
flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_debug() -{ - last_command="oc_debug" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--as-root") - local_nonpersistent_flags+=("--as-root") - flags+=("--as-user=") - local_nonpersistent_flags+=("--as-user=") - flags+=("--container=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--container=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--image=") - local_nonpersistent_flags+=("--image=") - flags+=("--keep-annotations") - local_nonpersistent_flags+=("--keep-annotations") - flags+=("--keep-init-containers") - local_nonpersistent_flags+=("--keep-init-containers") - flags+=("--keep-liveness") - local_nonpersistent_flags+=("--keep-liveness") - flags+=("--keep-readiness") - local_nonpersistent_flags+=("--keep-readiness") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--no-stdin") - flags+=("-I") - local_nonpersistent_flags+=("--no-stdin") - flags+=("--no-tty") - flags+=("-T") - local_nonpersistent_flags+=("--no-tty") - flags+=("--node-name=") - local_nonpersistent_flags+=("--node-name=") - flags+=("--one-container") - local_nonpersistent_flags+=("--one-container") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--show-labels") - local_nonpersistent_flags+=("--show-labels") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--tty") - flags+=("-t") - local_nonpersistent_flags+=("--tty") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_delete() -{ - last_command="oc_delete" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--cascade") - local_nonpersistent_flags+=("--cascade") - flags+=("--field-selector=") - local_nonpersistent_flags+=("--field-selector=") - flags+=("--filename=") - 
-    flags_with_completion+=("--filename")
-    flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml")
-    two_word_flags+=("-f")
-    flags_with_completion+=("-f")
-    flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml")
-    local_nonpersistent_flags+=("--filename=")
-    flags+=("--force")
-    local_nonpersistent_flags+=("--force")
-    flags+=("--grace-period=")
-    local_nonpersistent_flags+=("--grace-period=")
-    flags+=("--ignore-not-found")
-    local_nonpersistent_flags+=("--ignore-not-found")
-    flags+=("--kustomize=")
-    two_word_flags+=("-k")
-    local_nonpersistent_flags+=("--kustomize=")
-    flags+=("--now")
-    local_nonpersistent_flags+=("--now")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--recursive")
-    flags+=("-R")
-    local_nonpersistent_flags+=("--recursive")
-    flags+=("--selector=")
-    two_word_flags+=("-l")
-    local_nonpersistent_flags+=("--selector=")
-    flags+=("--timeout=")
-    local_nonpersistent_flags+=("--timeout=")
-    flags+=("--wait")
-    local_nonpersistent_flags+=("--wait")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_describe()
-{
-    last_command="oc_describe"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--all-namespaces")
-    flags+=("-A")
-    local_nonpersistent_flags+=("--all-namespaces")
-    flags+=("--filename=")
-    flags_with_completion+=("--filename")
-    flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml")
-    two_word_flags+=("-f")
-    flags_with_completion+=("-f")
-    flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml")
-    local_nonpersistent_flags+=("--filename=")
-    flags+=("--kustomize=")
-    two_word_flags+=("-k")
-    local_nonpersistent_flags+=("--kustomize=")
-    flags+=("--recursive")
-    flags+=("-R")
-    local_nonpersistent_flags+=("--recursive")
-    flags+=("--selector=")
-    two_word_flags+=("-l")
-    local_nonpersistent_flags+=("--selector=")
-    flags+=("--show-events")
-    local_nonpersistent_flags+=("--show-events")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_diff()
-{
-    last_command="oc_diff"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--experimental-field-manager=")
-    local_nonpersistent_flags+=("--experimental-field-manager=")
-    flags+=("--experimental-force-conflicts")
-    local_nonpersistent_flags+=("--experimental-force-conflicts")
-    flags+=("--experimental-server-side")
-    local_nonpersistent_flags+=("--experimental-server-side")
-    flags+=("--filename=")
-    flags_with_completion+=("--filename")
-    flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml")
-    two_word_flags+=("-f")
-    flags_with_completion+=("-f")
-    flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml")
-    local_nonpersistent_flags+=("--filename=")
-    flags+=("--kustomize=")
-    two_word_flags+=("-k")
-    local_nonpersistent_flags+=("--kustomize=")
-    flags+=("--recursive")
-    flags+=("-R")
-    local_nonpersistent_flags+=("--recursive")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_edit()
-{
-    last_command="oc_edit"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--filename=")
-    flags_with_completion+=("--filename")
-    flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml")
-    two_word_flags+=("-f")
-    flags_with_completion+=("-f")
-    flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml")
-    local_nonpersistent_flags+=("--filename=")
-    flags+=("--kustomize=")
-    two_word_flags+=("-k")
-    local_nonpersistent_flags+=("--kustomize=")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--output-patch")
-    local_nonpersistent_flags+=("--output-patch")
-    flags+=("--record")
-    local_nonpersistent_flags+=("--record")
-    flags+=("--recursive")
-    flags+=("-R")
-    local_nonpersistent_flags+=("--recursive")
-    flags+=("--save-config")
-    local_nonpersistent_flags+=("--save-config")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--validate")
-    local_nonpersistent_flags+=("--validate")
-    flags+=("--windows-line-endings")
-    local_nonpersistent_flags+=("--windows-line-endings")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_ex_build-chain()
-{
-    last_command="oc_ex_build-chain"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--all")
-    local_nonpersistent_flags+=("--all")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--reverse")
-    local_nonpersistent_flags+=("--reverse")
-    flags+=("--trigger-only")
-    local_nonpersistent_flags+=("--trigger-only")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_ex_dockergc()
-{
-    last_command="oc_ex_dockergc"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--image-gc-high-threshold=")
-    local_nonpersistent_flags+=("--image-gc-high-threshold=")
-    flags+=("--image-gc-low-threshold=")
-    local_nonpersistent_flags+=("--image-gc-low-threshold=")
-    flags+=("--minimum-ttl-duration=")
-    local_nonpersistent_flags+=("--minimum-ttl-duration=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_ex_options()
-{
-    last_command="oc_ex_options"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_ex_prune-groups() -{ - last_command="oc_ex_prune-groups" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--blacklist=") - flags_with_completion+=("--blacklist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--blacklist=") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--sync-config=") - flags_with_completion+=("--sync-config") - flags_completion+=("__oc_handle_filename_extension_flag yaml|yml") - local_nonpersistent_flags+=("--sync-config=") - flags+=("--whitelist=") - flags_with_completion+=("--whitelist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--whitelist=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_ex_sync-groups() -{ - last_command="oc_ex_sync-groups" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--blacklist=") - flags_with_completion+=("--blacklist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--blacklist=") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--sync-config=") - flags_with_completion+=("--sync-config") - flags_completion+=("__oc_handle_filename_extension_flag yaml|yml") - local_nonpersistent_flags+=("--sync-config=") - flags+=("--type=") - local_nonpersistent_flags+=("--type=") - flags+=("--whitelist=") - flags_with_completion+=("--whitelist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--whitelist=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_ex() -{ - last_command="oc_ex" - commands=() - commands+=("build-chain") - commands+=("dockergc") - commands+=("options") - commands+=("prune-groups") - commands+=("sync-groups") - - flags=() - 
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_exec()
-{
-    last_command="oc_exec"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--container=")
-    two_word_flags+=("-c")
-    local_nonpersistent_flags+=("--container=")
-    flags+=("--stdin")
-    flags+=("-i")
-    local_nonpersistent_flags+=("--stdin")
-    flags+=("--tty")
-    flags+=("-t")
-    local_nonpersistent_flags+=("--tty")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_explain()
-{
-    last_command="oc_explain"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--api-version=")
-    local_nonpersistent_flags+=("--api-version=")
-    flags+=("--recursive")
-    local_nonpersistent_flags+=("--recursive")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_expose()
-{
-    last_command="oc_expose"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--cluster-ip=")
-    local_nonpersistent_flags+=("--cluster-ip=")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--external-ip=")
local_nonpersistent_flags+=("--external-ip=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--hostname=") - local_nonpersistent_flags+=("--hostname=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--labels=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--labels=") - flags+=("--load-balancer-ip=") - local_nonpersistent_flags+=("--load-balancer-ip=") - flags+=("--name=") - local_nonpersistent_flags+=("--name=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--overrides=") - local_nonpersistent_flags+=("--overrides=") - flags+=("--path=") - local_nonpersistent_flags+=("--path=") - flags+=("--port=") - local_nonpersistent_flags+=("--port=") - flags+=("--protocol=") - local_nonpersistent_flags+=("--protocol=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--selector=") - local_nonpersistent_flags+=("--selector=") - flags+=("--session-affinity=") - local_nonpersistent_flags+=("--session-affinity=") - flags+=("--target-port=") - local_nonpersistent_flags+=("--target-port=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--type=") - local_nonpersistent_flags+=("--type=") - flags+=("--wildcard-policy=") - local_nonpersistent_flags+=("--wildcard-policy=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("deployment") - must_have_one_noun+=("pod") - must_have_one_noun+=("replicaset") - must_have_one_noun+=("replicationcontroller") - must_have_one_noun+=("service") - noun_aliases=() -} - -_oc_extract() -{ - last_command="oc_extract" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("_filedir") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--filename=") - flags+=("--keys=") - local_nonpersistent_flags+=("--keys=") - flags+=("--to=") - local_nonpersistent_flags+=("--to=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - 
flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_get() -{ - last_command="oc_get" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--chunk-size=") - local_nonpersistent_flags+=("--chunk-size=") - flags+=("--field-selector=") - local_nonpersistent_flags+=("--field-selector=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--ignore-not-found") - local_nonpersistent_flags+=("--ignore-not-found") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--label-columns=") - two_word_flags+=("-L") - local_nonpersistent_flags+=("--label-columns=") - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--raw=") - local_nonpersistent_flags+=("--raw=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--server-print") - local_nonpersistent_flags+=("--server-print") - flags+=("--show-kind") - local_nonpersistent_flags+=("--show-kind") - flags+=("--show-labels") - local_nonpersistent_flags+=("--show-labels") - flags+=("--sort-by=") - local_nonpersistent_flags+=("--sort-by=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--watch") - flags+=("-w") - local_nonpersistent_flags+=("--watch") - flags+=("--watch-only") - local_nonpersistent_flags+=("--watch-only") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_idle() 
-{
-    last_command="oc_idle"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--all")
-    local_nonpersistent_flags+=("--all")
-    flags+=("--all-namespaces")
-    flags+=("-A")
-    local_nonpersistent_flags+=("--all-namespaces")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--resource-names-file=")
-    flags_with_completion+=("--resource-names-file")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--resource-names-file=")
-    flags+=("--selector=")
-    two_word_flags+=("-l")
-    local_nonpersistent_flags+=("--selector=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_image_append()
-{
-    last_command="oc_image_append"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--created-at=")
-    local_nonpersistent_flags+=("--created-at=")
-    flags+=("--drop-history")
-    local_nonpersistent_flags+=("--drop-history")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--filter-by-os=")
-    local_nonpersistent_flags+=("--filter-by-os=")
-    flags+=("--force")
-    local_nonpersistent_flags+=("--force")
-    flags+=("--from=")
-    local_nonpersistent_flags+=("--from=")
-    flags+=("--image=")
-    local_nonpersistent_flags+=("--image=")
-    flags+=("--insecure")
-    local_nonpersistent_flags+=("--insecure")
-    flags+=("--max-per-registry=")
-    local_nonpersistent_flags+=("--max-per-registry=")
-    flags+=("--meta=")
-    local_nonpersistent_flags+=("--meta=")
-    flags+=("--registry-config=")
-    two_word_flags+=("-a")
-    local_nonpersistent_flags+=("--registry-config=")
-    flags+=("--skip-verification")
-    local_nonpersistent_flags+=("--skip-verification")
-    flags+=("--to=")
-    local_nonpersistent_flags+=("--to=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_image_extract()
-{
-    last_command="oc_image_extract"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--all-layers")
-    local_nonpersistent_flags+=("--all-layers")
flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--file=") - local_nonpersistent_flags+=("--file=") - flags+=("--filter-by-os=") - local_nonpersistent_flags+=("--filter-by-os=") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--only-files") - local_nonpersistent_flags+=("--only-files") - flags+=("--path=") - local_nonpersistent_flags+=("--path=") - flags+=("--preserve-ownership") - flags+=("-p") - local_nonpersistent_flags+=("--preserve-ownership") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--skip-verification") - local_nonpersistent_flags+=("--skip-verification") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_image_info() -{ - last_command="oc_image_info" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--filter-by-os=") - local_nonpersistent_flags+=("--filter-by-os=") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--skip-verification") - local_nonpersistent_flags+=("--skip-verification") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_image_mirror() -{ - last_command="oc_image_mirror" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - two_word_flags+=("-f") - local_nonpersistent_flags+=("--filename=") - flags+=("--filter-by-os=") - local_nonpersistent_flags+=("--filter-by-os=") - flags+=("--force") - local_nonpersistent_flags+=("--force") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--max-per-registry=") - local_nonpersistent_flags+=("--max-per-registry=") - 
flags+=("--max-registry=") - local_nonpersistent_flags+=("--max-registry=") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--s3-source-bucket=") - local_nonpersistent_flags+=("--s3-source-bucket=") - flags+=("--skip-missing") - local_nonpersistent_flags+=("--skip-missing") - flags+=("--skip-mount") - local_nonpersistent_flags+=("--skip-mount") - flags+=("--skip-multiple-scopes") - local_nonpersistent_flags+=("--skip-multiple-scopes") - flags+=("--skip-verification") - local_nonpersistent_flags+=("--skip-verification") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_image() -{ - last_command="oc_image" - commands=() - commands+=("append") - commands+=("extract") - commands+=("info") - commands+=("mirror") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_import-image() -{ - last_command="oc_import-image" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--from=") - local_nonpersistent_flags+=("--from=") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--reference-policy=") - local_nonpersistent_flags+=("--reference-policy=") - flags+=("--scheduled") - local_nonpersistent_flags+=("--scheduled") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - 
flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_kustomize() -{ - last_command="oc_kustomize" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_label() -{ - last_command="oc_label" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--field-selector=") - local_nonpersistent_flags+=("--field-selector=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--list") - local_nonpersistent_flags+=("--list") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--overwrite") - local_nonpersistent_flags+=("--overwrite") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--resource-version=") - local_nonpersistent_flags+=("--resource-version=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - 
flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_login() -{ - last_command="oc_login" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--password=") - two_word_flags+=("-p") - local_nonpersistent_flags+=("--password=") - flags+=("--username=") - two_word_flags+=("-u") - local_nonpersistent_flags+=("--username=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_logout() -{ - last_command="oc_logout" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_logs() -{ - last_command="oc_logs" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-containers") - local_nonpersistent_flags+=("--all-containers") - flags+=("--container=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--container=") - flags+=("--follow") - flags+=("-f") - local_nonpersistent_flags+=("--follow") - flags+=("--limit-bytes=") - local_nonpersistent_flags+=("--limit-bytes=") - flags+=("--max-log-requests=") - local_nonpersistent_flags+=("--max-log-requests=") - flags+=("--pod-running-timeout=") - local_nonpersistent_flags+=("--pod-running-timeout=") - flags+=("--previous") - flags+=("-p") - local_nonpersistent_flags+=("--previous") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--since=") - local_nonpersistent_flags+=("--since=") - flags+=("--since-time=") - local_nonpersistent_flags+=("--since-time=") - flags+=("--tail=") - local_nonpersistent_flags+=("--tail=") - flags+=("--timestamps") - local_nonpersistent_flags+=("--timestamps") - 
flags+=("--version=") - local_nonpersistent_flags+=("--version=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_new-app() -{ - last_command="oc_new-app" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-images") - local_nonpersistent_flags+=("--allow-missing-images") - flags+=("--allow-missing-imagestream-tags") - local_nonpersistent_flags+=("--allow-missing-imagestream-tags") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--as-test") - local_nonpersistent_flags+=("--as-test") - flags+=("--binary") - local_nonpersistent_flags+=("--binary") - flags+=("--build-env=") - local_nonpersistent_flags+=("--build-env=") - flags+=("--build-env-file=") - flags_with_completion+=("--build-env-file") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--build-env-file=") - flags+=("--code=") - local_nonpersistent_flags+=("--code=") - flags+=("--context-dir=") - local_nonpersistent_flags+=("--context-dir=") - flags+=("--docker-image=") - local_nonpersistent_flags+=("--docker-image=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--env=") - two_word_flags+=("-e") - local_nonpersistent_flags+=("--env=") - flags+=("--env-file=") - flags_with_completion+=("--env-file") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--env-file=") - flags+=("--file=") - flags_with_completion+=("--file") - flags_completion+=("__oc_handle_filename_extension_flag yaml|yml|json") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag yaml|yml|json") - local_nonpersistent_flags+=("--file=") - flags+=("--grant-install-rights") - local_nonpersistent_flags+=("--grant-install-rights") - flags+=("--group=") - local_nonpersistent_flags+=("--group=") - flags+=("--ignore-unknown-parameters") - local_nonpersistent_flags+=("--ignore-unknown-parameters") - flags+=("--image-stream=") - two_word_flags+=("-i") - local_nonpersistent_flags+=("--image-stream=") - flags+=("--insecure-registry") - local_nonpersistent_flags+=("--insecure-registry") - flags+=("--labels=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--labels=") - flags+=("--list") - flags+=("-L") - local_nonpersistent_flags+=("--list") - flags+=("--name=") - local_nonpersistent_flags+=("--name=") - flags+=("--no-install") - local_nonpersistent_flags+=("--no-install") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--output-version=") - local_nonpersistent_flags+=("--output-version=") - flags+=("--param=") - two_word_flags+=("-p") - local_nonpersistent_flags+=("--param=") - flags+=("--param-file=") - 
flags_with_completion+=("--param-file") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--param-file=") - flags+=("--search") - flags+=("-S") - local_nonpersistent_flags+=("--search") - flags+=("--show-all") - flags+=("-a") - local_nonpersistent_flags+=("--show-all") - flags+=("--show-labels") - local_nonpersistent_flags+=("--show-labels") - flags+=("--sort-by=") - local_nonpersistent_flags+=("--sort-by=") - flags+=("--source-secret=") - local_nonpersistent_flags+=("--source-secret=") - flags+=("--strategy=") - local_nonpersistent_flags+=("--strategy=") - flags+=("--template=") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_new-build() -{ - last_command="oc_new-build" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-images") - local_nonpersistent_flags+=("--allow-missing-images") - flags+=("--allow-missing-imagestream-tags") - local_nonpersistent_flags+=("--allow-missing-imagestream-tags") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--binary") - local_nonpersistent_flags+=("--binary") - flags+=("--build-arg=") - local_nonpersistent_flags+=("--build-arg=") - flags+=("--build-config-map=") - local_nonpersistent_flags+=("--build-config-map=") - flags+=("--build-secret=") - local_nonpersistent_flags+=("--build-secret=") - flags+=("--code=") - local_nonpersistent_flags+=("--code=") - flags+=("--context-dir=") - local_nonpersistent_flags+=("--context-dir=") - flags+=("--docker-image=") - local_nonpersistent_flags+=("--docker-image=") - flags+=("--dockerfile=") - two_word_flags+=("-D") - local_nonpersistent_flags+=("--dockerfile=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--env=") - two_word_flags+=("-e") - local_nonpersistent_flags+=("--env=") - flags+=("--env-file=") - flags_with_completion+=("--env-file") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--env-file=") - flags+=("--image-stream=") - two_word_flags+=("-i") - local_nonpersistent_flags+=("--image-stream=") - flags+=("--labels=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--labels=") - flags+=("--name=") - local_nonpersistent_flags+=("--name=") - flags+=("--no-output") - local_nonpersistent_flags+=("--no-output") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--output-version=") - local_nonpersistent_flags+=("--output-version=") - flags+=("--push-secret=") - local_nonpersistent_flags+=("--push-secret=") - flags+=("--show-all") - flags+=("-a") - local_nonpersistent_flags+=("--show-all") - flags+=("--show-labels") - local_nonpersistent_flags+=("--show-labels") - flags+=("--sort-by=") - 
local_nonpersistent_flags+=("--sort-by=") - flags+=("--source-image=") - local_nonpersistent_flags+=("--source-image=") - flags+=("--source-image-path=") - local_nonpersistent_flags+=("--source-image-path=") - flags+=("--source-secret=") - local_nonpersistent_flags+=("--source-secret=") - flags+=("--strategy=") - local_nonpersistent_flags+=("--strategy=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--to=") - local_nonpersistent_flags+=("--to=") - flags+=("--to-docker") - local_nonpersistent_flags+=("--to-docker") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_new-project() -{ - last_command="oc_new-project" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--description=") - local_nonpersistent_flags+=("--description=") - flags+=("--display-name=") - local_nonpersistent_flags+=("--display-name=") - flags+=("--skip-config-write") - local_nonpersistent_flags+=("--skip-config-write") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_observe() -{ - last_command="oc_observe" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--argument=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--argument=") - flags+=("--delete=") - two_word_flags+=("-d") - local_nonpersistent_flags+=("--delete=") - flags+=("--exit-after=") - local_nonpersistent_flags+=("--exit-after=") - flags+=("--listen-addr=") - local_nonpersistent_flags+=("--listen-addr=") - flags+=("--maximum-errors=") - local_nonpersistent_flags+=("--maximum-errors=") - flags+=("--names=") - local_nonpersistent_flags+=("--names=") - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--object-env-var=") - local_nonpersistent_flags+=("--object-env-var=") - flags+=("--once") - local_nonpersistent_flags+=("--once") - flags+=("--output=") - 
local_nonpersistent_flags+=("--output=") - flags+=("--print-metrics-on-exit") - local_nonpersistent_flags+=("--print-metrics-on-exit") - flags+=("--resync-period=") - local_nonpersistent_flags+=("--resync-period=") - flags+=("--retry-count=") - local_nonpersistent_flags+=("--retry-count=") - flags+=("--retry-on-exit-code=") - local_nonpersistent_flags+=("--retry-on-exit-code=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--strict-templates") - local_nonpersistent_flags+=("--strict-templates") - flags+=("--type-env-var=") - local_nonpersistent_flags+=("--type-env-var=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_options() -{ - last_command="oc_options" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_patch() -{ - last_command="oc_patch" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--patch=") - two_word_flags+=("-p") - local_nonpersistent_flags+=("--patch=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - 
flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--type=") - local_nonpersistent_flags+=("--type=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_flag+=("--patch=") - must_have_one_flag+=("-p") - must_have_one_noun=() - noun_aliases=() -} - -_oc_plugin_list() -{ - last_command="oc_plugin_list" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--name-only") - local_nonpersistent_flags+=("--name-only") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_plugin() -{ - last_command="oc_plugin" - commands=() - commands+=("list") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_policy_add-role-to-group() -{ - last_command="oc_policy_add-role-to-group" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--role-namespace=") - local_nonpersistent_flags+=("--role-namespace=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - 
flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_policy_add-role-to-user() -{ - last_command="oc_policy_add-role-to-user" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--role-namespace=") - local_nonpersistent_flags+=("--role-namespace=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_policy_remove-group() -{ - last_command="oc_policy_remove-group" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - 
flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_policy_remove-role-from-group() -{ - last_command="oc_policy_remove-role-from-group" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--role-namespace=") - local_nonpersistent_flags+=("--role-namespace=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_policy_remove-role-from-user() -{ - last_command="oc_policy_remove-role-from-user" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--role-namespace=") - local_nonpersistent_flags+=("--role-namespace=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - 
-_oc_policy_remove-user()
-{
-    last_command="oc_policy_remove-user"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_policy_scc-review()
-{
-    last_command="oc_policy_scc-review"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--filename=")
-    flags_with_completion+=("--filename")
-    flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml")
-    two_word_flags+=("-f")
-    flags_with_completion+=("-f")
-    flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml")
-    local_nonpersistent_flags+=("--filename=")
-    flags+=("--kustomize=")
-    two_word_flags+=("-k")
-    local_nonpersistent_flags+=("--kustomize=")
-    flags+=("--no-headers")
-    local_nonpersistent_flags+=("--no-headers")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--recursive")
-    flags+=("-R")
-    local_nonpersistent_flags+=("--recursive")
-    flags+=("--serviceaccount=")
-    two_word_flags+=("-z")
-    local_nonpersistent_flags+=("--serviceaccount=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_policy_scc-subject-review()
-{
-    last_command="oc_policy_scc-subject-review"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--groups=") - two_word_flags+=("-g") - local_nonpersistent_flags+=("--groups=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_policy_who-can() -{ - last_command="oc_policy_who-can" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_policy() -{ - last_command="oc_policy" - commands=() - commands+=("add-role-to-group") - commands+=("add-role-to-user") - commands+=("remove-group") - commands+=("remove-role-from-group") - commands+=("remove-role-from-user") - commands+=("remove-user") - commands+=("scc-review") - 
commands+=("scc-subject-review") - commands+=("who-can") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_port-forward() -{ - last_command="oc_port-forward" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--address=") - local_nonpersistent_flags+=("--address=") - flags+=("--pod-running-timeout=") - local_nonpersistent_flags+=("--pod-running-timeout=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_process() -{ - last_command="oc_process" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag yaml|yml|json") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag yaml|yml|json") - local_nonpersistent_flags+=("--filename=") - flags+=("--ignore-unknown-parameters") - local_nonpersistent_flags+=("--ignore-unknown-parameters") - flags+=("--labels=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--labels=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--param=") - two_word_flags+=("-p") - local_nonpersistent_flags+=("--param=") - flags+=("--param-file=") - flags_with_completion+=("--param-file") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--param-file=") - flags+=("--parameters") - local_nonpersistent_flags+=("--parameters") - flags+=("--raw") - local_nonpersistent_flags+=("--raw") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - two_word_flags+=("-t") - flags_with_completion+=("-t") - flags_completion+=("_filedir") - 
local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_project() -{ - last_command="oc_project" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--short") - flags+=("-q") - local_nonpersistent_flags+=("--short") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_projects() -{ - last_command="oc_projects" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--short") - flags+=("-q") - local_nonpersistent_flags+=("--short") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_proxy() -{ - last_command="oc_proxy" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--accept-hosts=") - local_nonpersistent_flags+=("--accept-hosts=") - flags+=("--accept-paths=") - local_nonpersistent_flags+=("--accept-paths=") - flags+=("--address=") - local_nonpersistent_flags+=("--address=") - flags+=("--api-prefix=") - local_nonpersistent_flags+=("--api-prefix=") - flags+=("--disable-filter") - local_nonpersistent_flags+=("--disable-filter") - flags+=("--keepalive=") - local_nonpersistent_flags+=("--keepalive=") - flags+=("--port=") - two_word_flags+=("-p") - local_nonpersistent_flags+=("--port=") - flags+=("--reject-methods=") - 
local_nonpersistent_flags+=("--reject-methods=") - flags+=("--reject-paths=") - local_nonpersistent_flags+=("--reject-paths=") - flags+=("--unix-socket=") - two_word_flags+=("-u") - local_nonpersistent_flags+=("--unix-socket=") - flags+=("--www=") - two_word_flags+=("-w") - local_nonpersistent_flags+=("--www=") - flags+=("--www-prefix=") - two_word_flags+=("-P") - local_nonpersistent_flags+=("--www-prefix=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_registry_info() -{ - last_command="oc_registry_info" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--check") - local_nonpersistent_flags+=("--check") - flags+=("--internal") - local_nonpersistent_flags+=("--internal") - flags+=("--public") - local_nonpersistent_flags+=("--public") - flags+=("--quiet") - flags+=("-q") - local_nonpersistent_flags+=("--quiet") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_registry_login() -{ - last_command="oc_registry_login" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--registry=") - local_nonpersistent_flags+=("--registry=") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--service-account=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--service-account=") - flags+=("--skip-check") - local_nonpersistent_flags+=("--skip-check") - flags+=("--to=") - local_nonpersistent_flags+=("--to=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - 
flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_registry() -{ - last_command="oc_registry" - commands=() - commands+=("info") - commands+=("login") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_replace() -{ - last_command="oc_replace" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--cascade") - local_nonpersistent_flags+=("--cascade") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--force") - local_nonpersistent_flags+=("--force") - flags+=("--grace-period=") - local_nonpersistent_flags+=("--grace-period=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--wait") - local_nonpersistent_flags+=("--wait") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_rollback() -{ - last_command="oc_rollback" - commands=() - - 
flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--change-scaling-settings") - local_nonpersistent_flags+=("--change-scaling-settings") - flags+=("--change-strategy") - local_nonpersistent_flags+=("--change-strategy") - flags+=("--change-triggers") - local_nonpersistent_flags+=("--change-triggers") - flags+=("--dry-run") - flags+=("-d") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--to-version=") - local_nonpersistent_flags+=("--to-version=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_rollout_cancel() -{ - last_command="oc_rollout_cancel" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("deploymentconfig") - noun_aliases=() -} - -_oc_rollout_history() -{ - last_command="oc_rollout_history" - commands=() - - flags=() - two_word_flags=() - 
local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--revision=") - local_nonpersistent_flags+=("--revision=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("daemonset") - must_have_one_noun+=("deployment") - must_have_one_noun+=("deploymentconfig") - must_have_one_noun+=("statefulset") - noun_aliases=() -} - -_oc_rollout_latest() -{ - last_command="oc_rollout_latest" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--again") - local_nonpersistent_flags+=("--again") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_rollout_pause() -{ - last_command="oc_rollout_pause" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - 
local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("deployment") - must_have_one_noun+=("deploymentconfig") - noun_aliases=() -} - -_oc_rollout_resume() -{ - last_command="oc_rollout_resume" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("deployment") - must_have_one_noun+=("deploymentconfig") - noun_aliases=() -} - -_oc_rollout_retry() -{ - last_command="oc_rollout_retry" - 
commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_rollout_status() -{ - last_command="oc_rollout_status" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--revision=") - local_nonpersistent_flags+=("--revision=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--watch") - flags+=("-w") - local_nonpersistent_flags+=("--watch") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("daemonset") - must_have_one_noun+=("deployment") - must_have_one_noun+=("statefulset") - noun_aliases=() -} - -_oc_rollout_undo() -{ - last_command="oc_rollout_undo" - commands=() - 
- flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--to-revision=") - local_nonpersistent_flags+=("--to-revision=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("daemonset") - must_have_one_noun+=("deployment") - must_have_one_noun+=("deploymentconfig") - must_have_one_noun+=("statefulset") - noun_aliases=() -} - -_oc_rollout() -{ - last_command="oc_rollout" - commands=() - commands+=("cancel") - commands+=("history") - commands+=("latest") - commands+=("pause") - commands+=("resume") - commands+=("retry") - commands+=("status") - commands+=("undo") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_rsh() -{ - last_command="oc_rsh" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--container=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--container=") - flags+=("--no-tty") - flags+=("-T") - local_nonpersistent_flags+=("--no-tty") - flags+=("--shell=") - local_nonpersistent_flags+=("--shell=") - flags+=("--timeout=") - 
local_nonpersistent_flags+=("--timeout=") - flags+=("--tty") - flags+=("-t") - local_nonpersistent_flags+=("--tty") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_rsync() -{ - last_command="oc_rsync" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--compress") - local_nonpersistent_flags+=("--compress") - flags+=("--container=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--container=") - flags+=("--delete") - local_nonpersistent_flags+=("--delete") - flags+=("--exclude=") - local_nonpersistent_flags+=("--exclude=") - flags+=("--include=") - local_nonpersistent_flags+=("--include=") - flags+=("--no-perms") - local_nonpersistent_flags+=("--no-perms") - flags+=("--progress") - local_nonpersistent_flags+=("--progress") - flags+=("--quiet") - flags+=("-q") - local_nonpersistent_flags+=("--quiet") - flags+=("--strategy=") - local_nonpersistent_flags+=("--strategy=") - flags+=("--watch") - flags+=("-w") - local_nonpersistent_flags+=("--watch") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_run() -{ - last_command="oc_run" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--attach") - local_nonpersistent_flags+=("--attach") - flags+=("--cascade") - local_nonpersistent_flags+=("--cascade") - flags+=("--command") - local_nonpersistent_flags+=("--command") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--env=") - local_nonpersistent_flags+=("--env=") - flags+=("--expose") - local_nonpersistent_flags+=("--expose") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--force") - local_nonpersistent_flags+=("--force") 
- flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--grace-period=") - local_nonpersistent_flags+=("--grace-period=") - flags+=("--hostport=") - local_nonpersistent_flags+=("--hostport=") - flags+=("--image=") - local_nonpersistent_flags+=("--image=") - flags+=("--image-pull-policy=") - local_nonpersistent_flags+=("--image-pull-policy=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--labels=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--labels=") - flags+=("--leave-stdin-open") - local_nonpersistent_flags+=("--leave-stdin-open") - flags+=("--limits=") - local_nonpersistent_flags+=("--limits=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--overrides=") - local_nonpersistent_flags+=("--overrides=") - flags+=("--pod-running-timeout=") - local_nonpersistent_flags+=("--pod-running-timeout=") - flags+=("--port=") - local_nonpersistent_flags+=("--port=") - flags+=("--quiet") - local_nonpersistent_flags+=("--quiet") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--replicas=") - two_word_flags+=("-r") - local_nonpersistent_flags+=("--replicas=") - flags+=("--requests=") - local_nonpersistent_flags+=("--requests=") - flags+=("--restart=") - local_nonpersistent_flags+=("--restart=") - flags+=("--rm") - local_nonpersistent_flags+=("--rm") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--schedule=") - local_nonpersistent_flags+=("--schedule=") - flags+=("--service-generator=") - local_nonpersistent_flags+=("--service-generator=") - flags+=("--service-overrides=") - local_nonpersistent_flags+=("--service-overrides=") - flags+=("--serviceaccount=") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--stdin") - flags+=("-i") - local_nonpersistent_flags+=("--stdin") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--tty") - flags+=("-t") - local_nonpersistent_flags+=("--tty") - flags+=("--wait") - local_nonpersistent_flags+=("--wait") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_flag+=("--image=") - must_have_one_noun=() - noun_aliases=() -} - -_oc_scale() -{ - last_command="oc_scale" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--current-replicas=") - 
local_nonpersistent_flags+=("--current-replicas=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--replicas=") - local_nonpersistent_flags+=("--replicas=") - flags+=("--resource-version=") - local_nonpersistent_flags+=("--resource-version=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_flag+=("--replicas=") - must_have_one_noun=() - must_have_one_noun+=("deployment") - must_have_one_noun+=("deploymentconfig") - must_have_one_noun+=("replicaset") - must_have_one_noun+=("replicationcontroller") - must_have_one_noun+=("statefulset") - noun_aliases=() -} - -_oc_secrets_add() -{ - last_command="oc_secrets_add" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--for=") - local_nonpersistent_flags+=("--for=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_secrets_link() -{ - last_command="oc_secrets_link" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--for=") - local_nonpersistent_flags+=("--for=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - 
flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_secrets_unlink() -{ - last_command="oc_secrets_unlink" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_secrets() -{ - last_command="oc_secrets" - commands=() - commands+=("add") - commands+=("link") - commands+=("unlink") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_serviceaccounts_create-kubeconfig() -{ - last_command="oc_serviceaccounts_create-kubeconfig" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--with-namespace=") - local_nonpersistent_flags+=("--with-namespace=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - 
flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_serviceaccounts_get-token() -{ - last_command="oc_serviceaccounts_get-token" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_serviceaccounts_new-token() -{ - last_command="oc_serviceaccounts_new-token" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--labels=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--labels=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_serviceaccounts() -{ - last_command="oc_serviceaccounts" - commands=() - commands+=("create-kubeconfig") - commands+=("get-token") - commands+=("new-token") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_build-hook() -{ - 
last_command="oc_set_build-hook" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--command") - local_nonpersistent_flags+=("--command") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--post-commit") - local_nonpersistent_flags+=("--post-commit") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--remove") - local_nonpersistent_flags+=("--remove") - flags+=("--script=") - local_nonpersistent_flags+=("--script=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_build-secret() -{ - last_command="oc_set_build-secret" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--pull") - local_nonpersistent_flags+=("--pull") - flags+=("--push") - local_nonpersistent_flags+=("--push") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--remove") - 
local_nonpersistent_flags+=("--remove") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--source") - local_nonpersistent_flags+=("--source") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_deployment-hook() -{ - last_command="oc_set_deployment-hook" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--container=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--container=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--environment=") - two_word_flags+=("-e") - local_nonpersistent_flags+=("--environment=") - flags+=("--failure-policy=") - local_nonpersistent_flags+=("--failure-policy=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--mid") - local_nonpersistent_flags+=("--mid") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--post") - local_nonpersistent_flags+=("--post") - flags+=("--pre") - local_nonpersistent_flags+=("--pre") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--remove") - local_nonpersistent_flags+=("--remove") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--volumes=") - local_nonpersistent_flags+=("--volumes=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - 
flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_env() -{ - last_command="oc_set_env" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--containers=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--containers=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--env=") - two_word_flags+=("-e") - local_nonpersistent_flags+=("--env=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--from=") - local_nonpersistent_flags+=("--from=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--list") - local_nonpersistent_flags+=("--list") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--overwrite") - local_nonpersistent_flags+=("--overwrite") - flags+=("--prefix=") - local_nonpersistent_flags+=("--prefix=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--resolve") - local_nonpersistent_flags+=("--resolve") - flags+=("--resource-version=") - local_nonpersistent_flags+=("--resource-version=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_image() -{ - last_command="oc_set_image" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag 
json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--source=") - local_nonpersistent_flags+=("--source=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_image-lookup() -{ - last_command="oc_set_image-lookup" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--enabled") - local_nonpersistent_flags+=("--enabled") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--list") - local_nonpersistent_flags+=("--list") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - 
flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_probe() -{ - last_command="oc_set_probe" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--containers=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--containers=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--failure-threshold=") - local_nonpersistent_flags+=("--failure-threshold=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--get-url=") - local_nonpersistent_flags+=("--get-url=") - flags+=("--initial-delay-seconds=") - local_nonpersistent_flags+=("--initial-delay-seconds=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--liveness") - local_nonpersistent_flags+=("--liveness") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--open-tcp=") - local_nonpersistent_flags+=("--open-tcp=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--period-seconds=") - local_nonpersistent_flags+=("--period-seconds=") - flags+=("--readiness") - local_nonpersistent_flags+=("--readiness") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--remove") - local_nonpersistent_flags+=("--remove") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--success-threshold=") - local_nonpersistent_flags+=("--success-threshold=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--timeout-seconds=") - local_nonpersistent_flags+=("--timeout-seconds=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_resources() -{ - last_command="oc_set_resources" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--containers=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--containers=") - 
flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--limits=") - local_nonpersistent_flags+=("--limits=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--requests=") - local_nonpersistent_flags+=("--requests=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_route-backends() -{ - last_command="oc_set_route-backends" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--adjust") - local_nonpersistent_flags+=("--adjust") - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--equal") - local_nonpersistent_flags+=("--equal") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--zero") - local_nonpersistent_flags+=("--zero") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - 
flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_selector() -{ - last_command="oc_set_selector" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--resource-version=") - local_nonpersistent_flags+=("--resource-version=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_serviceaccount() -{ - last_command="oc_set_serviceaccount" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - 
two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_subject() -{ - last_command="oc_set_subject" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--group=") - local_nonpersistent_flags+=("--group=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--serviceaccount=") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_triggers() -{ - last_command="oc_set_triggers" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") 
- flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--auto") - local_nonpersistent_flags+=("--auto") - flags+=("--containers=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--containers=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--from-bitbucket") - local_nonpersistent_flags+=("--from-bitbucket") - flags+=("--from-config") - local_nonpersistent_flags+=("--from-config") - flags+=("--from-github") - local_nonpersistent_flags+=("--from-github") - flags+=("--from-gitlab") - local_nonpersistent_flags+=("--from-gitlab") - flags+=("--from-image=") - local_nonpersistent_flags+=("--from-image=") - flags+=("--from-webhook") - local_nonpersistent_flags+=("--from-webhook") - flags+=("--from-webhook-allow-env") - local_nonpersistent_flags+=("--from-webhook-allow-env") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--manual") - local_nonpersistent_flags+=("--manual") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--remove") - local_nonpersistent_flags+=("--remove") - flags+=("--remove-all") - local_nonpersistent_flags+=("--remove-all") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_volumes() -{ - last_command="oc_set_volumes" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--add") - local_nonpersistent_flags+=("--add") - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--claim-class=") - local_nonpersistent_flags+=("--claim-class=") - flags+=("--claim-mode=") - local_nonpersistent_flags+=("--claim-mode=") - flags+=("--claim-name=") - local_nonpersistent_flags+=("--claim-name=") - flags+=("--claim-size=") - local_nonpersistent_flags+=("--claim-size=") - flags+=("--configmap-name=") - local_nonpersistent_flags+=("--configmap-name=") - flags+=("--confirm") - 
local_nonpersistent_flags+=("--confirm") - flags+=("--containers=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--containers=") - flags+=("--default-mode=") - local_nonpersistent_flags+=("--default-mode=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--mount-path=") - two_word_flags+=("-m") - local_nonpersistent_flags+=("--mount-path=") - flags+=("--name=") - local_nonpersistent_flags+=("--name=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--overwrite") - local_nonpersistent_flags+=("--overwrite") - flags+=("--path=") - local_nonpersistent_flags+=("--path=") - flags+=("--read-only") - local_nonpersistent_flags+=("--read-only") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--remove") - local_nonpersistent_flags+=("--remove") - flags+=("--secret-name=") - local_nonpersistent_flags+=("--secret-name=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--source=") - local_nonpersistent_flags+=("--source=") - flags+=("--sub-path=") - local_nonpersistent_flags+=("--sub-path=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--type=") - two_word_flags+=("-t") - local_nonpersistent_flags+=("--type=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set() -{ - last_command="oc_set" - commands=() - commands+=("build-hook") - commands+=("build-secret") - commands+=("deployment-hook") - commands+=("env") - commands+=("image") - commands+=("image-lookup") - commands+=("probe") - commands+=("resources") - commands+=("route-backends") - commands+=("selector") - commands+=("serviceaccount") - commands+=("subject") - commands+=("triggers") - commands+=("volumes") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") 
- flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_start-build() -{ - last_command="oc_start-build" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--build-arg=") - local_nonpersistent_flags+=("--build-arg=") - flags+=("--build-loglevel=") - local_nonpersistent_flags+=("--build-loglevel=") - flags+=("--commit=") - local_nonpersistent_flags+=("--commit=") - flags+=("--env=") - two_word_flags+=("-e") - local_nonpersistent_flags+=("--env=") - flags+=("--follow") - flags+=("-F") - local_nonpersistent_flags+=("--follow") - flags+=("--from-archive=") - local_nonpersistent_flags+=("--from-archive=") - flags+=("--from-build=") - local_nonpersistent_flags+=("--from-build=") - flags+=("--from-dir=") - local_nonpersistent_flags+=("--from-dir=") - flags+=("--from-file=") - local_nonpersistent_flags+=("--from-file=") - flags+=("--from-repo=") - local_nonpersistent_flags+=("--from-repo=") - flags+=("--from-webhook=") - local_nonpersistent_flags+=("--from-webhook=") - flags+=("--git-post-receive=") - local_nonpersistent_flags+=("--git-post-receive=") - flags+=("--git-repository=") - local_nonpersistent_flags+=("--git-repository=") - flags+=("--incremental") - local_nonpersistent_flags+=("--incremental") - flags+=("--list-webhooks=") - local_nonpersistent_flags+=("--list-webhooks=") - flags+=("--no-cache") - local_nonpersistent_flags+=("--no-cache") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--wait") - flags+=("-w") - local_nonpersistent_flags+=("--wait") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_status() -{ - last_command="oc_status" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--suggest") - local_nonpersistent_flags+=("--suggest") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - 
flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_tag() -{ - last_command="oc_tag" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--alias") - local_nonpersistent_flags+=("--alias") - flags+=("--delete") - flags+=("-d") - local_nonpersistent_flags+=("--delete") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--reference") - local_nonpersistent_flags+=("--reference") - flags+=("--reference-policy=") - local_nonpersistent_flags+=("--reference-policy=") - flags+=("--scheduled") - local_nonpersistent_flags+=("--scheduled") - flags+=("--source=") - local_nonpersistent_flags+=("--source=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_version() -{ - last_command="oc_version" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--client") - local_nonpersistent_flags+=("--client") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--short") - local_nonpersistent_flags+=("--short") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_wait() -{ - last_command="oc_wait" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - 
local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--field-selector=") - local_nonpersistent_flags+=("--field-selector=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--for=") - local_nonpersistent_flags+=("--for=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_whoami() -{ - last_command="oc_whoami" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--show-console") - local_nonpersistent_flags+=("--show-console") - flags+=("--show-context") - flags+=("-c") - local_nonpersistent_flags+=("--show-context") - flags+=("--show-server") - local_nonpersistent_flags+=("--show-server") - flags+=("--show-token") - flags+=("-t") - local_nonpersistent_flags+=("--show-token") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_root_command() -{ - last_command="oc" - commands=() - commands+=("adm") - commands+=("annotate") - commands+=("api-resources") - commands+=("api-versions") - commands+=("apply") - commands+=("attach") - commands+=("auth") - commands+=("autoscale") - commands+=("cancel-build") - commands+=("cluster-info") - commands+=("completion") - commands+=("config") - commands+=("convert") - commands+=("cp") - commands+=("create") - commands+=("debug") - commands+=("delete") - commands+=("describe") - commands+=("diff") - commands+=("edit") - 
commands+=("ex") - commands+=("exec") - commands+=("explain") - commands+=("expose") - commands+=("extract") - commands+=("get") - commands+=("idle") - commands+=("image") - commands+=("import-image") - commands+=("kustomize") - commands+=("label") - commands+=("login") - commands+=("logout") - commands+=("logs") - commands+=("new-app") - commands+=("new-build") - commands+=("new-project") - commands+=("observe") - commands+=("options") - commands+=("patch") - commands+=("plugin") - commands+=("policy") - commands+=("port-forward") - commands+=("process") - commands+=("project") - commands+=("projects") - commands+=("proxy") - commands+=("registry") - commands+=("replace") - commands+=("rollback") - commands+=("rollout") - commands+=("rsh") - commands+=("rsync") - commands+=("run") - commands+=("scale") - commands+=("secrets") - commands+=("serviceaccounts") - commands+=("set") - commands+=("start-build") - commands+=("status") - commands+=("tag") - commands+=("version") - commands+=("wait") - commands+=("whoami") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -__start_oc() -{ - local cur prev words cword - declare -A flaghash 2>/dev/null || : - if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -s || return - else - __oc_init_completion -n "=" || return - fi - - local c=0 - local flags=() - local two_word_flags=() - local local_nonpersistent_flags=() - local flags_with_completion=() - local flags_completion=() - local commands=("oc") - local must_have_one_flag=() - local must_have_one_noun=() - local last_command - local nouns=() - - __oc_handle_word -} - -if [[ $(type -t compopt) = "builtin" ]]; then - complete -o default -F __start_oc oc -else - complete -o default -o nospace -F __start_oc oc -fi - -# ex: ts=4 sw=4 et filetype=sh diff --git a/vendor/github.com/openshift/oc/contrib/completions/zsh/.files_generated b/vendor/github.com/openshift/oc/contrib/completions/zsh/.files_generated deleted file mode 100644 index 415f1771963f..000000000000 --- a/vendor/github.com/openshift/oc/contrib/completions/zsh/.files_generated +++ /dev/null @@ -1 +0,0 @@ -oc diff --git a/vendor/github.com/openshift/oc/contrib/completions/zsh/oc b/vendor/github.com/openshift/oc/contrib/completions/zsh/oc deleted file mode 100644 index 9bc00e23a567..000000000000 --- a/vendor/github.com/openshift/oc/contrib/completions/zsh/oc +++ /dev/null @@ -1,15018 +0,0 @@ -#compdef kubectl - - -__kubectl_bash_source() { - alias shopt=':' - alias _expand=_bash_expand - alias _complete=_bash_comp - emulate -L sh - setopt kshglob noshglob braceexpand - - source "$@" -} - -__kubectl_type() { - # -t is not supported by zsh - if [ "$1" == "-t" ]; then - shift - - # fake Bash 4 to disable "complete -o 
nospace". Instead - # "compopt +-o nospace" is used in the code to toggle trailing - # spaces. We don't support that, but leave trailing spaces on - # all the time - if [ "$1" = "__kubectl_compopt" ]; then - echo builtin - return 0 - fi - fi - type "$@" -} - -__kubectl_compgen() { - local completions w - completions=( $(compgen "$@") ) || return $? - - # filter by given word as prefix - while [[ "$1" = -* && "$1" != -- ]]; do - shift - shift - done - if [[ "$1" == -- ]]; then - shift - fi - for w in "${completions[@]}"; do - if [[ "${w}" = "$1"* ]]; then - echo "${w}" - fi - done -} - -__kubectl_compopt() { - true # don't do anything. Not supported by bashcompinit in zsh -} - -__kubectl_ltrim_colon_completions() -{ - if [[ "$1" == *:* && "$COMP_WORDBREAKS" == *:* ]]; then - # Remove colon-word prefix from COMPREPLY items - local colon_word=${1%${1##*:}} - local i=${#COMPREPLY[*]} - while [[ $((--i)) -ge 0 ]]; do - COMPREPLY[$i]=${COMPREPLY[$i]#"$colon_word"} - done - fi -} - -__kubectl_get_comp_words_by_ref() { - cur="${COMP_WORDS[COMP_CWORD]}" - prev="${COMP_WORDS[${COMP_CWORD}-1]}" - words=("${COMP_WORDS[@]}") - cword=("${COMP_CWORD[@]}") -} - -__kubectl_filedir() { - local RET OLD_IFS w qw - - __kubectl_debug "_filedir $@ cur=$cur" - if [[ "$1" = \~* ]]; then - # somehow does not work. Maybe, zsh does not call this at all - eval echo "$1" - return 0 - fi - - OLD_IFS="$IFS" - IFS=$'\n' - if [ "$1" = "-d" ]; then - shift - RET=( $(compgen -d) ) - else - RET=( $(compgen -f) ) - fi - IFS="$OLD_IFS" - - IFS="," __kubectl_debug "RET=${RET[@]} len=${#RET[@]}" - - for w in ${RET[@]}; do - if [[ ! "${w}" = "${cur}"* ]]; then - continue - fi - if eval "[[ \"\${w}\" = *.$1 || -d \"\${w}\" ]]"; then - qw="$(__kubectl_quote "${w}")" - if [ -d "${w}" ]; then - COMPREPLY+=("${qw}/") - else - COMPREPLY+=("${qw}") - fi - fi - done -} - -__kubectl_quote() { - if [[ $1 == \'* || $1 == \"* ]]; then - # Leave out first character - printf %q "${1:1}" - else - printf %q "$1" - fi -} - -autoload -U +X bashcompinit && bashcompinit - -# use word boundary patterns for BSD or GNU sed -LWORD='[[:<:]]' -RWORD='[[:>:]]' -if sed --help 2>&1 | grep -q GNU; then - LWORD='\<' - RWORD='\>' -fi - -__kubectl_convert_bash_to_zsh() { - sed \ - -e 's/declare -F/whence -w/' \ - -e 's/_get_comp_words_by_ref "\$@"/_get_comp_words_by_ref "\$*"/' \ - -e 's/local \([a-zA-Z0-9_]*\)=/local \1; \1=/' \ - -e 's/flags+=("\(--.*\)=")/flags+=("\1"); two_word_flags+=("\1")/' \ - -e 's/must_have_one_flag+=("\(--.*\)=")/must_have_one_flag+=("\1")/' \ - -e "s/${LWORD}_filedir${RWORD}/__kubectl_filedir/g" \ - -e "s/${LWORD}_get_comp_words_by_ref${RWORD}/__kubectl_get_comp_words_by_ref/g" \ - -e "s/${LWORD}__ltrim_colon_completions${RWORD}/__kubectl_ltrim_colon_completions/g" \ - -e "s/${LWORD}compgen${RWORD}/__kubectl_compgen/g" \ - -e "s/${LWORD}compopt${RWORD}/__kubectl_compopt/g" \ - -e "s/${LWORD}declare${RWORD}/builtin declare/g" \ - -e "s/\\\$(type${RWORD}/\$(__kubectl_type/g" \ - <<'BASH_COMPLETION_EOF' -# bash completion for oc -*- shell-script -*- - -__oc_debug() -{ - if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then - echo "$*" >> "${BASH_COMP_DEBUG_FILE}" - fi -} - -# Homebrew on Macs have version 1.3 of bash-completion which doesn't include -# _init_completion. This is a very minimal version of that function. 
-__oc_init_completion()
-{
-    COMPREPLY=()
-    _get_comp_words_by_ref "$@" cur prev words cword
-}
-
-__oc_index_of_word()
-{
-    local w word=$1
-    shift
-    index=0
-    for w in "$@"; do
-        [[ $w = "$word" ]] && return
-        index=$((index+1))
-    done
-    index=-1
-}
-
-__oc_contains_word()
-{
-    local w word=$1; shift
-    for w in "$@"; do
-        [[ $w = "$word" ]] && return
-    done
-    return 1
-}
-
-__oc_handle_reply()
-{
-    __oc_debug "${FUNCNAME[0]}"
-    case $cur in
-        -*)
-            if [[ $(type -t compopt) = "builtin" ]]; then
-                compopt -o nospace
-            fi
-            local allflags
-            if [ ${#must_have_one_flag[@]} -ne 0 ]; then
-                allflags=("${must_have_one_flag[@]}")
-            else
-                allflags=("${flags[*]} ${two_word_flags[*]}")
-            fi
-            COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") )
-            if [[ $(type -t compopt) = "builtin" ]]; then
-                [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
-            fi
-
-            # complete after --flag=abc
-            if [[ $cur == *=* ]]; then
-                if [[ $(type -t compopt) = "builtin" ]]; then
-                    compopt +o nospace
-                fi
-
-                local index flag
-                flag="${cur%=*}"
-                __oc_index_of_word "${flag}" "${flags_with_completion[@]}"
-                COMPREPLY=()
-                if [[ ${index} -ge 0 ]]; then
-                    PREFIX=""
-                    cur="${cur#*=}"
-                    ${flags_completion[${index}]}
-                    if [ -n "${ZSH_VERSION}" ]; then
-                        # zsh completion needs --flag= prefix
-                        eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
-                    fi
-                fi
-            fi
-            return 0;
-            ;;
-    esac
-
-    # check if we are handling a flag with special work handling
-    local index
-    __oc_index_of_word "${prev}" "${flags_with_completion[@]}"
-    if [[ ${index} -ge 0 ]]; then
-        ${flags_completion[${index}]}
-        return
-    fi
-
-    # we are parsing a flag and don't have a special handler, no completion
-    if [[ ${cur} != "${words[cword]}" ]]; then
-        return
-    fi
-
-    local completions
-    completions=("${commands[@]}")
-    if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
-        completions=("${must_have_one_noun[@]}")
-    fi
-    if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
-        completions+=("${must_have_one_flag[@]}")
-    fi
-    COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") )
-
-    if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
-        COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") )
-    fi
-
-    if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
-        declare -F __custom_func >/dev/null && __custom_func
-    fi
-
-    # available in bash-completion >= 2, not always present on macOS
-    if declare -F __ltrim_colon_completions >/dev/null; then
-        __ltrim_colon_completions "$cur"
-    fi
-
-    # If there is only 1 completion and it is a flag with an = it will be completed
-    # but we don't want a space after the =
-    if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
-        compopt -o nospace
-    fi
-}
-
-# The arguments should be in the form "ext1|ext2|extn"
-__oc_handle_filename_extension_flag()
-{
-    local ext="$1"
-    _filedir "@(${ext})"
-}
-
-__oc_handle_subdirs_in_dir_flag()
-{
-    local dir="$1"
-    pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1
-}
-
-__oc_handle_flag()
-{
-    __oc_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
-
-    # if a command required a flag, and we found it, unset must_have_one_flag()
-    local flagname=${words[c]}
-    local flagvalue
-    # if the word contained an =
-    if [[ ${words[c]} == *"="* ]]; then
-        flagvalue=${flagname#*=} # take in as flagvalue after the =
-        flagname=${flagname%=*} # strip everything after the =
-        flagname="${flagname}=" # but put the = back
-    fi
-    __oc_debug "${FUNCNAME[0]}: looking for ${flagname}"
-    if __oc_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
-        must_have_one_flag=()
-    fi
-
-    # if you set a flag which only applies to this command, don't show subcommands
-    if __oc_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
-        commands=()
-    fi
-
-    # keep flag value with flagname as flaghash
-    # flaghash variable is an associative array which is only supported in bash > 3.
-    if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
-        if [ -n "${flagvalue}" ] ; then
-            flaghash[${flagname}]=${flagvalue}
-        elif [ -n "${words[ $((c+1)) ]}" ] ; then
-            flaghash[${flagname}]=${words[ $((c+1)) ]}
-        else
-            flaghash[${flagname}]="true" # pad "true" for bool flag
-        fi
-    fi
-
-    # skip the argument to a two word flag
-    if __oc_contains_word "${words[c]}" "${two_word_flags[@]}"; then
-        c=$((c+1))
-        # if we are looking for a flags value, don't show commands
-        if [[ $c -eq $cword ]]; then
-            commands=()
-        fi
-    fi
-
-    c=$((c+1))
-
-}
-
-__oc_handle_noun()
-{
-    __oc_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
-
-    if __oc_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
-        must_have_one_noun=()
-    elif __oc_contains_word "${words[c]}" "${noun_aliases[@]}"; then
-        must_have_one_noun=()
-    fi
-
-    nouns+=("${words[c]}")
-    c=$((c+1))
-}
-
-__oc_handle_command()
-{
-    __oc_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
-
-    local next_command
-    if [[ -n ${last_command} ]]; then
-        next_command="_${last_command}_${words[c]//:/__}"
-    else
-        if [[ $c -eq 0 ]]; then
-            next_command="_oc_root_command"
-        else
-            next_command="_${words[c]//:/__}"
-        fi
-    fi
-    c=$((c+1))
-    __oc_debug "${FUNCNAME[0]}: looking for ${next_command}"
-    declare -F "$next_command" >/dev/null && $next_command
-}
-
-__oc_handle_word()
-{
-    if [[ $c -ge $cword ]]; then
-        __oc_handle_reply
-        return
-    fi
-    __oc_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
-    if [[ "${words[c]}" == -* ]]; then
-        __oc_handle_flag
-    elif __oc_contains_word "${words[c]}" "${commands[@]}"; then
-        __oc_handle_command
-    elif [[ $c -eq 0 ]]; then
-        __oc_handle_command
-    else
-        __oc_handle_noun
-    fi
-    __oc_handle_word
-}
-
-# call oc get $1,
-__oc_override_flag_list=(config cluster user context namespace server)
-__oc_override_flags()
-{
-    local ${__oc_override_flag_list[*]} two_word_of of
-    for w in "${words[@]}"; do
-        if [ -n "${two_word_of}" ]; then
-            eval "${two_word_of}=\"--${two_word_of}=\${w}\""
-            two_word_of=
-            continue
-        fi
-        for of in "${__oc_override_flag_list[@]}"; do
-            case "${w}" in
-                --${of}=*)
-                    eval "${of}=\"${w}\""
-                    ;;
-                --${of})
-                    two_word_of="${of}"
-                    ;;
-            esac
-        done
-    done
-    for of in "${__oc_override_flag_list[@]}"; do
-        if eval "test -n \"\$${of}\""; then
-            eval "echo \${${of}}"
-        fi
-    done
-}
-__oc_parse_get()
-{
-
-    local template
-    template="{{ range .items }}{{ .metadata.name }} {{ end }}"
-    local oc_out
-    if oc_out=$(oc get $(__oc_override_flags) -o template --template="${template}" "$1" 2>/dev/null); then
-        COMPREPLY=( $( compgen -W "${oc_out[*]}" -- "$cur" ) )
-    fi
-}
-
-__oc_get_namespaces()
-{
-    local template oc_out
-    template="{{ range .items }}{{ .metadata.name }} {{ end }}"
-    if oc_out=$(oc get -o template --template="${template}" namespace 2>/dev/null); then
-        COMPREPLY=( $( compgen -W "${oc_out[*]}" -- "$cur" ) )
-    fi
-}
-
-__oc_get_resource()
-{
-    if [[ ${#nouns[@]} -eq 0 ]]; then
-        local oc_out
-        if oc_out=$(oc api-resources $(__oc_override_flags) -o name --cached --request-timeout=5s --verbs=get 2>/dev/null); then
-            COMPREPLY=( $( compgen -W "${oc_out[*]}" -- "$cur" ) )
-            return 0
-        fi
-        return 1
-    fi
-    __oc_parse_get "${nouns[${#nouns[@]} -1]}"
-}
-
-# $1 is the name of the pod we want to get the list of containers inside
-__oc_get_containers()
-{
-    local template
-    template="{{ range .spec.containers }}{{ .name }} {{ end }}"
-    __oc_debug "${FUNCNAME} nouns are ${nouns[@]}"
-
-    local len="${#nouns[@]}"
-    if [[ ${len} -ne 1 ]]; then
-        return
-    fi
-    local last=${nouns[${len} -1]}
-    local oc_out
-    if oc_out=$(oc get -o template --template="${template}" pods "${last}" 2>/dev/null); then
-        COMPREPLY=( $( compgen -W "${oc_out[*]}" -- "$cur" ) )
-    fi
-}
-
-# Require both a pod and a container to be specified
-__oc_require_pod_and_container()
-{
-    if [[ ${#nouns[@]} -eq 0 ]]; then
-        __oc_parse_get pods
-        return 0
-    fi;
-    __oc_get_containers
-    return 0
-}
-
-__custom_func() {
-    case ${last_command} in
-
-        # first arg is the kind according to ValidArgs, second is resource name
-        oc_get | oc_describe | oc_delete | oc_label | oc_expose | oc_export | oc_patch | oc_annotate | oc_edit | oc_scale | oc_autoscale | oc_observe )
-            __oc_get_resource
-            return
-            ;;
-
-        # first arg is a pod name
-        oc_rsh | oc_exec | oc_port-forward | oc_attach)
-            if [[ ${#nouns[@]} -eq 0 ]]; then
-                __oc_parse_get pods
-            fi;
-            return
-            ;;
-
-        # first arg is a pod name, second is a container name
-        oc_logs)
-            __oc_require_pod_and_container
-            return
-            ;;
-
-        # first arg is a build config name
-        oc_start-build | oc_cancel-build)
-            if [[ ${#nouns[@]} -eq 0 ]]; then
-                __oc_parse_get buildconfigs
-            fi;
-            return
-            ;;
-
-        # first arg is a deployment config OR deployment
-        oc_rollback)
-            if [[ ${#nouns[@]} -eq 0 ]]; then
-                __oc_parse_get deploymentconfigs,replicationcontrollers
-            fi;
-            return
-            ;;
-
-        # first arg is a project name
-        oc_project)
-            if [[ ${#nouns[@]} -eq 0 ]]; then
-                __oc_parse_get projects
-            fi;
-            return
-            ;;
-
-        # first arg is an image stream
-        oc_import-image)
-            if [[ ${#nouns[@]} -eq 0 ]]; then
-                __oc_parse_get imagestreams
-            fi;
-            return
-            ;;
-
-        *)
-            ;;
-    esac
-}
-
-_oc_adm_build-chain()
-{
-    last_command="oc_adm_build-chain"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--all")
-    local_nonpersistent_flags+=("--all")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--reverse")
-    local_nonpersistent_flags+=("--reverse")
-    flags+=("--trigger-only")
-    local_nonpersistent_flags+=("--trigger-only")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_certificate_approve()
-{
-    last_command="oc_adm_certificate_approve"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--filename=")
-    flags_with_completion+=("--filename")
-    flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml")
-    two_word_flags+=("-f")
-    flags_with_completion+=("-f")
-    flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml")
-    local_nonpersistent_flags+=("--filename=")
-    flags+=("--force")
-    local_nonpersistent_flags+=("--force")
-    flags+=("--kustomize=")
-    two_word_flags+=("-k")
-    local_nonpersistent_flags+=("--kustomize=")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--recursive")
-    flags+=("-R")
-    local_nonpersistent_flags+=("--recursive")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_certificate_deny()
-{
-    last_command="oc_adm_certificate_deny"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--filename=")
-    flags_with_completion+=("--filename")
-    flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml")
-    two_word_flags+=("-f")
-    flags_with_completion+=("-f")
-    flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml")
-    local_nonpersistent_flags+=("--filename=")
-    flags+=("--force")
-    local_nonpersistent_flags+=("--force")
-    flags+=("--kustomize=")
-    two_word_flags+=("-k")
-    local_nonpersistent_flags+=("--kustomize=")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--recursive")
-    flags+=("-R")
-    local_nonpersistent_flags+=("--recursive")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_certificate()
-{
-    last_command="oc_adm_certificate"
-    commands=()
-    commands+=("approve")
-    commands+=("deny")
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_completion()
-{
-    last_command="oc_adm_completion"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    must_have_one_noun+=("bash")
-    must_have_one_noun+=("zsh")
-    noun_aliases=()
-}
-
-_oc_adm_config_current-context()
-{
-    last_command="oc_adm_config_current-context"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_config_delete-cluster()
-{
-    last_command="oc_adm_config_delete-cluster"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_config_delete-context()
-{
-    last_command="oc_adm_config_delete-context"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_config_get-clusters()
-{
-    last_command="oc_adm_config_get-clusters"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_config_get-contexts()
-{
-    last_command="oc_adm_config_get-contexts"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--no-headers")
-    local_nonpersistent_flags+=("--no-headers")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_adm_config_rename-context()
-{
-    last_command="oc_adm_config_rename-context"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-
flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_set() -{ - last_command="oc_adm_config_set" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--set-raw-bytes") - local_nonpersistent_flags+=("--set-raw-bytes") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_set-cluster() -{ - last_command="oc_adm_config_set-cluster" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--embed-certs") - local_nonpersistent_flags+=("--embed-certs") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_set-context() -{ - last_command="oc_adm_config_set-context" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--current") - local_nonpersistent_flags+=("--current") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - 
flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_set-credentials() -{ - last_command="oc_adm_config_set-credentials" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--auth-provider=") - local_nonpersistent_flags+=("--auth-provider=") - flags+=("--auth-provider-arg=") - local_nonpersistent_flags+=("--auth-provider-arg=") - flags+=("--embed-certs") - local_nonpersistent_flags+=("--embed-certs") - flags+=("--password=") - local_nonpersistent_flags+=("--password=") - flags+=("--username=") - local_nonpersistent_flags+=("--username=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_unset() -{ - last_command="oc_adm_config_unset" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_use-context() -{ - last_command="oc_adm_config_use-context" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config_view() -{ - 
last_command="oc_adm_config_view" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--flatten") - local_nonpersistent_flags+=("--flatten") - flags+=("--merge") - local_nonpersistent_flags+=("--merge") - flags+=("--minify") - local_nonpersistent_flags+=("--minify") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--raw") - local_nonpersistent_flags+=("--raw") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_config() -{ - last_command="oc_adm_config" - commands=() - commands+=("current-context") - commands+=("delete-cluster") - commands+=("delete-context") - commands+=("get-clusters") - commands+=("get-contexts") - commands+=("rename-context") - commands+=("set") - commands+=("set-cluster") - commands+=("set-context") - commands+=("set-credentials") - commands+=("unset") - commands+=("use-context") - commands+=("view") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_cordon() -{ - last_command="oc_adm_cordon" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - 
flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_create-bootstrap-project-template() -{ - last_command="oc_adm_create-bootstrap-project-template" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--name=") - local_nonpersistent_flags+=("--name=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_create-error-template() -{ - last_command="oc_adm_create-error-template" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_create-kubeconfig() -{ - last_command="oc_adm_create-kubeconfig" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--master=") - local_nonpersistent_flags+=("--master=") - flags+=("--public-master=") - local_nonpersistent_flags+=("--public-master=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - 
flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_create-login-template() -{ - last_command="oc_adm_create-login-template" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_create-provider-selection-template() -{ - last_command="oc_adm_create-provider-selection-template" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_drain() -{ - last_command="oc_adm_drain" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--delete-local-data") - local_nonpersistent_flags+=("--delete-local-data") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--force") - local_nonpersistent_flags+=("--force") - flags+=("--grace-period=") - local_nonpersistent_flags+=("--grace-period=") - flags+=("--ignore-daemonsets") - local_nonpersistent_flags+=("--ignore-daemonsets") - flags+=("--pod-selector=") - local_nonpersistent_flags+=("--pod-selector=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - 
flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_groups_add-users() -{ - last_command="oc_adm_groups_add-users" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_groups_new() -{ - last_command="oc_adm_groups_new" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_groups_prune() -{ - last_command="oc_adm_groups_prune" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--blacklist=") - flags_with_completion+=("--blacklist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--blacklist=") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--sync-config=") - flags_with_completion+=("--sync-config") - flags_completion+=("__oc_handle_filename_extension_flag yaml|yml") - local_nonpersistent_flags+=("--sync-config=") - flags+=("--whitelist=") - 
flags_with_completion+=("--whitelist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--whitelist=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_groups_remove-users() -{ - last_command="oc_adm_groups_remove-users" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_groups_sync() -{ - last_command="oc_adm_groups_sync" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--blacklist=") - flags_with_completion+=("--blacklist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--blacklist=") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--sync-config=") - flags_with_completion+=("--sync-config") - flags_completion+=("__oc_handle_filename_extension_flag yaml|yml") - local_nonpersistent_flags+=("--sync-config=") - flags+=("--type=") - local_nonpersistent_flags+=("--type=") - flags+=("--whitelist=") - flags_with_completion+=("--whitelist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--whitelist=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - 
flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_groups() -{ - last_command="oc_adm_groups" - commands=() - commands+=("add-users") - commands+=("new") - commands+=("prune") - commands+=("remove-users") - commands+=("sync") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_migrate_etcd-ttl() -{ - last_command="oc_adm_migrate_etcd-ttl" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--cacert=") - local_nonpersistent_flags+=("--cacert=") - flags+=("--cert=") - local_nonpersistent_flags+=("--cert=") - flags+=("--etcd-address=") - local_nonpersistent_flags+=("--etcd-address=") - flags+=("--key=") - local_nonpersistent_flags+=("--key=") - flags+=("--lease-duration=") - local_nonpersistent_flags+=("--lease-duration=") - flags+=("--ttl-keys-prefix=") - local_nonpersistent_flags+=("--ttl-keys-prefix=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_migrate_image-references() -{ - last_command="oc_adm_migrate_image-references" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - 
local_nonpersistent_flags+=("--filename=") - flags+=("--from-key=") - local_nonpersistent_flags+=("--from-key=") - flags+=("--include=") - local_nonpersistent_flags+=("--include=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--to-key=") - local_nonpersistent_flags+=("--to-key=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_migrate_legacy-hpa() -{ - last_command="oc_adm_migrate_legacy-hpa" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--from-key=") - local_nonpersistent_flags+=("--from-key=") - flags+=("--include=") - local_nonpersistent_flags+=("--include=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--to-key=") - local_nonpersistent_flags+=("--to-key=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_migrate_storage() -{ - last_command="oc_adm_migrate_storage" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - 
local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--bandwidth=") - local_nonpersistent_flags+=("--bandwidth=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--from-key=") - local_nonpersistent_flags+=("--from-key=") - flags+=("--include=") - local_nonpersistent_flags+=("--include=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--to-key=") - local_nonpersistent_flags+=("--to-key=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_migrate_template-instances() -{ - last_command="oc_adm_migrate_template-instances" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--from-key=") - local_nonpersistent_flags+=("--from-key=") - flags+=("--include=") - local_nonpersistent_flags+=("--include=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--to-key=") - local_nonpersistent_flags+=("--to-key=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - 
-_oc_adm_migrate() -{ - last_command="oc_adm_migrate" - commands=() - commands+=("etcd-ttl") - commands+=("image-references") - commands+=("legacy-hpa") - commands+=("storage") - commands+=("template-instances") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_must-gather() -{ - last_command="oc_adm_must-gather" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--dest-dir=") - local_nonpersistent_flags+=("--dest-dir=") - flags+=("--image=") - local_nonpersistent_flags+=("--image=") - flags+=("--node-name=") - local_nonpersistent_flags+=("--node-name=") - flags+=("--source-dir=") - local_nonpersistent_flags+=("--source-dir=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_new-project() -{ - last_command="oc_adm_new-project" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--admin=") - local_nonpersistent_flags+=("--admin=") - flags+=("--admin-role=") - local_nonpersistent_flags+=("--admin-role=") - flags+=("--description=") - local_nonpersistent_flags+=("--description=") - flags+=("--display-name=") - local_nonpersistent_flags+=("--display-name=") - flags+=("--node-selector=") - local_nonpersistent_flags+=("--node-selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - 
must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_node-logs() -{ - last_command="oc_adm_node-logs" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--case-sensitive") - local_nonpersistent_flags+=("--case-sensitive") - flags+=("--grep=") - two_word_flags+=("-g") - local_nonpersistent_flags+=("--grep=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--path=") - local_nonpersistent_flags+=("--path=") - flags+=("--raw") - local_nonpersistent_flags+=("--raw") - flags+=("--role=") - local_nonpersistent_flags+=("--role=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--since=") - local_nonpersistent_flags+=("--since=") - flags+=("--tail=") - local_nonpersistent_flags+=("--tail=") - flags+=("--unify") - local_nonpersistent_flags+=("--unify") - flags+=("--unit=") - two_word_flags+=("-u") - local_nonpersistent_flags+=("--unit=") - flags+=("--until=") - local_nonpersistent_flags+=("--until=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_options() -{ - last_command="oc_adm_options" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_pod-network_isolate-projects() -{ - last_command="oc_adm_pod-network_isolate-projects" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--selector=") - local_nonpersistent_flags+=("--selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - 
flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_pod-network_join-projects() -{ - last_command="oc_adm_pod-network_join-projects" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--selector=") - local_nonpersistent_flags+=("--selector=") - flags+=("--to=") - local_nonpersistent_flags+=("--to=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_pod-network_make-projects-global() -{ - last_command="oc_adm_pod-network_make-projects-global" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--selector=") - local_nonpersistent_flags+=("--selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_pod-network() -{ - last_command="oc_adm_pod-network" - commands=() - commands+=("isolate-projects") - commands+=("join-projects") - commands+=("make-projects-global") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - 
-_oc_adm_policy_add-cluster-role-to-group() -{ - last_command="oc_adm_policy_add-cluster-role-to-group" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_add-cluster-role-to-user() -{ - last_command="oc_adm_policy_add-cluster-role-to-user" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_add-role-to-group() -{ - last_command="oc_adm_policy_add-role-to-group" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - 
flags+=("--role-namespace=") - local_nonpersistent_flags+=("--role-namespace=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_add-role-to-user() -{ - last_command="oc_adm_policy_add-role-to-user" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--role-namespace=") - local_nonpersistent_flags+=("--role-namespace=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_add-scc-to-group() -{ - last_command="oc_adm_policy_add-scc-to-group" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - 
flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_add-scc-to-user() -{ - last_command="oc_adm_policy_add-scc-to-user" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_remove-cluster-role-from-group() -{ - last_command="oc_adm_policy_remove-cluster-role-from-group" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_remove-cluster-role-from-user() -{ - 
last_command="oc_adm_policy_remove-cluster-role-from-user" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_remove-group() -{ - last_command="oc_adm_policy_remove-group" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_remove-role-from-group() -{ - last_command="oc_adm_policy_remove-role-from-group" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--role-namespace=") - local_nonpersistent_flags+=("--role-namespace=") - flags+=("--rolebinding-name=") - 
local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_remove-role-from-user() -{ - last_command="oc_adm_policy_remove-role-from-user" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--role-namespace=") - local_nonpersistent_flags+=("--role-namespace=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_remove-scc-from-group() -{ - last_command="oc_adm_policy_remove-scc-from-group" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - 
flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_remove-scc-from-user() -{ - last_command="oc_adm_policy_remove-scc-from-user" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_remove-user() -{ - last_command="oc_adm_policy_remove-user" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_scc-review() -{ - last_command="oc_adm_policy_scc-review" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - 
local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_scc-subject-review() -{ - last_command="oc_adm_policy_scc-subject-review" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--groups=") - two_word_flags+=("-g") - local_nonpersistent_flags+=("--groups=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - 
flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy_who-can() -{ - last_command="oc_adm_policy_who-can" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_policy() -{ - last_command="oc_adm_policy" - commands=() - commands+=("add-cluster-role-to-group") - commands+=("add-cluster-role-to-user") - commands+=("add-role-to-group") - commands+=("add-role-to-user") - commands+=("add-scc-to-group") - commands+=("add-scc-to-user") - commands+=("remove-cluster-role-from-group") - commands+=("remove-cluster-role-from-user") - commands+=("remove-group") - commands+=("remove-role-from-group") - commands+=("remove-role-from-user") - commands+=("remove-scc-from-group") - commands+=("remove-scc-from-user") - commands+=("remove-user") - commands+=("scc-review") - commands+=("scc-subject-review") - commands+=("who-can") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_prune_auth() -{ - last_command="oc_adm_prune_auth" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--filename=") - flags_with_completion+=("--filename") - 
flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_prune_builds() -{ - last_command="oc_adm_prune_builds" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--keep-complete=") - local_nonpersistent_flags+=("--keep-complete=") - flags+=("--keep-failed=") - local_nonpersistent_flags+=("--keep-failed=") - flags+=("--keep-younger-than=") - local_nonpersistent_flags+=("--keep-younger-than=") - flags+=("--orphans") - local_nonpersistent_flags+=("--orphans") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_prune_deployments() -{ - last_command="oc_adm_prune_deployments" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--keep-complete=") - local_nonpersistent_flags+=("--keep-complete=") - flags+=("--keep-failed=") - local_nonpersistent_flags+=("--keep-failed=") - flags+=("--keep-younger-than=") - local_nonpersistent_flags+=("--keep-younger-than=") - flags+=("--orphans") - local_nonpersistent_flags+=("--orphans") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - 
flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_prune_groups() -{ - last_command="oc_adm_prune_groups" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--blacklist=") - flags_with_completion+=("--blacklist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--blacklist=") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--sync-config=") - flags_with_completion+=("--sync-config") - flags_completion+=("__oc_handle_filename_extension_flag yaml|yml") - local_nonpersistent_flags+=("--sync-config=") - flags+=("--whitelist=") - flags_with_completion+=("--whitelist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--whitelist=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_prune_images() -{ - last_command="oc_adm_prune_images" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--force-insecure") - local_nonpersistent_flags+=("--force-insecure") - flags+=("--ignore-invalid-refs") - local_nonpersistent_flags+=("--ignore-invalid-refs") - flags+=("--keep-tag-revisions=") - local_nonpersistent_flags+=("--keep-tag-revisions=") - flags+=("--keep-younger-than=") - local_nonpersistent_flags+=("--keep-younger-than=") - flags+=("--prune-over-size-limit") - local_nonpersistent_flags+=("--prune-over-size-limit") - flags+=("--prune-registry") - local_nonpersistent_flags+=("--prune-registry") - flags+=("--registry-url=") - local_nonpersistent_flags+=("--registry-url=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() 
- must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_prune() -{ - last_command="oc_adm_prune" - commands=() - commands+=("auth") - commands+=("builds") - commands+=("deployments") - commands+=("groups") - commands+=("images") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_release_extract() -{ - last_command="oc_adm_release_extract" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--command=") - local_nonpersistent_flags+=("--command=") - flags+=("--command-os=") - local_nonpersistent_flags+=("--command-os=") - flags+=("--file=") - local_nonpersistent_flags+=("--file=") - flags+=("--from=") - local_nonpersistent_flags+=("--from=") - flags+=("--git=") - local_nonpersistent_flags+=("--git=") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--max-per-registry=") - local_nonpersistent_flags+=("--max-per-registry=") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--signing-key=") - local_nonpersistent_flags+=("--signing-key=") - flags+=("--skip-verification") - local_nonpersistent_flags+=("--skip-verification") - flags+=("--to=") - local_nonpersistent_flags+=("--to=") - flags+=("--tools") - local_nonpersistent_flags+=("--tools") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_release_info() -{ - last_command="oc_adm_release_info" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--bugs=") - local_nonpersistent_flags+=("--bugs=") - flags+=("--changelog=") - local_nonpersistent_flags+=("--changelog=") - flags+=("--changes-from=") - local_nonpersistent_flags+=("--changes-from=") - flags+=("--commits") - local_nonpersistent_flags+=("--commits") - flags+=("--contents") - local_nonpersistent_flags+=("--contents") - flags+=("--image-for=") - local_nonpersistent_flags+=("--image-for=") - 
flags+=("--include-images") - local_nonpersistent_flags+=("--include-images") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--max-per-registry=") - local_nonpersistent_flags+=("--max-per-registry=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--pullspecs") - local_nonpersistent_flags+=("--pullspecs") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--size") - local_nonpersistent_flags+=("--size") - flags+=("--skip-verification") - local_nonpersistent_flags+=("--skip-verification") - flags+=("--verify") - local_nonpersistent_flags+=("--verify") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_release_mirror() -{ - last_command="oc_adm_release_mirror" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--from=") - local_nonpersistent_flags+=("--from=") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--max-per-registry=") - local_nonpersistent_flags+=("--max-per-registry=") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--skip-release-image") - local_nonpersistent_flags+=("--skip-release-image") - flags+=("--skip-verification") - local_nonpersistent_flags+=("--skip-verification") - flags+=("--to=") - local_nonpersistent_flags+=("--to=") - flags+=("--to-image-stream=") - local_nonpersistent_flags+=("--to-image-stream=") - flags+=("--to-release-image=") - local_nonpersistent_flags+=("--to-release-image=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_release_new() -{ - last_command="oc_adm_release_new" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-images") - local_nonpersistent_flags+=("--allow-missing-images") - flags+=("--component-versions=") - 
local_nonpersistent_flags+=("--component-versions=") - flags+=("--dir=") - local_nonpersistent_flags+=("--dir=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--exclude=") - local_nonpersistent_flags+=("--exclude=") - flags+=("--from-dir=") - local_nonpersistent_flags+=("--from-dir=") - flags+=("--from-image-stream=") - local_nonpersistent_flags+=("--from-image-stream=") - flags+=("--from-image-stream-file=") - two_word_flags+=("-f") - local_nonpersistent_flags+=("--from-image-stream-file=") - flags+=("--from-release=") - local_nonpersistent_flags+=("--from-release=") - flags+=("--include=") - local_nonpersistent_flags+=("--include=") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--mapping-file=") - local_nonpersistent_flags+=("--mapping-file=") - flags+=("--max-per-registry=") - local_nonpersistent_flags+=("--max-per-registry=") - flags+=("--metadata=") - local_nonpersistent_flags+=("--metadata=") - flags+=("--mirror=") - local_nonpersistent_flags+=("--mirror=") - flags+=("--name=") - local_nonpersistent_flags+=("--name=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--previous=") - local_nonpersistent_flags+=("--previous=") - flags+=("--reference-mode=") - local_nonpersistent_flags+=("--reference-mode=") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--release-manifest") - local_nonpersistent_flags+=("--release-manifest") - flags+=("--skip-manifest-check") - local_nonpersistent_flags+=("--skip-manifest-check") - flags+=("--skip-verification") - local_nonpersistent_flags+=("--skip-verification") - flags+=("--to-dir=") - local_nonpersistent_flags+=("--to-dir=") - flags+=("--to-file=") - local_nonpersistent_flags+=("--to-file=") - flags+=("--to-image=") - local_nonpersistent_flags+=("--to-image=") - flags+=("--to-image-base=") - local_nonpersistent_flags+=("--to-image-base=") - flags+=("--to-image-base-tag=") - local_nonpersistent_flags+=("--to-image-base-tag=") - flags+=("--to-signature=") - local_nonpersistent_flags+=("--to-signature=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_release() -{ - last_command="oc_adm_release" - commands=() - commands+=("extract") - commands+=("info") - commands+=("mirror") - commands+=("new") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - 
flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_taint() -{ - last_command="oc_adm_taint" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--overwrite") - local_nonpersistent_flags+=("--overwrite") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("node") - noun_aliases=() -} - -_oc_adm_top_images() -{ - last_command="oc_adm_top_images" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_top_imagestreams() -{ - last_command="oc_adm_top_imagestreams" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - 
flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_top_node() -{ - last_command="oc_adm_top_node" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--heapster-namespace=") - local_nonpersistent_flags+=("--heapster-namespace=") - flags+=("--heapster-port=") - local_nonpersistent_flags+=("--heapster-port=") - flags+=("--heapster-scheme=") - local_nonpersistent_flags+=("--heapster-scheme=") - flags+=("--heapster-service=") - local_nonpersistent_flags+=("--heapster-service=") - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_top_pod() -{ - last_command="oc_adm_top_pod" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--containers") - local_nonpersistent_flags+=("--containers") - flags+=("--heapster-namespace=") - local_nonpersistent_flags+=("--heapster-namespace=") - flags+=("--heapster-port=") - local_nonpersistent_flags+=("--heapster-port=") - flags+=("--heapster-scheme=") - local_nonpersistent_flags+=("--heapster-scheme=") - flags+=("--heapster-service=") - local_nonpersistent_flags+=("--heapster-service=") - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_top() -{ - last_command="oc_adm_top" - commands=() - commands+=("images") - commands+=("imagestreams") - commands+=("node") - commands+=("pod") - - 
flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_uncordon() -{ - last_command="oc_adm_uncordon" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_upgrade() -{ - last_command="oc_adm_upgrade" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--clear") - local_nonpersistent_flags+=("--clear") - flags+=("--force") - local_nonpersistent_flags+=("--force") - flags+=("--to=") - local_nonpersistent_flags+=("--to=") - flags+=("--to-image=") - local_nonpersistent_flags+=("--to-image=") - flags+=("--to-latest") - local_nonpersistent_flags+=("--to-latest") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm_verify-image-signature() -{ - last_command="oc_adm_verify-image-signature" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--expected-identity=") - local_nonpersistent_flags+=("--expected-identity=") - flags+=("--insecure") - 
local_nonpersistent_flags+=("--insecure") - flags+=("--public-key=") - local_nonpersistent_flags+=("--public-key=") - flags+=("--registry-url=") - local_nonpersistent_flags+=("--registry-url=") - flags+=("--remove-all") - local_nonpersistent_flags+=("--remove-all") - flags+=("--save") - local_nonpersistent_flags+=("--save") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_adm() -{ - last_command="oc_adm" - commands=() - commands+=("build-chain") - commands+=("certificate") - commands+=("completion") - commands+=("config") - commands+=("cordon") - commands+=("create-bootstrap-project-template") - commands+=("create-error-template") - commands+=("create-kubeconfig") - commands+=("create-login-template") - commands+=("create-provider-selection-template") - commands+=("drain") - commands+=("groups") - commands+=("migrate") - commands+=("must-gather") - commands+=("new-project") - commands+=("node-logs") - commands+=("options") - commands+=("pod-network") - commands+=("policy") - commands+=("prune") - commands+=("release") - commands+=("taint") - commands+=("top") - commands+=("uncordon") - commands+=("upgrade") - commands+=("verify-image-signature") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_annotate() -{ - last_command="oc_annotate" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--field-selector=") - local_nonpersistent_flags+=("--field-selector=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - 
flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--overwrite") - local_nonpersistent_flags+=("--overwrite") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--resource-version=") - local_nonpersistent_flags+=("--resource-version=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_api-resources() -{ - last_command="oc_api-resources" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--api-group=") - local_nonpersistent_flags+=("--api-group=") - flags+=("--cached") - local_nonpersistent_flags+=("--cached") - flags+=("--namespaced") - local_nonpersistent_flags+=("--namespaced") - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--verbs=") - local_nonpersistent_flags+=("--verbs=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_api-versions() -{ - last_command="oc_api-versions" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - 
flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_apply_edit-last-applied() -{ - last_command="oc_apply_edit-last-applied" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--windows-line-endings") - local_nonpersistent_flags+=("--windows-line-endings") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_apply_set-last-applied() -{ - last_command="oc_apply_set-last-applied" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--create-annotation") - local_nonpersistent_flags+=("--create-annotation") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - 
flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_apply_view-last-applied() -{ - last_command="oc_apply_view-last-applied" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_apply() -{ - last_command="oc_apply" - commands=() - commands+=("edit-last-applied") - commands+=("set-last-applied") - commands+=("view-last-applied") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--cascade") - local_nonpersistent_flags+=("--cascade") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--experimental-field-manager=") - local_nonpersistent_flags+=("--experimental-field-manager=") - flags+=("--experimental-force-conflicts") - local_nonpersistent_flags+=("--experimental-force-conflicts") - flags+=("--experimental-server-side") - local_nonpersistent_flags+=("--experimental-server-side") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--force") - local_nonpersistent_flags+=("--force") - flags+=("--grace-period=") - local_nonpersistent_flags+=("--grace-period=") - 
flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--openapi-patch") - local_nonpersistent_flags+=("--openapi-patch") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--overwrite") - local_nonpersistent_flags+=("--overwrite") - flags+=("--prune") - local_nonpersistent_flags+=("--prune") - flags+=("--prune-whitelist=") - local_nonpersistent_flags+=("--prune-whitelist=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--server-dry-run") - local_nonpersistent_flags+=("--server-dry-run") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--wait") - local_nonpersistent_flags+=("--wait") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_attach() -{ - last_command="oc_attach" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--container=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--container=") - flags+=("--pod-running-timeout=") - local_nonpersistent_flags+=("--pod-running-timeout=") - flags+=("--stdin") - flags+=("-i") - local_nonpersistent_flags+=("--stdin") - flags+=("--tty") - flags+=("-t") - local_nonpersistent_flags+=("--tty") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_auth_can-i() -{ - last_command="oc_auth_can-i" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--list") - local_nonpersistent_flags+=("--list") - flags+=("--no-headers") - 
local_nonpersistent_flags+=("--no-headers") - flags+=("--quiet") - flags+=("-q") - local_nonpersistent_flags+=("--quiet") - flags+=("--subresource=") - local_nonpersistent_flags+=("--subresource=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_auth_reconcile() -{ - last_command="oc_auth_reconcile" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--remove-extra-permissions") - local_nonpersistent_flags+=("--remove-extra-permissions") - flags+=("--remove-extra-subjects") - local_nonpersistent_flags+=("--remove-extra-subjects") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_auth() -{ - last_command="oc_auth" - commands=() - commands+=("can-i") - commands+=("reconcile") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - 
flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_autoscale() -{ - last_command="oc_autoscale" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--cpu-percent=") - local_nonpersistent_flags+=("--cpu-percent=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--max=") - local_nonpersistent_flags+=("--max=") - flags+=("--min=") - local_nonpersistent_flags+=("--min=") - flags+=("--name=") - local_nonpersistent_flags+=("--name=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_flag+=("--max=") - must_have_one_noun=() - must_have_one_noun+=("deployment") - must_have_one_noun+=("deploymentconfig") - must_have_one_noun+=("replicaset") - must_have_one_noun+=("replicationcontroller") - noun_aliases=() -} - -_oc_cancel-build() -{ - last_command="oc_cancel-build" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--dump-logs") - local_nonpersistent_flags+=("--dump-logs") - flags+=("--restart") - local_nonpersistent_flags+=("--restart") - flags+=("--state=") - local_nonpersistent_flags+=("--state=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - 
flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_cluster-info_dump() -{ - last_command="oc_cluster-info_dump" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--namespaces=") - local_nonpersistent_flags+=("--namespaces=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--output-directory=") - local_nonpersistent_flags+=("--output-directory=") - flags+=("--pod-running-timeout=") - local_nonpersistent_flags+=("--pod-running-timeout=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_cluster-info() -{ - last_command="oc_cluster-info" - commands=() - commands+=("dump") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_completion() -{ - last_command="oc_completion" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--help") - flags+=("-h") - local_nonpersistent_flags+=("--help") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - 
flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("bash") - must_have_one_noun+=("zsh") - noun_aliases=() -} - -_oc_config_current-context() -{ - last_command="oc_config_current-context" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_delete-cluster() -{ - last_command="oc_config_delete-cluster" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_delete-context() -{ - last_command="oc_config_delete-context" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_get-clusters() -{ - last_command="oc_config_get-clusters" 
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_config_get-contexts()
-{
-    last_command="oc_config_get-contexts"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--no-headers")
-    local_nonpersistent_flags+=("--no-headers")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_config_rename-context()
-{
-    last_command="oc_config_rename-context"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_config_set()
-{
-    last_command="oc_config_set"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--set-raw-bytes")
-    local_nonpersistent_flags+=("--set-raw-bytes")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_set-cluster() -{ - last_command="oc_config_set-cluster" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--embed-certs") - local_nonpersistent_flags+=("--embed-certs") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_set-context() -{ - last_command="oc_config_set-context" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--current") - local_nonpersistent_flags+=("--current") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_set-credentials() -{ - last_command="oc_config_set-credentials" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--auth-provider=") - local_nonpersistent_flags+=("--auth-provider=") - flags+=("--auth-provider-arg=") - local_nonpersistent_flags+=("--auth-provider-arg=") - flags+=("--embed-certs") - local_nonpersistent_flags+=("--embed-certs") - flags+=("--password=") - local_nonpersistent_flags+=("--password=") - flags+=("--username=") - local_nonpersistent_flags+=("--username=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - 
flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_unset() -{ - last_command="oc_config_unset" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_use-context() -{ - last_command="oc_config_use-context" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config_view() -{ - last_command="oc_config_view" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--flatten") - local_nonpersistent_flags+=("--flatten") - flags+=("--merge") - local_nonpersistent_flags+=("--merge") - flags+=("--minify") - local_nonpersistent_flags+=("--minify") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--raw") - local_nonpersistent_flags+=("--raw") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") 
- flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_config() -{ - last_command="oc_config" - commands=() - commands+=("current-context") - commands+=("delete-cluster") - commands+=("delete-context") - commands+=("get-clusters") - commands+=("get-contexts") - commands+=("rename-context") - commands+=("set") - commands+=("set-cluster") - commands+=("set-context") - commands+=("set-credentials") - commands+=("unset") - commands+=("use-context") - commands+=("view") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_convert() -{ - last_command="oc_convert" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--output-version=") - local_nonpersistent_flags+=("--output-version=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_cp() -{ - last_command="oc_cp" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--container=") - 
two_word_flags+=("-c") - local_nonpersistent_flags+=("--container=") - flags+=("--no-preserve") - local_nonpersistent_flags+=("--no-preserve") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_clusterresourcequota() -{ - last_command="oc_create_clusterresourcequota" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--hard=") - local_nonpersistent_flags+=("--hard=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--project-annotation-selector=") - local_nonpersistent_flags+=("--project-annotation-selector=") - flags+=("--project-label-selector=") - local_nonpersistent_flags+=("--project-label-selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_clusterrole() -{ - last_command="oc_create_clusterrole" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--aggregation-rule=") - local_nonpersistent_flags+=("--aggregation-rule=") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--non-resource-url=") - local_nonpersistent_flags+=("--non-resource-url=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--resource=") - local_nonpersistent_flags+=("--resource=") - flags+=("--resource-name=") - local_nonpersistent_flags+=("--resource-name=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - 
flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--verb=") - local_nonpersistent_flags+=("--verb=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_clusterrolebinding() -{ - last_command="oc_create_clusterrolebinding" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--clusterrole=") - flags_with_completion+=("--clusterrole") - flags_completion+=("__kubectl_get_resource_clusterrole") - local_nonpersistent_flags+=("--clusterrole=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--group=") - local_nonpersistent_flags+=("--group=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--serviceaccount=") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_configmap() -{ - last_command="oc_create_configmap" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--append-hash") - local_nonpersistent_flags+=("--append-hash") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--from-env-file=") - local_nonpersistent_flags+=("--from-env-file=") - flags+=("--from-file=") - local_nonpersistent_flags+=("--from-file=") - flags+=("--from-literal=") - local_nonpersistent_flags+=("--from-literal=") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--output=") - 
two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_cronjob() -{ - last_command="oc_create_cronjob" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--image=") - local_nonpersistent_flags+=("--image=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--restart=") - local_nonpersistent_flags+=("--restart=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--schedule=") - local_nonpersistent_flags+=("--schedule=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_deployment() -{ - last_command="oc_create_deployment" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--image=") - local_nonpersistent_flags+=("--image=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - 
flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_flag+=("--image=") - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_deploymentconfig() -{ - last_command="oc_create_deploymentconfig" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--image=") - local_nonpersistent_flags+=("--image=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_flag+=("--image=") - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_identity() -{ - last_command="oc_create_identity" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - 
flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_imagestream() -{ - last_command="oc_create_imagestream" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--lookup-local") - local_nonpersistent_flags+=("--lookup-local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_imagestreamtag() -{ - last_command="oc_create_imagestreamtag" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--annotation=") - two_word_flags+=("-A") - local_nonpersistent_flags+=("--annotation=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--from=") - local_nonpersistent_flags+=("--from=") - flags+=("--from-image=") - local_nonpersistent_flags+=("--from-image=") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--reference") - local_nonpersistent_flags+=("--reference") - flags+=("--reference-policy=") - local_nonpersistent_flags+=("--reference-policy=") - flags+=("--scheduled") - local_nonpersistent_flags+=("--scheduled") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_job() -{ - 
last_command="oc_create_job" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--from=") - local_nonpersistent_flags+=("--from=") - flags+=("--image=") - local_nonpersistent_flags+=("--image=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_namespace() -{ - last_command="oc_create_namespace" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_poddisruptionbudget() -{ - last_command="oc_create_poddisruptionbudget" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - 
flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--max-unavailable=") - local_nonpersistent_flags+=("--max-unavailable=") - flags+=("--min-available=") - local_nonpersistent_flags+=("--min-available=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--selector=") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_priorityclass() -{ - last_command="oc_create_priorityclass" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--description=") - local_nonpersistent_flags+=("--description=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--global-default") - local_nonpersistent_flags+=("--global-default") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--value=") - local_nonpersistent_flags+=("--value=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_quota() -{ - last_command="oc_create_quota" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - 
local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--hard=") - local_nonpersistent_flags+=("--hard=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--scopes=") - local_nonpersistent_flags+=("--scopes=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_role() -{ - last_command="oc_create_role" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--resource=") - local_nonpersistent_flags+=("--resource=") - flags+=("--resource-name=") - local_nonpersistent_flags+=("--resource-name=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--verb=") - local_nonpersistent_flags+=("--verb=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_rolebinding() -{ - last_command="oc_create_rolebinding" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--clusterrole=") - local_nonpersistent_flags+=("--clusterrole=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - 
local_nonpersistent_flags+=("--generator=") - flags+=("--group=") - local_nonpersistent_flags+=("--group=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--role=") - local_nonpersistent_flags+=("--role=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--serviceaccount=") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_route_edge() -{ - last_command="oc_create_route_edge" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--ca-cert=") - flags_with_completion+=("--ca-cert") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--ca-cert=") - flags+=("--cert=") - flags_with_completion+=("--cert") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--cert=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--hostname=") - local_nonpersistent_flags+=("--hostname=") - flags+=("--insecure-policy=") - local_nonpersistent_flags+=("--insecure-policy=") - flags+=("--key=") - flags_with_completion+=("--key") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--key=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--path=") - local_nonpersistent_flags+=("--path=") - flags+=("--port=") - local_nonpersistent_flags+=("--port=") - flags+=("--service=") - local_nonpersistent_flags+=("--service=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--wildcard-policy=") - local_nonpersistent_flags+=("--wildcard-policy=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - 
flags+=("--user=") - - must_have_one_flag=() - must_have_one_flag+=("--service=") - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_route_passthrough() -{ - last_command="oc_create_route_passthrough" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--hostname=") - local_nonpersistent_flags+=("--hostname=") - flags+=("--insecure-policy=") - local_nonpersistent_flags+=("--insecure-policy=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--port=") - local_nonpersistent_flags+=("--port=") - flags+=("--service=") - local_nonpersistent_flags+=("--service=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--wildcard-policy=") - local_nonpersistent_flags+=("--wildcard-policy=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_flag+=("--service=") - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_route_reencrypt() -{ - last_command="oc_create_route_reencrypt" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--ca-cert=") - flags_with_completion+=("--ca-cert") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--ca-cert=") - flags+=("--cert=") - flags_with_completion+=("--cert") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--cert=") - flags+=("--dest-ca-cert=") - flags_with_completion+=("--dest-ca-cert") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--dest-ca-cert=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--hostname=") - local_nonpersistent_flags+=("--hostname=") - flags+=("--insecure-policy=") - local_nonpersistent_flags+=("--insecure-policy=") - flags+=("--key=") - flags_with_completion+=("--key") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--key=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--path=") - local_nonpersistent_flags+=("--path=") - flags+=("--port=") - local_nonpersistent_flags+=("--port=") - flags+=("--service=") - local_nonpersistent_flags+=("--service=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - 
flags+=("--wildcard-policy=") - local_nonpersistent_flags+=("--wildcard-policy=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_flag+=("--service=") - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_route() -{ - last_command="oc_create_route" - commands=() - commands+=("edge") - commands+=("passthrough") - commands+=("reencrypt") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_secret_docker-registry() -{ - last_command="oc_create_secret_docker-registry" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--append-hash") - local_nonpersistent_flags+=("--append-hash") - flags+=("--docker-email=") - local_nonpersistent_flags+=("--docker-email=") - flags+=("--docker-password=") - local_nonpersistent_flags+=("--docker-password=") - flags+=("--docker-server=") - local_nonpersistent_flags+=("--docker-server=") - flags+=("--docker-username=") - local_nonpersistent_flags+=("--docker-username=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--from-file=") - local_nonpersistent_flags+=("--from-file=") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - 
flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_flag+=("--docker-password=") - must_have_one_flag+=("--docker-username=") - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_secret_generic() -{ - last_command="oc_create_secret_generic" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--append-hash") - local_nonpersistent_flags+=("--append-hash") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--from-env-file=") - local_nonpersistent_flags+=("--from-env-file=") - flags+=("--from-file=") - local_nonpersistent_flags+=("--from-file=") - flags+=("--from-literal=") - local_nonpersistent_flags+=("--from-literal=") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--type=") - local_nonpersistent_flags+=("--type=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_secret_tls() -{ - last_command="oc_create_secret_tls" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--append-hash") - local_nonpersistent_flags+=("--append-hash") - flags+=("--cert=") - local_nonpersistent_flags+=("--cert=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--key=") - local_nonpersistent_flags+=("--key=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - 
flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_secret() -{ - last_command="oc_create_secret" - commands=() - commands+=("docker-registry") - commands+=("generic") - commands+=("tls") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_service_clusterip() -{ - last_command="oc_create_service_clusterip" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--clusterip=") - local_nonpersistent_flags+=("--clusterip=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--tcp=") - local_nonpersistent_flags+=("--tcp=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_service_externalname() -{ - last_command="oc_create_service_externalname" - commands=() - - flags=() - two_word_flags=() - 
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--external-name=")
-    local_nonpersistent_flags+=("--external-name=")
-    flags+=("--generator=")
-    local_nonpersistent_flags+=("--generator=")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--save-config")
-    local_nonpersistent_flags+=("--save-config")
-    flags+=("--tcp=")
-    local_nonpersistent_flags+=("--tcp=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--validate")
-    local_nonpersistent_flags+=("--validate")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_flag+=("--external-name=")
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_create_service_loadbalancer()
-{
-    last_command="oc_create_service_loadbalancer"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--generator=")
-    local_nonpersistent_flags+=("--generator=")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--save-config")
-    local_nonpersistent_flags+=("--save-config")
-    flags+=("--tcp=")
-    local_nonpersistent_flags+=("--tcp=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--validate")
-    local_nonpersistent_flags+=("--validate")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_create_service_nodeport()
-{
-    last_command="oc_create_service_nodeport"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--node-port=") - local_nonpersistent_flags+=("--node-port=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--tcp=") - local_nonpersistent_flags+=("--tcp=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_service() -{ - last_command="oc_create_service" - commands=() - commands+=("clusterip") - commands+=("externalname") - commands+=("loadbalancer") - commands+=("nodeport") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_serviceaccount() -{ - last_command="oc_create_serviceaccount" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - 
flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_user() -{ - last_command="oc_create_user" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--full-name=") - local_nonpersistent_flags+=("--full-name=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create_useridentitymapping() -{ - last_command="oc_create_useridentitymapping" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_create() -{ - last_command="oc_create" - commands=() - commands+=("clusterresourcequota") - commands+=("clusterrole") - commands+=("clusterrolebinding") - commands+=("configmap") - commands+=("cronjob") - commands+=("deployment") - 
commands+=("deploymentconfig") - commands+=("identity") - commands+=("imagestream") - commands+=("imagestreamtag") - commands+=("job") - commands+=("namespace") - commands+=("poddisruptionbudget") - commands+=("priorityclass") - commands+=("quota") - commands+=("role") - commands+=("rolebinding") - commands+=("route") - commands+=("secret") - commands+=("service") - commands+=("serviceaccount") - commands+=("user") - commands+=("useridentitymapping") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--edit") - local_nonpersistent_flags+=("--edit") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--raw=") - local_nonpersistent_flags+=("--raw=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--windows-line-endings") - local_nonpersistent_flags+=("--windows-line-endings") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_debug() -{ - last_command="oc_debug" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--as-root") - local_nonpersistent_flags+=("--as-root") - flags+=("--as-user=") - local_nonpersistent_flags+=("--as-user=") - flags+=("--container=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--container=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - 
flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--image=") - local_nonpersistent_flags+=("--image=") - flags+=("--keep-annotations") - local_nonpersistent_flags+=("--keep-annotations") - flags+=("--keep-init-containers") - local_nonpersistent_flags+=("--keep-init-containers") - flags+=("--keep-liveness") - local_nonpersistent_flags+=("--keep-liveness") - flags+=("--keep-readiness") - local_nonpersistent_flags+=("--keep-readiness") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--no-stdin") - flags+=("-I") - local_nonpersistent_flags+=("--no-stdin") - flags+=("--no-tty") - flags+=("-T") - local_nonpersistent_flags+=("--no-tty") - flags+=("--node-name=") - local_nonpersistent_flags+=("--node-name=") - flags+=("--one-container") - local_nonpersistent_flags+=("--one-container") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--show-labels") - local_nonpersistent_flags+=("--show-labels") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--tty") - flags+=("-t") - local_nonpersistent_flags+=("--tty") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_delete() -{ - last_command="oc_delete" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--cascade") - local_nonpersistent_flags+=("--cascade") - flags+=("--field-selector=") - local_nonpersistent_flags+=("--field-selector=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--force") - local_nonpersistent_flags+=("--force") - flags+=("--grace-period=") - local_nonpersistent_flags+=("--grace-period=") - flags+=("--ignore-not-found") - local_nonpersistent_flags+=("--ignore-not-found") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--now") - local_nonpersistent_flags+=("--now") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - 
local_nonpersistent_flags+=("--selector=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--wait") - local_nonpersistent_flags+=("--wait") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_describe() -{ - last_command="oc_describe" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--show-events") - local_nonpersistent_flags+=("--show-events") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_diff() -{ - last_command="oc_diff" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--experimental-field-manager=") - local_nonpersistent_flags+=("--experimental-field-manager=") - flags+=("--experimental-force-conflicts") - local_nonpersistent_flags+=("--experimental-force-conflicts") - flags+=("--experimental-server-side") - local_nonpersistent_flags+=("--experimental-server-side") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--recursive") - flags+=("-R") - 
local_nonpersistent_flags+=("--recursive") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_edit() -{ - last_command="oc_edit" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--output-patch") - local_nonpersistent_flags+=("--output-patch") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--windows-line-endings") - local_nonpersistent_flags+=("--windows-line-endings") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_ex_build-chain() -{ - last_command="oc_ex_build-chain" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--reverse") - local_nonpersistent_flags+=("--reverse") - flags+=("--trigger-only") - local_nonpersistent_flags+=("--trigger-only") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - 
flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_ex_dockergc() -{ - last_command="oc_ex_dockergc" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--image-gc-high-threshold=") - local_nonpersistent_flags+=("--image-gc-high-threshold=") - flags+=("--image-gc-low-threshold=") - local_nonpersistent_flags+=("--image-gc-low-threshold=") - flags+=("--minimum-ttl-duration=") - local_nonpersistent_flags+=("--minimum-ttl-duration=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_ex_options() -{ - last_command="oc_ex_options" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_ex_prune-groups() -{ - last_command="oc_ex_prune-groups" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--blacklist=") - flags_with_completion+=("--blacklist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--blacklist=") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--sync-config=") - flags_with_completion+=("--sync-config") - flags_completion+=("__oc_handle_filename_extension_flag yaml|yml") - local_nonpersistent_flags+=("--sync-config=") - flags+=("--whitelist=") - flags_with_completion+=("--whitelist") - 
flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--whitelist=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_ex_sync-groups() -{ - last_command="oc_ex_sync-groups" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--blacklist=") - flags_with_completion+=("--blacklist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--blacklist=") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--sync-config=") - flags_with_completion+=("--sync-config") - flags_completion+=("__oc_handle_filename_extension_flag yaml|yml") - local_nonpersistent_flags+=("--sync-config=") - flags+=("--type=") - local_nonpersistent_flags+=("--type=") - flags+=("--whitelist=") - flags_with_completion+=("--whitelist") - flags_completion+=("__oc_handle_filename_extension_flag txt") - local_nonpersistent_flags+=("--whitelist=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_ex() -{ - last_command="oc_ex" - commands=() - commands+=("build-chain") - commands+=("dockergc") - commands+=("options") - commands+=("prune-groups") - commands+=("sync-groups") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_exec() -{ - 
last_command="oc_exec" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--container=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--container=") - flags+=("--stdin") - flags+=("-i") - local_nonpersistent_flags+=("--stdin") - flags+=("--tty") - flags+=("-t") - local_nonpersistent_flags+=("--tty") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_explain() -{ - last_command="oc_explain" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--api-version=") - local_nonpersistent_flags+=("--api-version=") - flags+=("--recursive") - local_nonpersistent_flags+=("--recursive") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_expose() -{ - last_command="oc_expose" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--cluster-ip=") - local_nonpersistent_flags+=("--cluster-ip=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--external-ip=") - local_nonpersistent_flags+=("--external-ip=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--hostname=") - local_nonpersistent_flags+=("--hostname=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--labels=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--labels=") - flags+=("--load-balancer-ip=") - local_nonpersistent_flags+=("--load-balancer-ip=") - flags+=("--name=") - local_nonpersistent_flags+=("--name=") - flags+=("--output=") - 
two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--overrides=") - local_nonpersistent_flags+=("--overrides=") - flags+=("--path=") - local_nonpersistent_flags+=("--path=") - flags+=("--port=") - local_nonpersistent_flags+=("--port=") - flags+=("--protocol=") - local_nonpersistent_flags+=("--protocol=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--selector=") - local_nonpersistent_flags+=("--selector=") - flags+=("--session-affinity=") - local_nonpersistent_flags+=("--session-affinity=") - flags+=("--target-port=") - local_nonpersistent_flags+=("--target-port=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--type=") - local_nonpersistent_flags+=("--type=") - flags+=("--wildcard-policy=") - local_nonpersistent_flags+=("--wildcard-policy=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("deployment") - must_have_one_noun+=("pod") - must_have_one_noun+=("replicaset") - must_have_one_noun+=("replicationcontroller") - must_have_one_noun+=("service") - noun_aliases=() -} - -_oc_extract() -{ - last_command="oc_extract" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("_filedir") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--filename=") - flags+=("--keys=") - local_nonpersistent_flags+=("--keys=") - flags+=("--to=") - local_nonpersistent_flags+=("--to=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_get() -{ - last_command="oc_get" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - 
flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--chunk-size=") - local_nonpersistent_flags+=("--chunk-size=") - flags+=("--field-selector=") - local_nonpersistent_flags+=("--field-selector=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--ignore-not-found") - local_nonpersistent_flags+=("--ignore-not-found") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--label-columns=") - two_word_flags+=("-L") - local_nonpersistent_flags+=("--label-columns=") - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--raw=") - local_nonpersistent_flags+=("--raw=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--server-print") - local_nonpersistent_flags+=("--server-print") - flags+=("--show-kind") - local_nonpersistent_flags+=("--show-kind") - flags+=("--show-labels") - local_nonpersistent_flags+=("--show-labels") - flags+=("--sort-by=") - local_nonpersistent_flags+=("--sort-by=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--watch") - flags+=("-w") - local_nonpersistent_flags+=("--watch") - flags+=("--watch-only") - local_nonpersistent_flags+=("--watch-only") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_idle() -{ - last_command="oc_idle" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--resource-names-file=") - flags_with_completion+=("--resource-names-file") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--resource-names-file=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - 
flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_image_append() -{ - last_command="oc_image_append" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--created-at=") - local_nonpersistent_flags+=("--created-at=") - flags+=("--drop-history") - local_nonpersistent_flags+=("--drop-history") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filter-by-os=") - local_nonpersistent_flags+=("--filter-by-os=") - flags+=("--force") - local_nonpersistent_flags+=("--force") - flags+=("--from=") - local_nonpersistent_flags+=("--from=") - flags+=("--image=") - local_nonpersistent_flags+=("--image=") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--max-per-registry=") - local_nonpersistent_flags+=("--max-per-registry=") - flags+=("--meta=") - local_nonpersistent_flags+=("--meta=") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--skip-verification") - local_nonpersistent_flags+=("--skip-verification") - flags+=("--to=") - local_nonpersistent_flags+=("--to=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_image_extract() -{ - last_command="oc_image_extract" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-layers") - local_nonpersistent_flags+=("--all-layers") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--file=") - local_nonpersistent_flags+=("--file=") - flags+=("--filter-by-os=") - local_nonpersistent_flags+=("--filter-by-os=") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--only-files") - local_nonpersistent_flags+=("--only-files") - flags+=("--path=") - local_nonpersistent_flags+=("--path=") - flags+=("--preserve-ownership") - flags+=("-p") - local_nonpersistent_flags+=("--preserve-ownership") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--skip-verification") - local_nonpersistent_flags+=("--skip-verification") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - 
flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_image_info() -{ - last_command="oc_image_info" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--filter-by-os=") - local_nonpersistent_flags+=("--filter-by-os=") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--skip-verification") - local_nonpersistent_flags+=("--skip-verification") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_image_mirror() -{ - last_command="oc_image_mirror" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - two_word_flags+=("-f") - local_nonpersistent_flags+=("--filename=") - flags+=("--filter-by-os=") - local_nonpersistent_flags+=("--filter-by-os=") - flags+=("--force") - local_nonpersistent_flags+=("--force") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--max-per-registry=") - local_nonpersistent_flags+=("--max-per-registry=") - flags+=("--max-registry=") - local_nonpersistent_flags+=("--max-registry=") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--s3-source-bucket=") - local_nonpersistent_flags+=("--s3-source-bucket=") - flags+=("--skip-missing") - local_nonpersistent_flags+=("--skip-missing") - flags+=("--skip-mount") - local_nonpersistent_flags+=("--skip-mount") - flags+=("--skip-multiple-scopes") - local_nonpersistent_flags+=("--skip-multiple-scopes") - flags+=("--skip-verification") - local_nonpersistent_flags+=("--skip-verification") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - 
flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_image() -{ - last_command="oc_image" - commands=() - commands+=("append") - commands+=("extract") - commands+=("info") - commands+=("mirror") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_import-image() -{ - last_command="oc_import-image" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--from=") - local_nonpersistent_flags+=("--from=") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--reference-policy=") - local_nonpersistent_flags+=("--reference-policy=") - flags+=("--scheduled") - local_nonpersistent_flags+=("--scheduled") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_kustomize() -{ - last_command="oc_kustomize" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - 
flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_label() -{ - last_command="oc_label" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--field-selector=") - local_nonpersistent_flags+=("--field-selector=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--list") - local_nonpersistent_flags+=("--list") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--overwrite") - local_nonpersistent_flags+=("--overwrite") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--resource-version=") - local_nonpersistent_flags+=("--resource-version=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_login() -{ - last_command="oc_login" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--password=") - two_word_flags+=("-p") - local_nonpersistent_flags+=("--password=") - flags+=("--username=") - two_word_flags+=("-u") - local_nonpersistent_flags+=("--username=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - 
flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_logout() -{ - last_command="oc_logout" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_logs() -{ - last_command="oc_logs" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-containers") - local_nonpersistent_flags+=("--all-containers") - flags+=("--container=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--container=") - flags+=("--follow") - flags+=("-f") - local_nonpersistent_flags+=("--follow") - flags+=("--limit-bytes=") - local_nonpersistent_flags+=("--limit-bytes=") - flags+=("--max-log-requests=") - local_nonpersistent_flags+=("--max-log-requests=") - flags+=("--pod-running-timeout=") - local_nonpersistent_flags+=("--pod-running-timeout=") - flags+=("--previous") - flags+=("-p") - local_nonpersistent_flags+=("--previous") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--since=") - local_nonpersistent_flags+=("--since=") - flags+=("--since-time=") - local_nonpersistent_flags+=("--since-time=") - flags+=("--tail=") - local_nonpersistent_flags+=("--tail=") - flags+=("--timestamps") - local_nonpersistent_flags+=("--timestamps") - flags+=("--version=") - local_nonpersistent_flags+=("--version=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_new-app() -{ - 
last_command="oc_new-app" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-images") - local_nonpersistent_flags+=("--allow-missing-images") - flags+=("--allow-missing-imagestream-tags") - local_nonpersistent_flags+=("--allow-missing-imagestream-tags") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--as-test") - local_nonpersistent_flags+=("--as-test") - flags+=("--binary") - local_nonpersistent_flags+=("--binary") - flags+=("--build-env=") - local_nonpersistent_flags+=("--build-env=") - flags+=("--build-env-file=") - flags_with_completion+=("--build-env-file") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--build-env-file=") - flags+=("--code=") - local_nonpersistent_flags+=("--code=") - flags+=("--context-dir=") - local_nonpersistent_flags+=("--context-dir=") - flags+=("--docker-image=") - local_nonpersistent_flags+=("--docker-image=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--env=") - two_word_flags+=("-e") - local_nonpersistent_flags+=("--env=") - flags+=("--env-file=") - flags_with_completion+=("--env-file") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--env-file=") - flags+=("--file=") - flags_with_completion+=("--file") - flags_completion+=("__oc_handle_filename_extension_flag yaml|yml|json") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag yaml|yml|json") - local_nonpersistent_flags+=("--file=") - flags+=("--grant-install-rights") - local_nonpersistent_flags+=("--grant-install-rights") - flags+=("--group=") - local_nonpersistent_flags+=("--group=") - flags+=("--ignore-unknown-parameters") - local_nonpersistent_flags+=("--ignore-unknown-parameters") - flags+=("--image-stream=") - two_word_flags+=("-i") - local_nonpersistent_flags+=("--image-stream=") - flags+=("--insecure-registry") - local_nonpersistent_flags+=("--insecure-registry") - flags+=("--labels=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--labels=") - flags+=("--list") - flags+=("-L") - local_nonpersistent_flags+=("--list") - flags+=("--name=") - local_nonpersistent_flags+=("--name=") - flags+=("--no-install") - local_nonpersistent_flags+=("--no-install") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--output-version=") - local_nonpersistent_flags+=("--output-version=") - flags+=("--param=") - two_word_flags+=("-p") - local_nonpersistent_flags+=("--param=") - flags+=("--param-file=") - flags_with_completion+=("--param-file") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--param-file=") - flags+=("--search") - flags+=("-S") - local_nonpersistent_flags+=("--search") - flags+=("--show-all") - flags+=("-a") - local_nonpersistent_flags+=("--show-all") - flags+=("--show-labels") - local_nonpersistent_flags+=("--show-labels") - flags+=("--sort-by=") - local_nonpersistent_flags+=("--sort-by=") - flags+=("--source-secret=") - local_nonpersistent_flags+=("--source-secret=") - flags+=("--strategy=") - local_nonpersistent_flags+=("--strategy=") - flags+=("--template=") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - 
flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_new-build() -{ - last_command="oc_new-build" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-images") - local_nonpersistent_flags+=("--allow-missing-images") - flags+=("--allow-missing-imagestream-tags") - local_nonpersistent_flags+=("--allow-missing-imagestream-tags") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--binary") - local_nonpersistent_flags+=("--binary") - flags+=("--build-arg=") - local_nonpersistent_flags+=("--build-arg=") - flags+=("--build-config-map=") - local_nonpersistent_flags+=("--build-config-map=") - flags+=("--build-secret=") - local_nonpersistent_flags+=("--build-secret=") - flags+=("--code=") - local_nonpersistent_flags+=("--code=") - flags+=("--context-dir=") - local_nonpersistent_flags+=("--context-dir=") - flags+=("--docker-image=") - local_nonpersistent_flags+=("--docker-image=") - flags+=("--dockerfile=") - two_word_flags+=("-D") - local_nonpersistent_flags+=("--dockerfile=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--env=") - two_word_flags+=("-e") - local_nonpersistent_flags+=("--env=") - flags+=("--env-file=") - flags_with_completion+=("--env-file") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--env-file=") - flags+=("--image-stream=") - two_word_flags+=("-i") - local_nonpersistent_flags+=("--image-stream=") - flags+=("--labels=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--labels=") - flags+=("--name=") - local_nonpersistent_flags+=("--name=") - flags+=("--no-output") - local_nonpersistent_flags+=("--no-output") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--output-version=") - local_nonpersistent_flags+=("--output-version=") - flags+=("--push-secret=") - local_nonpersistent_flags+=("--push-secret=") - flags+=("--show-all") - flags+=("-a") - local_nonpersistent_flags+=("--show-all") - flags+=("--show-labels") - local_nonpersistent_flags+=("--show-labels") - flags+=("--sort-by=") - local_nonpersistent_flags+=("--sort-by=") - flags+=("--source-image=") - local_nonpersistent_flags+=("--source-image=") - flags+=("--source-image-path=") - local_nonpersistent_flags+=("--source-image-path=") - flags+=("--source-secret=") - local_nonpersistent_flags+=("--source-secret=") - flags+=("--strategy=") - local_nonpersistent_flags+=("--strategy=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--to=") - local_nonpersistent_flags+=("--to=") - flags+=("--to-docker") - local_nonpersistent_flags+=("--to-docker") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - 
flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_new-project() -{ - last_command="oc_new-project" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--description=") - local_nonpersistent_flags+=("--description=") - flags+=("--display-name=") - local_nonpersistent_flags+=("--display-name=") - flags+=("--skip-config-write") - local_nonpersistent_flags+=("--skip-config-write") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_observe() -{ - last_command="oc_observe" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--argument=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--argument=") - flags+=("--delete=") - two_word_flags+=("-d") - local_nonpersistent_flags+=("--delete=") - flags+=("--exit-after=") - local_nonpersistent_flags+=("--exit-after=") - flags+=("--listen-addr=") - local_nonpersistent_flags+=("--listen-addr=") - flags+=("--maximum-errors=") - local_nonpersistent_flags+=("--maximum-errors=") - flags+=("--names=") - local_nonpersistent_flags+=("--names=") - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--object-env-var=") - local_nonpersistent_flags+=("--object-env-var=") - flags+=("--once") - local_nonpersistent_flags+=("--once") - flags+=("--output=") - local_nonpersistent_flags+=("--output=") - flags+=("--print-metrics-on-exit") - local_nonpersistent_flags+=("--print-metrics-on-exit") - flags+=("--resync-period=") - local_nonpersistent_flags+=("--resync-period=") - flags+=("--retry-count=") - local_nonpersistent_flags+=("--retry-count=") - flags+=("--retry-on-exit-code=") - local_nonpersistent_flags+=("--retry-on-exit-code=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--strict-templates") - local_nonpersistent_flags+=("--strict-templates") - flags+=("--type-env-var=") - local_nonpersistent_flags+=("--type-env-var=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") 
- flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_options() -{ - last_command="oc_options" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_patch() -{ - last_command="oc_patch" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--patch=") - two_word_flags+=("-p") - local_nonpersistent_flags+=("--patch=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--type=") - local_nonpersistent_flags+=("--type=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_flag+=("--patch=") 
- must_have_one_flag+=("-p") - must_have_one_noun=() - noun_aliases=() -} - -_oc_plugin_list() -{ - last_command="oc_plugin_list" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--name-only") - local_nonpersistent_flags+=("--name-only") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_plugin() -{ - last_command="oc_plugin" - commands=() - commands+=("list") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_policy_add-role-to-group() -{ - last_command="oc_policy_add-role-to-group" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--role-namespace=") - local_nonpersistent_flags+=("--role-namespace=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - 
-    noun_aliases=()
-}
-
-_oc_policy_add-role-to-user()
-{
-    last_command="oc_policy_add-role-to-user"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--role-namespace=")
-    local_nonpersistent_flags+=("--role-namespace=")
-    flags+=("--rolebinding-name=")
-    local_nonpersistent_flags+=("--rolebinding-name=")
-    flags+=("--serviceaccount=")
-    two_word_flags+=("-z")
-    local_nonpersistent_flags+=("--serviceaccount=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_policy_remove-group()
-{
-    last_command="oc_policy_remove-group"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_policy_remove-role-from-group()
-{
-    last_command="oc_policy_remove-role-from-group"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--dry-run")
-    local_nonpersistent_flags+=("--dry-run")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--role-namespace=")
local_nonpersistent_flags+=("--role-namespace=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_policy_remove-role-from-user() -{ - last_command="oc_policy_remove-role-from-user" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--role-namespace=") - local_nonpersistent_flags+=("--role-namespace=") - flags+=("--rolebinding-name=") - local_nonpersistent_flags+=("--rolebinding-name=") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_policy_remove-user() -{ - last_command="oc_policy_remove-user" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - 
flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_policy_scc-review() -{ - last_command="oc_policy_scc-review" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_policy_scc-subject-review() -{ - last_command="oc_policy_scc-subject-review" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--groups=") - two_word_flags+=("-g") - local_nonpersistent_flags+=("--groups=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--no-headers") - local_nonpersistent_flags+=("--no-headers") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - 
flags+=("--serviceaccount=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_policy_who-can() -{ - last_command="oc_policy_who-can" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_policy() -{ - last_command="oc_policy" - commands=() - commands+=("add-role-to-group") - commands+=("add-role-to-user") - commands+=("remove-group") - commands+=("remove-role-from-group") - commands+=("remove-role-from-user") - commands+=("remove-user") - commands+=("scc-review") - commands+=("scc-subject-review") - commands+=("who-can") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - 
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_port-forward()
-{
-    last_command="oc_port-forward"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--address=")
-    local_nonpersistent_flags+=("--address=")
-    flags+=("--pod-running-timeout=")
-    local_nonpersistent_flags+=("--pod-running-timeout=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_process()
-{
-    last_command="oc_process"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--allow-missing-template-keys")
-    local_nonpersistent_flags+=("--allow-missing-template-keys")
-    flags+=("--filename=")
-    flags_with_completion+=("--filename")
-    flags_completion+=("__oc_handle_filename_extension_flag yaml|yml|json")
-    two_word_flags+=("-f")
-    flags_with_completion+=("-f")
-    flags_completion+=("__oc_handle_filename_extension_flag yaml|yml|json")
-    local_nonpersistent_flags+=("--filename=")
-    flags+=("--ignore-unknown-parameters")
-    local_nonpersistent_flags+=("--ignore-unknown-parameters")
-    flags+=("--labels=")
-    two_word_flags+=("-l")
-    local_nonpersistent_flags+=("--labels=")
-    flags+=("--local")
-    local_nonpersistent_flags+=("--local")
-    flags+=("--output=")
-    two_word_flags+=("-o")
-    local_nonpersistent_flags+=("--output=")
-    flags+=("--param=")
-    two_word_flags+=("-p")
-    local_nonpersistent_flags+=("--param=")
-    flags+=("--param-file=")
-    flags_with_completion+=("--param-file")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--param-file=")
-    flags+=("--parameters")
-    local_nonpersistent_flags+=("--parameters")
-    flags+=("--raw")
-    local_nonpersistent_flags+=("--raw")
-    flags+=("--template=")
-    flags_with_completion+=("--template")
-    flags_completion+=("_filedir")
-    two_word_flags+=("-t")
-    flags_with_completion+=("-t")
-    flags_completion+=("_filedir")
-    local_nonpersistent_flags+=("--template=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_project()
-{
-    last_command="oc_project"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--short")
-    flags+=("-q")
-    local_nonpersistent_flags+=("--short")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_projects()
-{
-    last_command="oc_projects"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--short")
-    flags+=("-q")
-    local_nonpersistent_flags+=("--short")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
-    flags_with_completion+=("-n")
-    flags_completion+=("__oc_get_namespaces")
-    flags+=("--request-timeout=")
-    flags+=("--server=")
-    two_word_flags+=("-s")
-    flags+=("--token=")
-    flags+=("--user=")
-
-    must_have_one_flag=()
-    must_have_one_noun=()
-    noun_aliases=()
-}
-
-_oc_proxy()
-{
-    last_command="oc_proxy"
-    commands=()
-
-    flags=()
-    two_word_flags=()
-    local_nonpersistent_flags=()
-    flags_with_completion=()
-    flags_completion=()
-
-    flags+=("--accept-hosts=")
-    local_nonpersistent_flags+=("--accept-hosts=")
-    flags+=("--accept-paths=")
-    local_nonpersistent_flags+=("--accept-paths=")
-    flags+=("--address=")
-    local_nonpersistent_flags+=("--address=")
-    flags+=("--api-prefix=")
-    local_nonpersistent_flags+=("--api-prefix=")
-    flags+=("--disable-filter")
-    local_nonpersistent_flags+=("--disable-filter")
-    flags+=("--keepalive=")
-    local_nonpersistent_flags+=("--keepalive=")
-    flags+=("--port=")
-    two_word_flags+=("-p")
-    local_nonpersistent_flags+=("--port=")
-    flags+=("--reject-methods=")
-    local_nonpersistent_flags+=("--reject-methods=")
-    flags+=("--reject-paths=")
-    local_nonpersistent_flags+=("--reject-paths=")
-    flags+=("--unix-socket=")
-    two_word_flags+=("-u")
-    local_nonpersistent_flags+=("--unix-socket=")
-    flags+=("--www=")
-    two_word_flags+=("-w")
-    local_nonpersistent_flags+=("--www=")
-    flags+=("--www-prefix=")
-    two_word_flags+=("-P")
-    local_nonpersistent_flags+=("--www-prefix=")
-    flags+=("--as=")
-    flags+=("--as-group=")
-    flags+=("--cache-dir=")
-    flags+=("--certificate-authority=")
-    flags+=("--client-certificate=")
-    flags+=("--client-key=")
-    flags+=("--cluster=")
-    flags+=("--config=")
-    flags+=("--context=")
-    flags+=("--insecure-skip-tls-verify")
-    flags+=("--kubeconfig=")
-    flags+=("--match-server-version")
-    flags+=("--namespace=")
-    flags_with_completion+=("--namespace")
-    flags_completion+=("__oc_get_namespaces")
-    two_word_flags+=("-n")
flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_registry_info() -{ - last_command="oc_registry_info" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--check") - local_nonpersistent_flags+=("--check") - flags+=("--internal") - local_nonpersistent_flags+=("--internal") - flags+=("--public") - local_nonpersistent_flags+=("--public") - flags+=("--quiet") - flags+=("-q") - local_nonpersistent_flags+=("--quiet") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_registry_login() -{ - last_command="oc_registry_login" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--registry=") - local_nonpersistent_flags+=("--registry=") - flags+=("--registry-config=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--registry-config=") - flags+=("--service-account=") - two_word_flags+=("-z") - local_nonpersistent_flags+=("--service-account=") - flags+=("--skip-check") - local_nonpersistent_flags+=("--skip-check") - flags+=("--to=") - local_nonpersistent_flags+=("--to=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_registry() -{ - last_command="oc_registry" - commands=() - commands+=("info") - commands+=("login") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - 
flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_replace() -{ - last_command="oc_replace" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--cascade") - local_nonpersistent_flags+=("--cascade") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--force") - local_nonpersistent_flags+=("--force") - flags+=("--grace-period=") - local_nonpersistent_flags+=("--grace-period=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--validate") - local_nonpersistent_flags+=("--validate") - flags+=("--wait") - local_nonpersistent_flags+=("--wait") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_rollback() -{ - last_command="oc_rollback" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--change-scaling-settings") - local_nonpersistent_flags+=("--change-scaling-settings") - flags+=("--change-strategy") - local_nonpersistent_flags+=("--change-strategy") - flags+=("--change-triggers") - local_nonpersistent_flags+=("--change-triggers") - flags+=("--dry-run") - flags+=("-d") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--to-version=") - local_nonpersistent_flags+=("--to-version=") - flags+=("--as=") - 
flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_rollout_cancel() -{ - last_command="oc_rollout_cancel" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("deploymentconfig") - noun_aliases=() -} - -_oc_rollout_history() -{ - last_command="oc_rollout_history" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--revision=") - local_nonpersistent_flags+=("--revision=") - flags+=("--template=") - 
flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("daemonset") - must_have_one_noun+=("deployment") - must_have_one_noun+=("deploymentconfig") - must_have_one_noun+=("statefulset") - noun_aliases=() -} - -_oc_rollout_latest() -{ - last_command="oc_rollout_latest" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--again") - local_nonpersistent_flags+=("--again") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_rollout_pause() -{ - last_command="oc_rollout_pause" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - 
flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("deployment") - must_have_one_noun+=("deploymentconfig") - noun_aliases=() -} - -_oc_rollout_resume() -{ - last_command="oc_rollout_resume" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("deployment") - must_have_one_noun+=("deploymentconfig") - noun_aliases=() -} - -_oc_rollout_retry() -{ - last_command="oc_rollout_retry" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - 
flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_rollout_status() -{ - last_command="oc_rollout_status" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--revision=") - local_nonpersistent_flags+=("--revision=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--watch") - flags+=("-w") - local_nonpersistent_flags+=("--watch") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("daemonset") - must_have_one_noun+=("deployment") - must_have_one_noun+=("statefulset") - noun_aliases=() -} - -_oc_rollout_undo() -{ - last_command="oc_rollout_undo" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - 
local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--to-revision=") - local_nonpersistent_flags+=("--to-revision=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - must_have_one_noun+=("daemonset") - must_have_one_noun+=("deployment") - must_have_one_noun+=("deploymentconfig") - must_have_one_noun+=("statefulset") - noun_aliases=() -} - -_oc_rollout() -{ - last_command="oc_rollout" - commands=() - commands+=("cancel") - commands+=("history") - commands+=("latest") - commands+=("pause") - commands+=("resume") - commands+=("retry") - commands+=("status") - commands+=("undo") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_rsh() -{ - last_command="oc_rsh" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--container=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--container=") - flags+=("--no-tty") - flags+=("-T") - local_nonpersistent_flags+=("--no-tty") - flags+=("--shell=") - local_nonpersistent_flags+=("--shell=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--tty") - flags+=("-t") - local_nonpersistent_flags+=("--tty") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() 
- noun_aliases=() -} - -_oc_rsync() -{ - last_command="oc_rsync" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--compress") - local_nonpersistent_flags+=("--compress") - flags+=("--container=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--container=") - flags+=("--delete") - local_nonpersistent_flags+=("--delete") - flags+=("--exclude=") - local_nonpersistent_flags+=("--exclude=") - flags+=("--include=") - local_nonpersistent_flags+=("--include=") - flags+=("--no-perms") - local_nonpersistent_flags+=("--no-perms") - flags+=("--progress") - local_nonpersistent_flags+=("--progress") - flags+=("--quiet") - flags+=("-q") - local_nonpersistent_flags+=("--quiet") - flags+=("--strategy=") - local_nonpersistent_flags+=("--strategy=") - flags+=("--watch") - flags+=("-w") - local_nonpersistent_flags+=("--watch") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_run() -{ - last_command="oc_run" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--attach") - local_nonpersistent_flags+=("--attach") - flags+=("--cascade") - local_nonpersistent_flags+=("--cascade") - flags+=("--command") - local_nonpersistent_flags+=("--command") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--env=") - local_nonpersistent_flags+=("--env=") - flags+=("--expose") - local_nonpersistent_flags+=("--expose") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--force") - local_nonpersistent_flags+=("--force") - flags+=("--generator=") - local_nonpersistent_flags+=("--generator=") - flags+=("--grace-period=") - local_nonpersistent_flags+=("--grace-period=") - flags+=("--hostport=") - local_nonpersistent_flags+=("--hostport=") - flags+=("--image=") - local_nonpersistent_flags+=("--image=") - flags+=("--image-pull-policy=") - local_nonpersistent_flags+=("--image-pull-policy=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--labels=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--labels=") - flags+=("--leave-stdin-open") - local_nonpersistent_flags+=("--leave-stdin-open") - flags+=("--limits=") - local_nonpersistent_flags+=("--limits=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--overrides=") - 
local_nonpersistent_flags+=("--overrides=") - flags+=("--pod-running-timeout=") - local_nonpersistent_flags+=("--pod-running-timeout=") - flags+=("--port=") - local_nonpersistent_flags+=("--port=") - flags+=("--quiet") - local_nonpersistent_flags+=("--quiet") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--replicas=") - two_word_flags+=("-r") - local_nonpersistent_flags+=("--replicas=") - flags+=("--requests=") - local_nonpersistent_flags+=("--requests=") - flags+=("--restart=") - local_nonpersistent_flags+=("--restart=") - flags+=("--rm") - local_nonpersistent_flags+=("--rm") - flags+=("--save-config") - local_nonpersistent_flags+=("--save-config") - flags+=("--schedule=") - local_nonpersistent_flags+=("--schedule=") - flags+=("--service-generator=") - local_nonpersistent_flags+=("--service-generator=") - flags+=("--service-overrides=") - local_nonpersistent_flags+=("--service-overrides=") - flags+=("--serviceaccount=") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--stdin") - flags+=("-i") - local_nonpersistent_flags+=("--stdin") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--tty") - flags+=("-t") - local_nonpersistent_flags+=("--tty") - flags+=("--wait") - local_nonpersistent_flags+=("--wait") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_flag+=("--image=") - must_have_one_noun=() - noun_aliases=() -} - -_oc_scale() -{ - last_command="oc_scale" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--current-replicas=") - local_nonpersistent_flags+=("--current-replicas=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--replicas=") - local_nonpersistent_flags+=("--replicas=") - flags+=("--resource-version=") - local_nonpersistent_flags+=("--resource-version=") - 
flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_flag+=("--replicas=") - must_have_one_noun=() - must_have_one_noun+=("deployment") - must_have_one_noun+=("deploymentconfig") - must_have_one_noun+=("replicaset") - must_have_one_noun+=("replicationcontroller") - must_have_one_noun+=("statefulset") - noun_aliases=() -} - -_oc_secrets_add() -{ - last_command="oc_secrets_add" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--for=") - local_nonpersistent_flags+=("--for=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_secrets_link() -{ - last_command="oc_secrets_link" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--for=") - local_nonpersistent_flags+=("--for=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_secrets_unlink() -{ - last_command="oc_secrets_unlink" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - 
local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_secrets() -{ - last_command="oc_secrets" - commands=() - commands+=("add") - commands+=("link") - commands+=("unlink") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_serviceaccounts_create-kubeconfig() -{ - last_command="oc_serviceaccounts_create-kubeconfig" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--with-namespace=") - local_nonpersistent_flags+=("--with-namespace=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_serviceaccounts_get-token() -{ - last_command="oc_serviceaccounts_get-token" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - 
flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_serviceaccounts_new-token() -{ - last_command="oc_serviceaccounts_new-token" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--labels=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--labels=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_serviceaccounts() -{ - last_command="oc_serviceaccounts" - commands=() - commands+=("create-kubeconfig") - commands+=("get-token") - commands+=("new-token") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_build-hook() -{ - last_command="oc_set_build-hook" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--command") - local_nonpersistent_flags+=("--command") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - 
local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--post-commit") - local_nonpersistent_flags+=("--post-commit") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--remove") - local_nonpersistent_flags+=("--remove") - flags+=("--script=") - local_nonpersistent_flags+=("--script=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_build-secret() -{ - last_command="oc_set_build-secret" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--pull") - local_nonpersistent_flags+=("--pull") - flags+=("--push") - local_nonpersistent_flags+=("--push") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--remove") - local_nonpersistent_flags+=("--remove") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--source") - local_nonpersistent_flags+=("--source") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - 
flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_deployment-hook() -{ - last_command="oc_set_deployment-hook" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--container=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--container=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--environment=") - two_word_flags+=("-e") - local_nonpersistent_flags+=("--environment=") - flags+=("--failure-policy=") - local_nonpersistent_flags+=("--failure-policy=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--mid") - local_nonpersistent_flags+=("--mid") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--post") - local_nonpersistent_flags+=("--post") - flags+=("--pre") - local_nonpersistent_flags+=("--pre") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--remove") - local_nonpersistent_flags+=("--remove") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--volumes=") - local_nonpersistent_flags+=("--volumes=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_env() -{ - last_command="oc_set_env" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--containers=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--containers=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--env=") - two_word_flags+=("-e") - local_nonpersistent_flags+=("--env=") - flags+=("--filename=") - flags_with_completion+=("--filename") - 
flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--from=") - local_nonpersistent_flags+=("--from=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--list") - local_nonpersistent_flags+=("--list") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--overwrite") - local_nonpersistent_flags+=("--overwrite") - flags+=("--prefix=") - local_nonpersistent_flags+=("--prefix=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--resolve") - local_nonpersistent_flags+=("--resolve") - flags+=("--resource-version=") - local_nonpersistent_flags+=("--resource-version=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_image() -{ - last_command="oc_set_image" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--source=") - local_nonpersistent_flags+=("--source=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") 
- flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_image-lookup() -{ - last_command="oc_set_image-lookup" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--enabled") - local_nonpersistent_flags+=("--enabled") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--list") - local_nonpersistent_flags+=("--list") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_probe() -{ - last_command="oc_set_probe" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--containers=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--containers=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--failure-threshold=") - local_nonpersistent_flags+=("--failure-threshold=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - 
two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--get-url=") - local_nonpersistent_flags+=("--get-url=") - flags+=("--initial-delay-seconds=") - local_nonpersistent_flags+=("--initial-delay-seconds=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--liveness") - local_nonpersistent_flags+=("--liveness") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--open-tcp=") - local_nonpersistent_flags+=("--open-tcp=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--period-seconds=") - local_nonpersistent_flags+=("--period-seconds=") - flags+=("--readiness") - local_nonpersistent_flags+=("--readiness") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--remove") - local_nonpersistent_flags+=("--remove") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--success-threshold=") - local_nonpersistent_flags+=("--success-threshold=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--timeout-seconds=") - local_nonpersistent_flags+=("--timeout-seconds=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_resources() -{ - last_command="oc_set_resources" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--containers=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--containers=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--limits=") - local_nonpersistent_flags+=("--limits=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--requests=") - 
local_nonpersistent_flags+=("--requests=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_route-backends() -{ - last_command="oc_set_route-backends" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--adjust") - local_nonpersistent_flags+=("--adjust") - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--equal") - local_nonpersistent_flags+=("--equal") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--zero") - local_nonpersistent_flags+=("--zero") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_selector() -{ - last_command="oc_set_selector" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - 
flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--resource-version=") - local_nonpersistent_flags+=("--resource-version=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_serviceaccount() -{ - last_command="oc_set_serviceaccount" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--record") - local_nonpersistent_flags+=("--record") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - 
flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_subject() -{ - last_command="oc_set_subject" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--group=") - local_nonpersistent_flags+=("--group=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--serviceaccount=") - local_nonpersistent_flags+=("--serviceaccount=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_triggers() -{ - last_command="oc_set_triggers" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--auto") - local_nonpersistent_flags+=("--auto") - flags+=("--containers=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--containers=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--from-bitbucket") - local_nonpersistent_flags+=("--from-bitbucket") - flags+=("--from-config") - local_nonpersistent_flags+=("--from-config") - flags+=("--from-github") - 
local_nonpersistent_flags+=("--from-github") - flags+=("--from-gitlab") - local_nonpersistent_flags+=("--from-gitlab") - flags+=("--from-image=") - local_nonpersistent_flags+=("--from-image=") - flags+=("--from-webhook") - local_nonpersistent_flags+=("--from-webhook") - flags+=("--from-webhook-allow-env") - local_nonpersistent_flags+=("--from-webhook-allow-env") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--manual") - local_nonpersistent_flags+=("--manual") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--remove") - local_nonpersistent_flags+=("--remove") - flags+=("--remove-all") - local_nonpersistent_flags+=("--remove-all") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set_volumes() -{ - last_command="oc_set_volumes" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--add") - local_nonpersistent_flags+=("--add") - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--claim-class=") - local_nonpersistent_flags+=("--claim-class=") - flags+=("--claim-mode=") - local_nonpersistent_flags+=("--claim-mode=") - flags+=("--claim-name=") - local_nonpersistent_flags+=("--claim-name=") - flags+=("--claim-size=") - local_nonpersistent_flags+=("--claim-size=") - flags+=("--configmap-name=") - local_nonpersistent_flags+=("--configmap-name=") - flags+=("--confirm") - local_nonpersistent_flags+=("--confirm") - flags+=("--containers=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--containers=") - flags+=("--default-mode=") - local_nonpersistent_flags+=("--default-mode=") - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--kustomize=") - two_word_flags+=("-k") - local_nonpersistent_flags+=("--kustomize=") - flags+=("--local") - local_nonpersistent_flags+=("--local") - flags+=("--mount-path=") - two_word_flags+=("-m") - 
local_nonpersistent_flags+=("--mount-path=") - flags+=("--name=") - local_nonpersistent_flags+=("--name=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--overwrite") - local_nonpersistent_flags+=("--overwrite") - flags+=("--path=") - local_nonpersistent_flags+=("--path=") - flags+=("--read-only") - local_nonpersistent_flags+=("--read-only") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--remove") - local_nonpersistent_flags+=("--remove") - flags+=("--secret-name=") - local_nonpersistent_flags+=("--secret-name=") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - flags+=("--source=") - local_nonpersistent_flags+=("--source=") - flags+=("--sub-path=") - local_nonpersistent_flags+=("--sub-path=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--type=") - two_word_flags+=("-t") - local_nonpersistent_flags+=("--type=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_set() -{ - last_command="oc_set" - commands=() - commands+=("build-hook") - commands+=("build-secret") - commands+=("deployment-hook") - commands+=("env") - commands+=("image") - commands+=("image-lookup") - commands+=("probe") - commands+=("resources") - commands+=("route-backends") - commands+=("selector") - commands+=("serviceaccount") - commands+=("subject") - commands+=("triggers") - commands+=("volumes") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_start-build() -{ - last_command="oc_start-build" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--build-arg=") - local_nonpersistent_flags+=("--build-arg=") - flags+=("--build-loglevel=") - 
local_nonpersistent_flags+=("--build-loglevel=") - flags+=("--commit=") - local_nonpersistent_flags+=("--commit=") - flags+=("--env=") - two_word_flags+=("-e") - local_nonpersistent_flags+=("--env=") - flags+=("--follow") - flags+=("-F") - local_nonpersistent_flags+=("--follow") - flags+=("--from-archive=") - local_nonpersistent_flags+=("--from-archive=") - flags+=("--from-build=") - local_nonpersistent_flags+=("--from-build=") - flags+=("--from-dir=") - local_nonpersistent_flags+=("--from-dir=") - flags+=("--from-file=") - local_nonpersistent_flags+=("--from-file=") - flags+=("--from-repo=") - local_nonpersistent_flags+=("--from-repo=") - flags+=("--from-webhook=") - local_nonpersistent_flags+=("--from-webhook=") - flags+=("--git-post-receive=") - local_nonpersistent_flags+=("--git-post-receive=") - flags+=("--git-repository=") - local_nonpersistent_flags+=("--git-repository=") - flags+=("--incremental") - local_nonpersistent_flags+=("--incremental") - flags+=("--list-webhooks=") - local_nonpersistent_flags+=("--list-webhooks=") - flags+=("--no-cache") - local_nonpersistent_flags+=("--no-cache") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--wait") - flags+=("-w") - local_nonpersistent_flags+=("--wait") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_status() -{ - last_command="oc_status" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--suggest") - local_nonpersistent_flags+=("--suggest") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_tag() -{ - last_command="oc_tag" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--alias") - 
local_nonpersistent_flags+=("--alias") - flags+=("--delete") - flags+=("-d") - local_nonpersistent_flags+=("--delete") - flags+=("--insecure") - local_nonpersistent_flags+=("--insecure") - flags+=("--reference") - local_nonpersistent_flags+=("--reference") - flags+=("--reference-policy=") - local_nonpersistent_flags+=("--reference-policy=") - flags+=("--scheduled") - local_nonpersistent_flags+=("--scheduled") - flags+=("--source=") - local_nonpersistent_flags+=("--source=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_version() -{ - last_command="oc_version" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--client") - local_nonpersistent_flags+=("--client") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--short") - local_nonpersistent_flags+=("--short") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_wait() -{ - last_command="oc_wait" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--all-namespaces") - flags+=("-A") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--allow-missing-template-keys") - local_nonpersistent_flags+=("--allow-missing-template-keys") - flags+=("--field-selector=") - local_nonpersistent_flags+=("--field-selector=") - flags+=("--filename=") - flags_with_completion+=("--filename") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - two_word_flags+=("-f") - flags_with_completion+=("-f") - flags_completion+=("__oc_handle_filename_extension_flag json|yaml|yml") - local_nonpersistent_flags+=("--filename=") - flags+=("--for=") - local_nonpersistent_flags+=("--for=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--recursive") - flags+=("-R") - local_nonpersistent_flags+=("--recursive") - flags+=("--selector=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--selector=") - 
flags+=("--template=") - flags_with_completion+=("--template") - flags_completion+=("_filedir") - local_nonpersistent_flags+=("--template=") - flags+=("--timeout=") - local_nonpersistent_flags+=("--timeout=") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_whoami() -{ - last_command="oc_whoami" - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--show-console") - local_nonpersistent_flags+=("--show-console") - flags+=("--show-context") - flags+=("-c") - local_nonpersistent_flags+=("--show-context") - flags+=("--show-server") - local_nonpersistent_flags+=("--show-server") - flags+=("--show-token") - flags+=("-t") - local_nonpersistent_flags+=("--show-token") - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_oc_root_command() -{ - last_command="oc" - commands=() - commands+=("adm") - commands+=("annotate") - commands+=("api-resources") - commands+=("api-versions") - commands+=("apply") - commands+=("attach") - commands+=("auth") - commands+=("autoscale") - commands+=("cancel-build") - commands+=("cluster-info") - commands+=("completion") - commands+=("config") - commands+=("convert") - commands+=("cp") - commands+=("create") - commands+=("debug") - commands+=("delete") - commands+=("describe") - commands+=("diff") - commands+=("edit") - commands+=("ex") - commands+=("exec") - commands+=("explain") - commands+=("expose") - commands+=("extract") - commands+=("get") - commands+=("idle") - commands+=("image") - commands+=("import-image") - commands+=("kustomize") - commands+=("label") - commands+=("login") - commands+=("logout") - commands+=("logs") - commands+=("new-app") - commands+=("new-build") - commands+=("new-project") - commands+=("observe") - commands+=("options") - commands+=("patch") - commands+=("plugin") - commands+=("policy") - commands+=("port-forward") - commands+=("process") - commands+=("project") - commands+=("projects") - commands+=("proxy") - commands+=("registry") - commands+=("replace") - commands+=("rollback") - commands+=("rollout") - commands+=("rsh") - commands+=("rsync") - commands+=("run") - 
commands+=("scale") - commands+=("secrets") - commands+=("serviceaccounts") - commands+=("set") - commands+=("start-build") - commands+=("status") - commands+=("tag") - commands+=("version") - commands+=("wait") - commands+=("whoami") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--as=") - flags+=("--as-group=") - flags+=("--cache-dir=") - flags+=("--certificate-authority=") - flags+=("--client-certificate=") - flags+=("--client-key=") - flags+=("--cluster=") - flags+=("--config=") - flags+=("--context=") - flags+=("--insecure-skip-tls-verify") - flags+=("--kubeconfig=") - flags+=("--match-server-version") - flags+=("--namespace=") - flags_with_completion+=("--namespace") - flags_completion+=("__oc_get_namespaces") - two_word_flags+=("-n") - flags_with_completion+=("-n") - flags_completion+=("__oc_get_namespaces") - flags+=("--request-timeout=") - flags+=("--server=") - two_word_flags+=("-s") - flags+=("--token=") - flags+=("--user=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -__start_oc() -{ - local cur prev words cword - declare -A flaghash 2>/dev/null || : - if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -s || return - else - __oc_init_completion -n "=" || return - fi - - local c=0 - local flags=() - local two_word_flags=() - local local_nonpersistent_flags=() - local flags_with_completion=() - local flags_completion=() - local commands=("oc") - local must_have_one_flag=() - local must_have_one_noun=() - local last_command - local nouns=() - - __oc_handle_word -} - -if [[ $(type -t compopt) = "builtin" ]]; then - complete -o default -F __start_oc oc -else - complete -o default -o nospace -F __start_oc oc -fi - -# ex: ts=4 sw=4 et filetype=sh - -BASH_COMPLETION_EOF -} - -__kubectl_bash_source <(__kubectl_convert_bash_to_zsh) -_complete kubectl 2>/dev/null diff --git a/vendor/github.com/openshift/oc/glide.lock b/vendor/github.com/openshift/oc/glide.lock deleted file mode 100644 index 9fafadd2c893..000000000000 --- a/vendor/github.com/openshift/oc/glide.lock +++ /dev/null @@ -1,1305 +0,0 @@ -hash: 133600678bfced8f947d0b7797aa19e87a4b7cb279eab1c96799fd17aac36ec3 -updated: 2019-08-01T17:39:30.516109233+02:00 -imports: -- name: bitbucket.org/ww/goautoneg - version: 2ae31c8b6b30d2f4c8100c20d527b571e9c433bb - repo: https://github.com/munnerz/goautoneg.git -- name: github.com/alexbrainman/sspi - version: e580b900e9f5657daa5473021296289be6da2661 - subpackages: - - negotiate -- name: github.com/apcera/gssapi - version: 5fb4217df13b8e6878046fe1e5c10e560e1b86dc -- name: github.com/aws/aws-sdk-go - version: 81f3829f5a9d041041bdf56e55926691309d7699 - subpackages: - - aws - - aws/awserr - - aws/awsutil - - aws/client - - aws/client/metadata - - aws/corehandlers - - aws/credentials - - aws/credentials/ec2rolecreds - - aws/credentials/endpointcreds - - aws/credentials/processcreds - - aws/credentials/stscreds - - aws/csm - - aws/defaults - - aws/ec2metadata - - aws/endpoints - - aws/request - - aws/session - - aws/signer/v4 - - internal/ini - - internal/s3err - - internal/sdkio - - internal/sdkrand - - internal/sdkuri - - internal/shareddefaults - - private/protocol - - private/protocol/ec2query - - private/protocol/eventstream - - private/protocol/eventstream/eventstreamapi - - private/protocol/json/jsonutil - - private/protocol/jsonrpc - - private/protocol/query - - private/protocol/query/queryutil - - private/protocol/rest - - private/protocol/restxml - - 
private/protocol/xml/xmlutil - - service/autoscaling - - service/ec2 - - service/ecr - - service/elb - - service/elbv2 - - service/kms - - service/s3 - - service/s3/s3iface - - service/s3/s3manager - - service/sts -- name: github.com/Azure/go-ansiterm - version: d6e3b3328b783f23731bc4d058875b0371ff8109 - subpackages: - - winterm -- name: github.com/beorn7/perks - version: 3ac7bf7a47d159a033b107610db8a1b6575507a4 - subpackages: - - quantile -- name: github.com/blang/semver - version: b38d23b8782a487059e8fc8773e9a5b228a77cb6 -- name: github.com/certifi/gocertifi - version: ee1a9a0726d2ae45f54118cac878c990d4016ded -- name: github.com/chai2010/gettext-go - version: c6fed771bfd517099caf0f7a961671fa8ed08723 - subpackages: - - gettext - - gettext/mo - - gettext/plural - - gettext/po -- name: github.com/containerd/continuity - version: aaeac12a7ffcd198ae25440a9dff125c2e2703a7 - subpackages: - - pathdriver -- name: github.com/containers/image - version: 4bc6d24282b115f8b61a6d08470ed42ac7c91392 - repo: https://github.com/openshift/containers-image.git - subpackages: - - docker/policyconfiguration - - docker/reference - - manifest - - signature - - transports - - types - - version -- name: github.com/containers/storage - version: 1b2a0dcaf4e74170644c851ff75b8637dbd2af68 - subpackages: - - pkg/fileutils - - pkg/homedir - - pkg/idtools - - pkg/mount - - pkg/system -- name: github.com/coreos/bbolt - version: 48ea1b39c25fc1bab3506fbc712ecbaa842c4d2d -- name: github.com/coreos/etcd - version: 27fc7e2296f506182f58ce846e48f36b34fe6842 - subpackages: - - auth/authpb - - clientv3 - - etcdserver/api/v3rpc/rpctypes - - etcdserver/etcdserverpb - - mvcc/mvccpb - - pkg/tlsutil - - pkg/transport - - pkg/types -- name: github.com/davecgh/go-spew - version: 782f4967f2dc4564575ca782fe2d04090b5faca8 - subpackages: - - spew -- name: github.com/daviddengcn/go-colortext - version: 511bcaf42ccd42c38aba7427b6673277bf19e2a1 -- name: github.com/docker/distribution - version: d4c35485a70df4dce2179bc227b1393a69edb809 - repo: https://github.com/openshift/docker-distribution.git - subpackages: - - digestset - - manifest - - manifest/manifestlist - - manifest/schema1 - - manifest/schema2 - - metrics - - reference - - registry/api/errcode - - registry/api/v2 - - registry/client - - registry/client/auth - - registry/client/auth/challenge - - registry/client/transport - - registry/storage/cache - - registry/storage/cache/memory -- name: github.com/docker/docker - version: a9fbbdc8dd8794b20af358382ab780559bca589d - subpackages: - - api - - api/types - - api/types/blkiodev - - api/types/container - - api/types/events - - api/types/filters - - api/types/image - - api/types/mount - - api/types/network - - api/types/registry - - api/types/strslice - - api/types/swarm - - api/types/swarm/runtime - - api/types/time - - api/types/versions - - api/types/volume - - client - - opts - - pkg/archive - - pkg/fileutils - - pkg/homedir - - pkg/idtools - - pkg/ioutils - - pkg/jsonmessage - - pkg/longpath - - pkg/mount - - pkg/pools - - pkg/stdcopy - - pkg/system - - pkg/term - - pkg/term/windows -- name: github.com/docker/go-connections - version: 3ede32e2033de7505e6500d6c868c2b9ed9f169d - subpackages: - - nat - - sockets - - tlsconfig -- name: github.com/docker/go-metrics - version: b84716841b82eab644a0c64fc8b42d480e49add5 -- name: github.com/docker/go-units - version: 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 -- name: github.com/docker/libnetwork - version: a9cd636e37898226332c439363e2ed0ea185ae92 - subpackages: - - ipamutils - - ipvs -- name: 
github.com/docker/libtrust - version: aabc10ec26b754e797f9028f4589c5b7bd90dc20 -- name: github.com/docker/spdystream - version: 449fdfce4d962303d702fec724ef0ad181c92528 - subpackages: - - spdy -- name: github.com/emicklei/go-restful - version: ff4f55a206334ef123e4f79bbf348980da81ca46 - subpackages: - - log -- name: github.com/evanphx/json-patch - version: 5858425f75500d40c52783dce87d085a483ce135 -- name: github.com/exponent-io/jsonpath - version: d6023ce2651d8eafb5c75bb0c7167536102ec9f5 -- name: github.com/fatih/camelcase - version: f6a740d52f961c60348ebb109adde9f4635d7540 -- name: github.com/fsnotify/fsnotify - version: c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9 -- name: github.com/fsouza/go-dockerclient - version: da3951ba2e9e02bc0e7642150b3e265aed7e1df3 -- name: github.com/getsentry/raven-go - version: c977f96e109525a5d8fa10a19165341f601f38b0 -- name: github.com/ghodss/yaml - version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee -- name: github.com/go-openapi/jsonpointer - version: ef5f0afec364d3b9396b7b77b43dbe26bf1f8004 -- name: github.com/go-openapi/jsonreference - version: 8483a886a90412cd6858df4ea3483dce9c8e35a3 -- name: github.com/go-openapi/loads - version: a80dea3052f00e5f032e860dd7355cd0cc67e24d -- name: github.com/go-openapi/spec - version: 5bae59e25b21498baea7f9d46e9c147ec106a42e -- name: github.com/go-openapi/swag - version: 5899d5c5e619fda5fa86e14795a835f473ca284c -- name: github.com/gogo/protobuf - version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 - subpackages: - - gogoproto - - proto - - protoc-gen-gogo/descriptor - - sortkeys -- name: github.com/golang/glog - version: 3c92600d7533018d216b534fe894ad60a1e6d5bf - repo: https://github.com/openshift/golang-glog.git -- name: github.com/golang/groupcache - version: 02826c3e79038b59d737d3b1c0a1d937f71a4433 - subpackages: - - lru -- name: github.com/golang/protobuf - version: b4deda0973fb4c70b50d226b1af49f3da59f5265 - subpackages: - - proto - - ptypes - - ptypes/any - - ptypes/duration - - ptypes/timestamp -- name: github.com/gonum/blas - version: 37e82626499e1df7c54aeaba0959fd6e7e8dc1e4 - subpackages: - - blas64 - - native - - native/internal/math32 -- name: github.com/gonum/floats - version: f74b330d45c56584a6ea7a27f5c64ea2900631e9 -- name: github.com/gonum/graph - version: 50b27dea7ebbfb052dfaf91681afc6fde28d8796 - subpackages: - - encoding/dot - - formats/dot - - formats/dot/ast - - formats/dot/internal/astx - - formats/dot/internal/errors - - formats/dot/internal/lexer - - formats/dot/internal/parser - - formats/dot/internal/token - - internal/linear - - internal/ordered - - internal/set - - path - - simple - - topo - - traverse -- name: github.com/gonum/internal - version: e57e4534cf9b3b00ef6c0175f59d8d2d34f60914 - subpackages: - - asm/f32 - - asm/f64 -- name: github.com/gonum/lapack - version: 5ed4b826becd1807e09377508f51756586d1a98c - subpackages: - - lapack64 - - native -- name: github.com/gonum/matrix - version: dd6034299e4242c9f0ea36735e6d4264dfcb3f9f - subpackages: - - mat64 -- name: github.com/google/btree - version: 20236160a414454a9c64b6c8829381c6f4bddcaa -- name: github.com/google/cadvisor - version: 8949c822ea91fa6b4996614a5ad6ade840be24ee -- name: github.com/google/gofuzz - version: 24818f796faf91cd76ec7bddd72458fbced7a6c1 -- name: github.com/google/uuid - version: c2e93f3ae59f2904160ceaab466009f965df46d6 -- name: github.com/googleapis/gnostic - version: 0c5108395e2debce0d731cf0287ddf7242066aba - subpackages: - - OpenAPIv2 - - compiler - - extensions -- name: github.com/gorilla/mux - version: 
e67b3c02c7195c052acff13261f0c9fd1ba53011 -- name: github.com/gregjones/httpcache - version: 787624de3eb7bd915c329cba748687a3b22666a6 - subpackages: - - diskcache -- name: github.com/grpc-ecosystem/grpc-gateway - version: 8cc3a55af3bcf171a1c23a90c4df9cf591706104 -- name: github.com/hashicorp/golang-lru - version: 20f1fb78b0740ba8c3cb143a61e86ba5c8669768 - subpackages: - - simplelru -- name: github.com/imdario/mergo - version: 9316a62528ac99aaecb4e47eadd6dc8aa6533d58 -- name: github.com/inconshreveable/mousetrap - version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -- name: github.com/jmespath/go-jmespath - version: 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 -- name: github.com/joho/godotenv - version: 6d367c18edf6ca7fd004efd6863e4c5728fa858e -- name: github.com/jonboulle/clockwork - version: 72f9bd7c4e0c2a40055ab3d0f09654f730cce982 -- name: github.com/json-iterator/go - version: ab8a2e0c74be9d3be70b3184d9acc634935ded82 -- name: github.com/jteeuwen/go-bindata - version: a0ff2567cfb70903282db057e799fd826784d41d -- name: github.com/liggitt/tabwriter - version: 89fcab3d43de07060e4fd4c1547430ed57e87f24 -- name: github.com/lithammer/dedent - version: 8478954c3bc893cf36c5ee7c822266b993a3b3ee -- name: github.com/mailru/easyjson - version: 2f5df55504ebc322e4d52d34df6a1f5b503bf26d - subpackages: - - buffer - - jlexer - - jwriter -- name: github.com/MakeNowJust/heredoc - version: bb23615498cded5e105af4ce27de75b089cbe851 -- name: github.com/matttproud/golang_protobuf_extensions - version: c12348ce28de40eed0136aa2b644d0ee0650e56c - subpackages: - - pbutil -- name: github.com/Microsoft/go-winio - version: 97e4973ce50b2ff5f09635a57e2b88a037aae829 -- name: github.com/miekg/dns - version: 5a2b9fab83ff0f8bfc99684bd5f43a37abe560f1 -- name: github.com/mitchellh/go-wordwrap - version: ad45545899c7b13c020ea92b2072220eefad42b8 -- name: github.com/moby/buildkit - version: c3a857e3fca0a5cadd44ffd886a977559841aeaa - subpackages: - - frontend/dockerfile/command - - frontend/dockerfile/parser -- name: github.com/modern-go/concurrent - version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 -- name: github.com/modern-go/reflect2 - version: 94122c33edd36123c84d5368cfb2b69df93a0ec8 -- name: github.com/mtrmac/gpgme - version: b2432428689ca58c2b8e8dea9449d3295cf96fc9 -- name: github.com/mxk/go-flowrate - version: cca7078d478f8520f85629ad7c68962d31ed7682 - subpackages: - - flowrate -- name: github.com/Nvveen/Gotty - version: cd527374f1e5bff4938207604a14f2e38a9cf512 -- name: github.com/onsi/ginkgo - version: 53ca7dc85f609e8aa3af7902f189ed5dca96dbb5 - repo: https://github.com/openshift/onsi-ginkgo.git -- name: github.com/opencontainers/go-digest - version: ac19fd6e7483ff933754af248d80be865e543d22 -- name: github.com/opencontainers/image-spec - version: 372ad780f63454fbbbbcc7cf80e5b90245c13e13 - subpackages: - - specs-go - - specs-go/v1 -- name: github.com/opencontainers/runc - version: f000fe11ece1b79f744edd9c8e1a53ba0f5e0f24 - subpackages: - - libcontainer - - libcontainer/apparmor - - libcontainer/cgroups - - libcontainer/cgroups/fs - - libcontainer/cgroups/systemd - - libcontainer/configs - - libcontainer/configs/validate - - libcontainer/criurpc - - libcontainer/intelrdt - - libcontainer/keys - - libcontainer/mount - - libcontainer/seccomp - - libcontainer/stacktrace - - libcontainer/system - - libcontainer/user - - libcontainer/utils -- name: github.com/openshift/api - version: 0922aa5a655be314e20a3e0e94f4f2b105100154 - subpackages: - - annotations - - apps - - apps/v1 - - authorization - - authorization/v1 - - build - - build/v1 
- - config - - config/v1 - - image - - image/docker10 - - image/dockerpre012 - - image/v1 - - kubecontrolplane - - kubecontrolplane/v1 - - legacyconfig/v1 - - network - - network/v1 - - oauth - - oauth/v1 - - openshiftcontrolplane - - openshiftcontrolplane/v1 - - operator - - operator/v1 - - operator/v1alpha1 - - osin - - osin/v1 - - pkg/serialization - - project - - project/v1 - - quota - - quota/v1 - - route - - route/v1 - - security - - security/v1 - - servicecertsigner - - servicecertsigner/v1alpha1 - - template - - template/v1 - - unidling/v1alpha1 - - user - - user/v1 - - webconsole - - webconsole/v1 -- name: github.com/openshift/client-go - version: a85ea6a6b3a5d2dbe41582ee35695dd4683e1f02 - subpackages: - - apps/clientset/versioned - - apps/clientset/versioned/fake - - apps/clientset/versioned/scheme - - apps/clientset/versioned/typed/apps/v1 - - apps/clientset/versioned/typed/apps/v1/fake - - authorization/clientset/versioned - - authorization/clientset/versioned/fake - - authorization/clientset/versioned/scheme - - authorization/clientset/versioned/typed/authorization/v1 - - authorization/clientset/versioned/typed/authorization/v1/fake - - build/clientset/versioned - - build/clientset/versioned/fake - - build/clientset/versioned/scheme - - build/clientset/versioned/typed/build/v1 - - build/clientset/versioned/typed/build/v1/fake - - config/clientset/versioned - - config/clientset/versioned/scheme - - config/clientset/versioned/typed/config/v1 - - image/clientset/versioned - - image/clientset/versioned/fake - - image/clientset/versioned/scheme - - image/clientset/versioned/typed/image/v1 - - image/clientset/versioned/typed/image/v1/fake - - network/clientset/versioned/scheme - - network/clientset/versioned/typed/network/v1 - - oauth/clientset/versioned - - oauth/clientset/versioned/fake - - oauth/clientset/versioned/scheme - - oauth/clientset/versioned/typed/oauth/v1 - - oauth/clientset/versioned/typed/oauth/v1/fake - - operator/clientset/versioned - - operator/clientset/versioned/scheme - - operator/clientset/versioned/typed/operator/v1 - - operator/clientset/versioned/typed/operator/v1alpha1 - - project/clientset/versioned - - project/clientset/versioned/fake - - project/clientset/versioned/scheme - - project/clientset/versioned/typed/project/v1 - - project/clientset/versioned/typed/project/v1/fake - - quota/clientset/versioned/scheme - - quota/clientset/versioned/typed/quota/v1 - - route/clientset/versioned - - route/clientset/versioned/fake - - route/clientset/versioned/scheme - - route/clientset/versioned/typed/route/v1 - - route/clientset/versioned/typed/route/v1/fake - - security/clientset/versioned - - security/clientset/versioned/fake - - security/clientset/versioned/scheme - - security/clientset/versioned/typed/security/v1 - - security/clientset/versioned/typed/security/v1/fake - - template/clientset/versioned - - template/clientset/versioned/fake - - template/clientset/versioned/scheme - - template/clientset/versioned/typed/template/v1 - - template/clientset/versioned/typed/template/v1/fake - - user/clientset/versioned - - user/clientset/versioned/fake - - user/clientset/versioned/scheme - - user/clientset/versioned/typed/user/v1 - - user/clientset/versioned/typed/user/v1/fake -- name: github.com/openshift/library-go - version: 1810ce5f54ff4a1a4fea05e06024896a4885eb05 - subpackages: - - pkg/apps/appsserialization - - pkg/apps/appsutil - - pkg/authorization/authorizationutil - - pkg/build/buildutil - - pkg/build/envresolve - - pkg/build/naming - - pkg/certs - - 
pkg/config/client - - pkg/config/helpers - - pkg/config/validation - - pkg/crypto - - pkg/git - - pkg/image/dockerv1client - - pkg/image/imageutil - - pkg/image/internal/digest - - pkg/image/internal/reference - - pkg/image/reference - - pkg/image/referencemutator - - pkg/image/registryclient - - pkg/image/trigger - - pkg/legacyapi/legacygroupification - - pkg/network/networkapihelpers - - pkg/network/networkutils - - pkg/oauth/oauthdiscovery - - pkg/operator/resource/retry - - pkg/security/ldapclient - - pkg/security/ldapquery - - pkg/security/ldaptestclient - - pkg/security/ldaputil - - pkg/serviceability - - pkg/template/generator - - pkg/template/templateprocessing - - pkg/unidling/unidlingclient -- name: github.com/openshift/source-to-image - version: 2a579ecd66dfaf9ee21bbc860fcde8e4d1d12301 - subpackages: - - pkg/api - - pkg/api/constants - - pkg/errors - - pkg/scm/git - - pkg/tar - - pkg/util - - pkg/util/cmd - - pkg/util/cygpath - - pkg/util/fs - - pkg/util/log - - pkg/util/user -- name: github.com/pborman/uuid - version: ca53cad383cad2479bbba7f7a1a05797ec1386e4 -- name: github.com/peterbourgon/diskv - version: 5f041e8faa004a95c88a202771f4cc3e991971e6 -- name: github.com/pkg/errors - version: 645ef00459ed84a119197bfb8d8205042c6df63d -- name: github.com/pkg/profile - version: f6fe06335df110bcf1ed6d4e852b760bfc15beee -- name: github.com/prometheus/client_golang - version: 505eaef017263e299324067d40ca2c48f6a2cf50 - subpackages: - - prometheus - - prometheus/internal - - prometheus/promhttp - - prometheus/testutil -- name: github.com/prometheus/client_model - version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6 - subpackages: - - go -- name: github.com/prometheus/common - version: cfeb6f9992ffa54aaa4f2170ade4067ee478b250 - subpackages: - - expfmt - - internal/bitbucket.org/ww/goautoneg - - model -- name: github.com/prometheus/procfs - version: 65c1f6f8f0fc1e2185eb9863a3bc751496404259 - subpackages: - - xfs -- name: github.com/PuerkitoBio/purell - version: 8a290539e2e8629dbc4e6bad948158f790ec31f4 -- name: github.com/PuerkitoBio/urlesc - version: 5bd2802263f21d8788851d5305584c82a5c75d7e -- name: github.com/RangelReale/osincli - version: fababb0555f21315d1a34af6615a16eaab44396b - repo: https://github.com/openshift/osincli.git -- name: github.com/russross/blackfriday - version: 300106c228d52c8941d4b3de6054a6062a86dda3 -- name: github.com/shurcooL/sanitized_anchor_name - version: 10ef21a441db47d8b13ebcc5fd2310f636973c77 -- name: github.com/sirupsen/logrus - version: 89742aefa4b206dcf400792f3bd35b542998eb3b -- name: github.com/spf13/cobra - version: c439c4fa093711d42e1b01acb1235b52004753c1 -- name: github.com/spf13/pflag - version: 583c0c0531f06d5278b7d917446061adc344b5cd -- name: github.com/vjeantet/ldapserver - version: 5ac58729571e52ae23768e3c270c624d4ee7fa23 -- name: go4.org - version: 03efcb870d84809319ea509714dd6d19a1498483 - repo: https://github.com/go4org/go4 - subpackages: - - errorutil -- name: golang.org/x/crypto - version: de0752318171da717af4ce24d0a2e8626afaeb11 - subpackages: - - bcrypt - - blowfish - - cast5 - - cryptobyte - - cryptobyte/asn1 - - curve25519 - - ed25519 - - ed25519/internal/edwards25519 - - internal/chacha20 - - internal/subtle - - nacl/secretbox - - ocsp - - openpgp - - openpgp/armor - - openpgp/elgamal - - openpgp/errors - - openpgp/packet - - openpgp/s2k - - pkcs12 - - pkcs12/internal/rc2 - - poly1305 - - salsa20/salsa - - ssh - - ssh/terminal -- name: golang.org/x/net - version: 65e2d4e15006aab9813ff8769e768bbf4bb667a0 - subpackages: - - context - - 
context/ctxhttp - - html - - html/atom - - http/httpguts - - http2 - - http2/hpack - - idna - - internal/socks - - internal/timeseries - - proxy - - trace -- name: golang.org/x/oauth2 - version: a6bd8cefa1811bd24b86f8902872e4e8225f74c4 - subpackages: - - google - - internal - - jws - - jwt -- name: golang.org/x/sys - version: 95c6576299259db960f6c5b9b69ea52422860fce - subpackages: - - unix - - windows - - windows/registry - - windows/svc -- name: golang.org/x/text - version: b19bf474d317b857955b12035d2c5acb57ce8b01 - subpackages: - - cases - - encoding - - encoding/internal - - encoding/internal/identifier - - encoding/unicode - - internal - - internal/tag - - internal/utf8internal - - language - - runes - - secure/bidirule - - secure/precis - - transform - - unicode/bidi - - unicode/norm - - width -- name: golang.org/x/time - version: f51c12702a4d776e4c1fa9b0fabab841babae631 - subpackages: - - rate -- name: golang.org/x/tools - version: 7f7074d5bcfd282eb16bc382b0bb3da762461985 - subpackages: - - benchmark/parse - - container/intsets - - go/ast/astutil - - go/gcexportdata - - go/internal/cgo - - go/internal/gcimporter - - go/internal/packagesdriver - - go/packages - - go/vcs - - imports - - internal/fastwalk - - internal/gopathwalk - - internal/module - - internal/semver -- name: gonum.org/v1/gonum - version: cebdade430ccb61c1feba4878085f6cf8cb3320e - repo: https://github.com/gonum/gonum.git -- name: google.golang.org/appengine - version: 12d5545dc1cfa6047a286d5e853841b6471f4c19 - subpackages: - - internal - - internal/base - - internal/datastore - - internal/log - - internal/remote_api - - internal/urlfetch - - urlfetch -- name: google.golang.org/genproto - version: 09f6ed296fc66555a25fe4ce95173148778dfa85 - subpackages: - - googleapis/rpc/status -- name: google.golang.org/grpc - version: 168a6198bcb0ef175f7dacec0b8691fc141dc9b8 - subpackages: - - balancer - - balancer/base - - balancer/roundrobin - - codes - - connectivity - - credentials - - encoding - - encoding/proto - - grpclog - - health/grpc_health_v1 - - internal - - internal/backoff - - internal/channelz - - internal/grpcrand - - keepalive - - metadata - - naming - - peer - - resolver - - resolver/dns - - resolver/passthrough - - stats - - status - - tap - - transport -- name: gopkg.in/asn1-ber.v1 - version: f715ec2f112d1e4195b827ad68cf44017a3ef2b1 -- name: gopkg.in/inf.v0 - version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 -- name: gopkg.in/ldap.v2 - version: bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9 -- name: gopkg.in/square/go-jose.v2 - version: 89060dee6a84df9a4dae49f676f0c755037834f1 - subpackages: - - cipher - - json - - jwt -- name: gopkg.in/yaml.v2 - version: 5420a8b6744d3b0345ab293f6fcba19c978f1183 -- name: k8s.io/api - version: 40a48860b5abbba9aa891b02b32da429b08d96a0 - subpackages: - - admission/v1beta1 - - admissionregistration/v1beta1 - - apps/v1 - - apps/v1beta1 - - apps/v1beta2 - - auditregistration/v1alpha1 - - authentication/v1 - - authentication/v1beta1 - - authorization/v1 - - authorization/v1beta1 - - autoscaling/v1 - - autoscaling/v2beta1 - - autoscaling/v2beta2 - - batch/v1 - - batch/v1beta1 - - batch/v2alpha1 - - certificates/v1beta1 - - coordination/v1 - - coordination/v1beta1 - - core/v1 - - events/v1beta1 - - extensions/v1beta1 - - imagepolicy/v1alpha1 - - networking/v1 - - networking/v1beta1 - - node/v1alpha1 - - node/v1beta1 - - policy/v1beta1 - - rbac/v1 - - rbac/v1alpha1 - - rbac/v1beta1 - - scheduling/v1 - - scheduling/v1alpha1 - - scheduling/v1beta1 - - settings/v1alpha1 - - storage/v1 - - 
storage/v1alpha1 - - storage/v1beta1 -- name: k8s.io/apiextensions-apiserver - version: 53c4693659ed354d76121458fb819202dd1635fa - subpackages: - - pkg/features -- name: k8s.io/apimachinery - version: b11b32a81c68aa4961651c3cefd1ff715af93ef5 - repo: https://github.com/openshift/kubernetes-apimachinery.git - subpackages: - - pkg/api/apitesting - - pkg/api/equality - - pkg/api/errors - - pkg/api/meta - - pkg/api/meta/testrestmapper - - pkg/api/resource - - pkg/api/validation - - pkg/api/validation/path - - pkg/apis/meta/internalversion - - pkg/apis/meta/v1 - - pkg/apis/meta/v1/unstructured - - pkg/apis/meta/v1/unstructured/unstructuredscheme - - pkg/apis/meta/v1/validation - - pkg/apis/meta/v1beta1 - - pkg/conversion - - pkg/conversion/queryparams - - pkg/fields - - pkg/labels - - pkg/runtime - - pkg/runtime/schema - - pkg/runtime/serializer - - pkg/runtime/serializer/json - - pkg/runtime/serializer/protobuf - - pkg/runtime/serializer/recognizer - - pkg/runtime/serializer/streaming - - pkg/runtime/serializer/versioning - - pkg/selection - - pkg/types - - pkg/util/cache - - pkg/util/clock - - pkg/util/diff - - pkg/util/duration - - pkg/util/errors - - pkg/util/framer - - pkg/util/httpstream - - pkg/util/httpstream/spdy - - pkg/util/intstr - - pkg/util/json - - pkg/util/jsonmergepatch - - pkg/util/mergepatch - - pkg/util/naming - - pkg/util/net - - pkg/util/proxy - - pkg/util/rand - - pkg/util/remotecommand - - pkg/util/runtime - - pkg/util/sets - - pkg/util/strategicpatch - - pkg/util/uuid - - pkg/util/validation - - pkg/util/validation/field - - pkg/util/wait - - pkg/util/yaml - - pkg/version - - pkg/watch - - third_party/forked/golang/json - - third_party/forked/golang/netutil - - third_party/forked/golang/reflect -- name: k8s.io/apiserver - version: 8b27c41bdbb11ff103caa673315e097bf0289171 - subpackages: - - pkg/apis/audit - - pkg/authentication/authenticator - - pkg/authentication/request/x509 - - pkg/authentication/serviceaccount - - pkg/authentication/user - - pkg/endpoints/request - - pkg/features - - pkg/server/healthz - - pkg/storage/names - - pkg/util/feature -- name: k8s.io/cli-runtime - version: 2fdfcf685c9c00ee1a5243da4a47339f0b16fe45 - repo: https://github.com/openshift/kubernetes-cli-runtime.git - subpackages: - - pkg/genericclioptions - - pkg/genericclioptions/openshiftpatch - - pkg/kustomize - - pkg/kustomize/k8sdeps - - pkg/kustomize/k8sdeps/configmapandsecret - - pkg/kustomize/k8sdeps/kunstruct - - pkg/kustomize/k8sdeps/kv - - pkg/kustomize/k8sdeps/transformer - - pkg/kustomize/k8sdeps/transformer/hash - - pkg/kustomize/k8sdeps/transformer/patch - - pkg/kustomize/k8sdeps/validator - - pkg/printers - - pkg/resource -- name: k8s.io/client-go - version: 07e29e5eae48c8279cce3dc0f544e5e7a8ee9bb7 - repo: https://github.com/openshift/kubernetes-client-go.git - subpackages: - - discovery - - discovery/cached/disk - - discovery/fake - - dynamic - - dynamic/fake - - kubernetes - - kubernetes/fake - - kubernetes/scheme - - kubernetes/typed/admissionregistration/v1beta1 - - kubernetes/typed/admissionregistration/v1beta1/fake - - kubernetes/typed/apps/v1 - - kubernetes/typed/apps/v1/fake - - kubernetes/typed/apps/v1beta1 - - kubernetes/typed/apps/v1beta1/fake - - kubernetes/typed/apps/v1beta2 - - kubernetes/typed/apps/v1beta2/fake - - kubernetes/typed/auditregistration/v1alpha1 - - kubernetes/typed/auditregistration/v1alpha1/fake - - kubernetes/typed/authentication/v1 - - kubernetes/typed/authentication/v1/fake - - kubernetes/typed/authentication/v1beta1 - - 
kubernetes/typed/authentication/v1beta1/fake - - kubernetes/typed/authorization/v1 - - kubernetes/typed/authorization/v1/fake - - kubernetes/typed/authorization/v1beta1 - - kubernetes/typed/authorization/v1beta1/fake - - kubernetes/typed/autoscaling/v1 - - kubernetes/typed/autoscaling/v1/fake - - kubernetes/typed/autoscaling/v2beta1 - - kubernetes/typed/autoscaling/v2beta1/fake - - kubernetes/typed/autoscaling/v2beta2 - - kubernetes/typed/autoscaling/v2beta2/fake - - kubernetes/typed/batch/v1 - - kubernetes/typed/batch/v1/fake - - kubernetes/typed/batch/v1beta1 - - kubernetes/typed/batch/v1beta1/fake - - kubernetes/typed/batch/v2alpha1 - - kubernetes/typed/batch/v2alpha1/fake - - kubernetes/typed/certificates/v1beta1 - - kubernetes/typed/certificates/v1beta1/fake - - kubernetes/typed/coordination/v1 - - kubernetes/typed/coordination/v1/fake - - kubernetes/typed/coordination/v1beta1 - - kubernetes/typed/coordination/v1beta1/fake - - kubernetes/typed/core/v1 - - kubernetes/typed/core/v1/fake - - kubernetes/typed/events/v1beta1 - - kubernetes/typed/events/v1beta1/fake - - kubernetes/typed/extensions/v1beta1 - - kubernetes/typed/extensions/v1beta1/fake - - kubernetes/typed/networking/v1 - - kubernetes/typed/networking/v1/fake - - kubernetes/typed/networking/v1beta1 - - kubernetes/typed/networking/v1beta1/fake - - kubernetes/typed/node/v1alpha1 - - kubernetes/typed/node/v1alpha1/fake - - kubernetes/typed/node/v1beta1 - - kubernetes/typed/node/v1beta1/fake - - kubernetes/typed/policy/v1beta1 - - kubernetes/typed/policy/v1beta1/fake - - kubernetes/typed/rbac/v1 - - kubernetes/typed/rbac/v1/fake - - kubernetes/typed/rbac/v1alpha1 - - kubernetes/typed/rbac/v1alpha1/fake - - kubernetes/typed/rbac/v1beta1 - - kubernetes/typed/rbac/v1beta1/fake - - kubernetes/typed/scheduling/v1 - - kubernetes/typed/scheduling/v1/fake - - kubernetes/typed/scheduling/v1alpha1 - - kubernetes/typed/scheduling/v1alpha1/fake - - kubernetes/typed/scheduling/v1beta1 - - kubernetes/typed/scheduling/v1beta1/fake - - kubernetes/typed/settings/v1alpha1 - - kubernetes/typed/settings/v1alpha1/fake - - kubernetes/typed/storage/v1 - - kubernetes/typed/storage/v1/fake - - kubernetes/typed/storage/v1alpha1 - - kubernetes/typed/storage/v1alpha1/fake - - kubernetes/typed/storage/v1beta1 - - kubernetes/typed/storage/v1beta1/fake - - pkg/apis/clientauthentication - - pkg/apis/clientauthentication/v1alpha1 - - pkg/apis/clientauthentication/v1beta1 - - pkg/version - - plugin/pkg/client/auth/exec - - rest - - rest/fake - - rest/watch - - restmapper - - scale - - scale/fake - - scale/scheme - - scale/scheme/appsint - - scale/scheme/appsv1beta1 - - scale/scheme/appsv1beta2 - - scale/scheme/autoscalingv1 - - scale/scheme/extensionsint - - scale/scheme/extensionsv1beta1 - - testing - - third_party/forked/golang/template - - tools/auth - - tools/cache - - tools/clientcmd - - tools/clientcmd/api - - tools/clientcmd/api/latest - - tools/clientcmd/api/v1 - - tools/metrics - - tools/pager - - tools/portforward - - tools/record - - tools/record/util - - tools/reference - - tools/remotecommand - - tools/watch - - transport - - transport/spdy - - util/cert - - util/connrotation - - util/exec - - util/flowcontrol - - util/homedir - - util/jsonpath - - util/keyutil - - util/retry -- name: k8s.io/cloud-provider - version: c892ea32361a3655e7bd8c06f2d02dd8ce73dd78 - subpackages: - - features -- name: k8s.io/cluster-bootstrap - version: 50662da99b70008aa395937d09b9da52a9edd041 -- name: k8s.io/code-generator - version: 50b561225d70b3eb79a1faafd3dfe7b1a62cbe73 
-- name: k8s.io/component-base - version: 4a91899592f42b2f5859587cc5a676a5b94d2ee3 - subpackages: - - cli/flag - - logs -- name: k8s.io/csi-api - version: 693d387aa133c41adc69c717981d40df2b5c797a -- name: k8s.io/csi-translation-lib - version: ce92c5cfdd61987ea4ce3b8c279cec25d109f366 -- name: k8s.io/gengo - version: 51747d6e00da1fc578d5a333a93bb2abcbce7a95 -- name: k8s.io/klog - version: 8139d8cb77af419532b33dfa7dd09fbc5f1d344f -- name: k8s.io/kube-aggregator - version: da8327669ac57b6e6a06676eeb7de19c9780f76d -- name: k8s.io/kube-controller-manager - version: 97ed623e38350ab8a3cce36d6003ff9ab27f6669 -- name: k8s.io/kube-openapi - version: b3a7cee44a305be0a69e1b9ac03018307287e1b0 - subpackages: - - pkg/common - - pkg/util/proto - - pkg/util/proto/testing - - pkg/util/proto/validation -- name: k8s.io/kube-proxy - version: 4d735c31b05439ef4136b5ee8454e62e854fac1f -- name: k8s.io/kube-scheduler - version: b74e9e79538d3a93ad1d1f391b9461c04a20c84e -- name: k8s.io/kubelet - version: f6da02f583256d1a15cc44a101c80bfa2080a46f -- name: k8s.io/kubernetes - version: 473b2830919bc04a8044a290df4be2c83b525b93 - repo: https://github.com/openshift/kubernetes.git - subpackages: - - cmd/genutils - - pkg/api/legacyscheme - - pkg/api/service - - pkg/api/v1/pod - - pkg/apis/apps - - pkg/apis/apps/install - - pkg/apis/apps/v1 - - pkg/apis/apps/v1beta1 - - pkg/apis/apps/v1beta2 - - pkg/apis/authentication - - pkg/apis/authentication/install - - pkg/apis/authentication/v1 - - pkg/apis/authentication/v1beta1 - - pkg/apis/authorization - - pkg/apis/authorization/install - - pkg/apis/authorization/v1 - - pkg/apis/authorization/v1beta1 - - pkg/apis/autoscaling - - pkg/apis/autoscaling/install - - pkg/apis/autoscaling/v1 - - pkg/apis/autoscaling/v2beta1 - - pkg/apis/autoscaling/v2beta2 - - pkg/apis/batch - - pkg/apis/batch/install - - pkg/apis/batch/v1 - - pkg/apis/batch/v1beta1 - - pkg/apis/batch/v2alpha1 - - pkg/apis/certificates - - pkg/apis/certificates/install - - pkg/apis/certificates/v1beta1 - - pkg/apis/coordination - - pkg/apis/coordination/install - - pkg/apis/coordination/v1 - - pkg/apis/coordination/v1beta1 - - pkg/apis/core - - pkg/apis/core/helper - - pkg/apis/core/install - - pkg/apis/core/pods - - pkg/apis/core/v1 - - pkg/apis/core/v1/helper - - pkg/apis/core/validation - - pkg/apis/events - - pkg/apis/events/install - - pkg/apis/events/v1beta1 - - pkg/apis/extensions - - pkg/apis/extensions/install - - pkg/apis/extensions/v1beta1 - - pkg/apis/networking - - pkg/apis/node - - pkg/apis/policy - - pkg/apis/policy/install - - pkg/apis/policy/v1beta1 - - pkg/apis/rbac - - pkg/apis/rbac/install - - pkg/apis/rbac/v1 - - pkg/apis/rbac/v1alpha1 - - pkg/apis/rbac/v1beta1 - - pkg/apis/scheduling - - pkg/apis/scheduling/install - - pkg/apis/scheduling/v1 - - pkg/apis/scheduling/v1alpha1 - - pkg/apis/scheduling/v1beta1 - - pkg/apis/settings - - pkg/apis/settings/install - - pkg/apis/settings/v1alpha1 - - pkg/apis/storage - - pkg/apis/storage/install - - pkg/apis/storage/util - - pkg/apis/storage/v1 - - pkg/apis/storage/v1alpha1 - - pkg/apis/storage/v1beta1 - - pkg/capabilities - - pkg/controller - - pkg/controller/deployment/util - - pkg/credentialprovider - - pkg/features - - pkg/fieldpath - - pkg/kubectl - - pkg/kubectl/apps - - pkg/kubectl/cmd - - pkg/kubectl/cmd/annotate - - pkg/kubectl/cmd/apiresources - - pkg/kubectl/cmd/apply - - pkg/kubectl/cmd/attach - - pkg/kubectl/cmd/auth - - pkg/kubectl/cmd/autoscale - - pkg/kubectl/cmd/certificates - - pkg/kubectl/cmd/clusterinfo - - pkg/kubectl/cmd/completion - - 
pkg/kubectl/cmd/config - - pkg/kubectl/cmd/convert - - pkg/kubectl/cmd/cp - - pkg/kubectl/cmd/create - - pkg/kubectl/cmd/delete - - pkg/kubectl/cmd/describe - - pkg/kubectl/cmd/diff - - pkg/kubectl/cmd/drain - - pkg/kubectl/cmd/edit - - pkg/kubectl/cmd/exec - - pkg/kubectl/cmd/explain - - pkg/kubectl/cmd/expose - - pkg/kubectl/cmd/get - - pkg/kubectl/cmd/kustomize - - pkg/kubectl/cmd/label - - pkg/kubectl/cmd/logs - - pkg/kubectl/cmd/options - - pkg/kubectl/cmd/patch - - pkg/kubectl/cmd/plugin - - pkg/kubectl/cmd/portforward - - pkg/kubectl/cmd/proxy - - pkg/kubectl/cmd/replace - - pkg/kubectl/cmd/rollingupdate - - pkg/kubectl/cmd/rollout - - pkg/kubectl/cmd/run - - pkg/kubectl/cmd/scale - - pkg/kubectl/cmd/set - - pkg/kubectl/cmd/set/env - - pkg/kubectl/cmd/taint - - pkg/kubectl/cmd/testing - - pkg/kubectl/cmd/top - - pkg/kubectl/cmd/util - - pkg/kubectl/cmd/util/editor - - pkg/kubectl/cmd/util/editor/crlf - - pkg/kubectl/cmd/util/openapi - - pkg/kubectl/cmd/util/openapi/testing - - pkg/kubectl/cmd/util/openapi/validation - - pkg/kubectl/cmd/version - - pkg/kubectl/cmd/wait - - pkg/kubectl/describe - - pkg/kubectl/describe/versioned - - pkg/kubectl/drain - - pkg/kubectl/explain - - pkg/kubectl/generate - - pkg/kubectl/generate/versioned - - pkg/kubectl/generated - - pkg/kubectl/metricsutil - - pkg/kubectl/polymorphichelpers - - pkg/kubectl/proxy - - pkg/kubectl/scheme - - pkg/kubectl/util - - pkg/kubectl/util/certificate - - pkg/kubectl/util/deployment - - pkg/kubectl/util/event - - pkg/kubectl/util/fieldpath - - pkg/kubectl/util/hash - - pkg/kubectl/util/i18n - - pkg/kubectl/util/podutils - - pkg/kubectl/util/printers - - pkg/kubectl/util/qos - - pkg/kubectl/util/rbac - - pkg/kubectl/util/resource - - pkg/kubectl/util/slice - - pkg/kubectl/util/storage - - pkg/kubectl/util/templates - - pkg/kubectl/util/term - - pkg/kubectl/validation - - pkg/kubelet/types - - pkg/master/ports - - pkg/printers - - pkg/printers/internalversion - - pkg/registry/rbac/reconciliation - - pkg/registry/rbac/validation - - pkg/security/apparmor - - pkg/serviceaccount - - pkg/util/hash - - pkg/util/interrupt - - pkg/util/labels - - pkg/util/node - - pkg/util/parsers - - pkg/util/taints - - pkg/version -- name: k8s.io/metrics - version: 1bd6a4002213382a0afb4f0b96717926e5413646 - subpackages: - - pkg/apis/metrics - - pkg/apis/metrics/v1alpha1 - - pkg/apis/metrics/v1beta1 - - pkg/client/clientset/versioned - - pkg/client/clientset/versioned/scheme - - pkg/client/clientset/versioned/typed/metrics/v1alpha1 - - pkg/client/clientset/versioned/typed/metrics/v1beta1 -- name: k8s.io/sample-apiserver - version: 236f85ce49e5a88a9b0a4eff066c5c61f51a1d95 -- name: k8s.io/sample-cli-plugin - version: 59043b4d4f841e1240827a0a30aab14cdc8e4e08 -- name: k8s.io/sample-controller - version: 324336050c979273816000d5d2c6df1ff1be9ea6 -- name: k8s.io/utils - version: c2654d5206da6b7b6ace12841e8f359bb89b443c - subpackages: - - buffer - - exec - - integer - - net - - path - - pointer - - trace -- name: sigs.k8s.io/kustomize - version: a6f65144121d1955266b0cd836ce954c04122dc8 - subpackages: - - pkg/commands/build - - pkg/constants - - pkg/expansion - - pkg/factory - - pkg/fs - - pkg/git - - pkg/gvk - - pkg/ifc - - pkg/ifc/transformer - - pkg/image - - pkg/internal/error - - pkg/loader - - pkg/patch - - pkg/patch/transformer - - pkg/resid - - pkg/resmap - - pkg/resource - - pkg/target - - pkg/transformers - - pkg/transformers/config - - pkg/transformers/config/defaultconfig - - pkg/types -- name: sigs.k8s.io/yaml - version: 
fd68e9863619f6ec2fdd8625fe1f02e7c877e480 -- name: vbom.ml/util - version: db5cfe13f5cc80a4990d98e2e1b0707a4d1a5394 - subpackages: - - sortorder -testImports: -- name: github.com/AaronO/go-git-http - version: 1d9485b3a98f7484772acb5f0dda28b69b958fdd - subpackages: - - auth -- name: github.com/elazarl/goproxy - version: c4fc26588b6ef8af07a191fcb6476387bdd46711 diff --git a/vendor/github.com/openshift/oc/glide.yaml b/vendor/github.com/openshift/oc/glide.yaml deleted file mode 100644 index 5e5f6245aa70..000000000000 --- a/vendor/github.com/openshift/oc/glide.yaml +++ /dev/null @@ -1,193 +0,0 @@ -package: github.com/openshift/oc -import: -# kube first -- package: k8s.io/kubernetes - repo: https://github.com/openshift/kubernetes.git - version: oc-4.2-kubernetes-1.14.0 -- package: k8s.io/apimachinery - repo: https://github.com/openshift/kubernetes-apimachinery.git - version: oc-4.2-kubernetes-1.14.0 -- package: k8s.io/client-go - repo: https://github.com/openshift/kubernetes-client-go.git - version: oc-4.2-kubernetes-1.14.0 -- package: k8s.io/cli-runtime - repo: https://github.com/openshift/kubernetes-cli-runtime.git - version: oc-4.2-kubernetes-1.14.0 -- package: k8s.io/api - version: kubernetes-1.14.0 -- package: k8s.io/apiextensions-apiserver - version: kubernetes-1.14.0 -- package: k8s.io/apiserver - version: kubernetes-1.14.0 -- package: k8s.io/cloud-provider - version: kubernetes-1.14.0 -- package: k8s.io/cluster-bootstrap - version: kubernetes-1.14.0 -- package: k8s.io/code-generator - version: kubernetes-1.14.0 -- package: k8s.io/component-base - version: kubernetes-1.14.0 -- package: k8s.io/csi-api - version: kubernetes-1.14.0 -- package: k8s.io/csi-translation-lib - version: kubernetes-1.14.0 -- package: k8s.io/kube-aggregator - version: kubernetes-1.14.0 -- package: k8s.io/kube-controller-manager - version: kubernetes-1.14.0 -- package: k8s.io/kubelet - version: kubernetes-1.14.0 -- package: k8s.io/kube-proxy - version: kubernetes-1.14.0 -- package: k8s.io/kube-scheduler - version: kubernetes-1.14.0 -- package: k8s.io/metrics - version: kubernetes-1.14.0 -- package: k8s.io/sample-apiserver - version: kubernetes-1.14.0 -- package: k8s.io/sample-cli-plugin - version: kubernetes-1.14.0 -- package: k8s.io/sample-controller - version: kubernetes-1.14.0 - # this matches the 1.14 branch from kube -- package: k8s.io/gengo - version: 51747d6e00da1fc578d5a333a93bb2abcbce7a95 -- package: k8s.io/utils - version: c2654d5206da6b7b6ace12841e8f359bb89b443c -- package: k8s.io/kube-openapi - version: b3a7cee44a305be0a69e1b9ac03018307287e1b0 -- package: k8s.io/klog - version: 8139d8cb77af419532b33dfa7dd09fbc5f1d344f # recent klog bump broke glog flags test in k8s.io/apiserver, pin -- package: github.com/coreos/etcd - version: v3.3.10 -- package: google.golang.org/grpc - version: v1.13.0 -- package: github.com/grpc-ecosystem/grpc-gateway - version: v1.3.0 -- package: github.com/coreos/bbolt - version: v1.3.1-coreos.6 -- package: github.com/google/cadvisor - version: v0.32.0 - -# openshift second -- package: github.com/openshift/api - version: master -- package: github.com/openshift/client-go - version: master -- package: github.com/openshift/library-go - version: master -- package: github.com/openshift/source-to-image - version: master - -# forks third -# master -- package: github.com/onsi/ginkgo - repo: https://github.com/openshift/onsi-ginkgo.git - version: release-v1.2.0 -- package: github.com/containers/image - repo: https://github.com/openshift/containers-image.git - version: openshift-3.8 -# cli -- 
package: github.com/docker/distribution - repo: https://github.com/openshift/docker-distribution.git - version: image-registry-3.11 -- package: github.com/docker/docker - version: a9fbbdc8dd8794b20af358382ab780559bca589d - -# ours: shared with kube, but forced by openshift -# master: co-exist with klog -- package: github.com/golang/glog - repo: https://github.com/openshift/golang-glog.git - version: delegate-to-klog -# cli -- package: github.com/gonum/blas - version: 37e82626499e1df7c54aeaba0959fd6e7e8dc1e4 -# cli -- package: github.com/gonum/floats - version: f74b330d45c56584a6ea7a27f5c64ea2900631e9 -# cli -- package: github.com/gonum/graph - version: 50b27dea7ebbfb052dfaf91681afc6fde28d8796 -# cli -- package: github.com/gonum/internal - version: e57e4534cf9b3b00ef6c0175f59d8d2d34f60914 -# cli -- package: github.com/gonum/lapack - version: 5ed4b826becd1807e09377508f51756586d1a98c -# cli -- package: github.com/gonum/matrix - version: dd6034299e4242c9f0ea36735e6d4264dfcb3f9f -# because of genapidocs. This is the current kube level -- package: github.com/go-openapi/loads - version: a80dea3052f00e5f032e860dd7355cd0cc67e24d -# retrieve latest version that fixes deadlock on kqueue -- package: github.com/fsnotify/fsnotify - version: v1.4.7 -# pinned to the level that kubernetes uses. Not sure why glide isn't matching this -- package: github.com/sirupsen/logrus - version: 89742aefa4b206dcf400792f3bd35b542998eb3b -# pinned to a level that kubernetes uses. -- package: github.com/evanphx/json-patch - version: 5858425f75500d40c52783dce87d085a483ce135 -# pinned to a level that kubernetes 1.12 use -- package: github.com/ghodss/yaml - version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee - -# mine: specific to openshift -# builds/cli -- package: github.com/fsouza/go-dockerclient - version: da3951ba2e9e02bc0e7642150b3e265aed7e1df3 -# auth (for testing) -- package: github.com/vjeantet/ldapserver - version: v1.0 -# auth (for sally) -- package: github.com/RangelReale/osincli - repo: https://github.com/openshift/osincli.git - version: fababb0555f21315d1a34af6615a16eaab44396b -# auth (for oc kerberos on linux + mac) -- package: github.com/apcera/gssapi - version: release-2.6.3 -# auth (for oc kerberos on windows) -- package: github.com/alexbrainman/sspi - version: e580b900e9f5657daa5473021296289be6da2661 -# new-app -- package: github.com/joho/godotenv - version: 6d367c18edf6ca7fd004efd6863e4c5728fa858e -# new-app -- package: github.com/moby/buildkit - version: c3a857e3fca0a5cadd44ffd886a977559841aeaa -# master? for bindata -- package: github.com/jteeuwen/go-bindata - version: a0ff2567cfb70903282db057e799fd826784d41d -# used to be ~v1 to keep up with fixes. It didn't build past v1.0.8. -- package: github.com/miekg/dns - version: v1.0.8 -# etcd pins a very old version that has contention issues -- package: github.com/google/btree - version: master - -# why do we have this? 
-- package: google.golang.org/appengine - version: 12d5545dc1cfa6047a286d5e853841b6471f4c19 - -# to avoid go4.org website outages on glide update -- package: go4.org - repo: https://github.com/go4org/go4 - version: 03efcb870d84809319ea509714dd6d19a1498483 - subpackages: - - errorutil - -# force glide to pull this in -- package: github.com/google/uuid - -# due to https://github.com/Masterminds/glide/issues/881 manually show where -# to get gonum.org/v1/gonum from -- package: gonum.org/v1/gonum - repo: https://github.com/gonum/gonum.git - version: cebdade430ccb61c1feba4878085f6cf8cb3320e - -# set alias location for this package due to -# https://github.com/Masterminds/glide/issues/1057; upstream did this in -# https://github.com/kubernetes/kubernetes/pull/72138 -- package: bitbucket.org/ww/goautoneg - repo: https://github.com/munnerz/goautoneg.git diff --git a/vendor/github.com/openshift/oc/hack/lib/init.sh b/vendor/github.com/openshift/oc/hack/lib/init.sh deleted file mode 100644 index 2af4a12a82e3..000000000000 --- a/vendor/github.com/openshift/oc/hack/lib/init.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# This script is meant to be the entrypoint for OpenShift Bash scripts to import all of the support -# libraries at once in order to make Bash script preambles as minimal as possible. This script recur- -# sively `source`s *.sh files in this directory tree. As such, no files should be `source`ed outside -# of this script to ensure that we do not attempt to overwrite read-only variables. - -set -o errexit -set -o nounset -set -o pipefail - -OS_ROOT="$(dirname "${BASH_SOURCE}")/../.." - -# Asks golang what it thinks the host platform is. The go tool chain does some -# slightly different things when the target platform matches the host platform. -function os::build::host_platform() { - echo "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" -} -readonly -f os::build::host_platform diff --git a/vendor/github.com/openshift/oc/hack/update-generated-completions.sh b/vendor/github.com/openshift/oc/hack/update-generated-completions.sh deleted file mode 100755 index 0dbc1b7cf5ca..000000000000 --- a/vendor/github.com/openshift/oc/hack/update-generated-completions.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env bash - -# This script sets up a go workspace locally and generates shell auto-completion scripts. - -source "$(dirname "${BASH_SOURCE}")/lib/init.sh" - -function os::build::gen-completions() { - local dest="$1" - local shell="$2" - local skipprefix="${3:-}" - - # We do this in a tmpdir in case the dest has other non-autogenned files - # We don't want to include them in the list of gen'd files - local tmpdir="${OS_ROOT}/_tmp/gen_comp" - mkdir -p "${tmpdir}" - # generate the new files - ${OS_ROOT}/oc completion ${shell} > $tmpdir/oc - # create the list of generated files - ls "${tmpdir}" | LC_ALL=C sort > "${tmpdir}/.files_generated" - - # remove all old generated file from the destination - while read file; do - if [[ -e "${tmpdir}/${file}" && -n "${skipprefix}" ]]; then - local original generated - original=$(grep -v "^${skipprefix}" "${dest}/${file}") || : - generated=$(grep -v "^${skipprefix}" "${tmpdir}/${file}") || : - if [[ "${original}" == "${generated}" ]]; then - # overwrite generated with original. 
- mv "${dest}/${file}" "${tmpdir}/${file}" - fi - else - rm "${dest}/${file}" || true - fi - done <"${dest}/.files_generated" - - # put the new generated file into the destination - find "${tmpdir}" -exec rsync -pt {} "${dest}" \; >/dev/null - #cleanup - rm -rf "${tmpdir}" - - echo "Assets generated in ${dest}" -} -readonly -f os::build::gen-completions - -platform="$(os::build::host_platform)" -if [[ "${platform}" != "linux/amd64" ]]; then - os::log::warning "Generating completions on ${platform} may not be identical to running on linux/amd64 due to conditional compilation." -fi - -OUTPUT_DIR_ROOT="${1:-${OS_ROOT}/contrib/completions}" - -mkdir -p "${OUTPUT_DIR_ROOT}/bash" || echo $? > /dev/null -mkdir -p "${OUTPUT_DIR_ROOT}/zsh" || echo $? > /dev/null - -os::build::gen-completions "${OUTPUT_DIR_ROOT}/bash" "bash" -os::build::gen-completions "${OUTPUT_DIR_ROOT}/zsh" "zsh" diff --git a/vendor/github.com/openshift/oc/hack/verify-generated-completions.sh b/vendor/github.com/openshift/oc/hack/verify-generated-completions.sh deleted file mode 100755 index e36f16aafe2c..000000000000 --- a/vendor/github.com/openshift/oc/hack/verify-generated-completions.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash - -source "$(dirname "${BASH_SOURCE}")/lib/init.sh" - -function cleanup() { - return_code=$? - rm -rf "${TMP_COMPLETION_ROOT}" - exit "${return_code}" -} -trap "cleanup" EXIT - -COMPLETION_ROOT_REL="contrib/completions" -COMPLETION_ROOT="${OS_ROOT}/${COMPLETION_ROOT_REL}" -TMP_COMPLETION_ROOT_REL="_output/verify-generated-completions/" -TMP_COMPLETION_ROOT="${OS_ROOT}/${TMP_COMPLETION_ROOT_REL}" - -platform="$(os::build::host_platform)" -if [[ "${platform}" != "linux/amd64" ]]; then - os::log::warning "Completions cannot be verified on non-Linux systems (${platform})" - exit 0 -fi - -${OS_ROOT}/hack/update-generated-completions.sh ${TMP_COMPLETION_ROOT_REL} -diff -Naupr -x 'OWNERS' ${COMPLETION_ROOT} ${TMP_COMPLETION_ROOT} diff --git a/vendor/github.com/openshift/oc/images/cli-artifacts/Dockerfile.rhel b/vendor/github.com/openshift/oc/images/cli-artifacts/Dockerfile.rhel deleted file mode 100644 index 6df27a12e788..000000000000 --- a/vendor/github.com/openshift/oc/images/cli-artifacts/Dockerfile.rhel +++ /dev/null @@ -1,14 +0,0 @@ -# This Dockerfile builds an image containing the Mac and Windows version of oc -# layered on top of the Linux cli image. -FROM registry.svc.ci.openshift.org/ocp/builder:golang-1.12 AS builder -WORKDIR /go/src/github.com/openshift/oc -COPY . . -RUN yum install -y --setopt=skip_missing_names_on_install=False gpgme-devel libassuan-devel -RUN make cross-build-darwin-amd64 cross-build-windows-amd64 --warn-undefined-variables - -FROM registry.svc.ci.openshift.org/ocp/4.2:cli -COPY --from=builder /go/src/github.com/openshift/oc/_output/bin/darwin_amd64/oc /usr/share/openshift/mac/oc -COPY --from=builder /go/src/github.com/openshift/oc/_output/bin/windows_amd64/oc.exe /usr/share/openshift/windows/oc.exe -LABEL io.k8s.display-name="OpenShift Clients" \ - io.k8s.description="OpenShift is a platform for developing, building, and deploying containerized applications." 
\ - io.openshift.tags="openshift,cli" diff --git a/vendor/github.com/openshift/oc/images/cli/Dockerfile.rhel b/vendor/github.com/openshift/oc/images/cli/Dockerfile.rhel deleted file mode 100644 index 1a878d692c97..000000000000 --- a/vendor/github.com/openshift/oc/images/cli/Dockerfile.rhel +++ /dev/null @@ -1,12 +0,0 @@ -FROM registry.svc.ci.openshift.org/ocp/builder:golang-1.12 AS builder -WORKDIR /go/src/github.com/openshift/oc -COPY . . -RUN yum install -y --setopt=skip_missing_names_on_install=False gpgme-devel libassuan-devel -RUN make build --warn-undefined-variables - -FROM registry.svc.ci.openshift.org/ocp/4.2:base -COPY --from=builder /go/src/github.com/openshift/oc/oc /usr/bin/ -RUN for i in kubectl openshift-deploy openshift-docker-build openshift-sti-build openshift-git-clone openshift-manage-dockerfile openshift-extract-image-content openshift-recycle; do ln -s /usr/bin/oc /usr/bin/$i; done -LABEL io.k8s.display-name="OpenShift Client" \ - io.k8s.description="OpenShift is a platform for developing, building, and deploying containerized applications." \ - io.openshift.tags="openshift,cli" diff --git a/vendor/github.com/openshift/oc/images/deployer/Dockerfile.rhel b/vendor/github.com/openshift/oc/images/deployer/Dockerfile.rhel deleted file mode 100644 index ab829f389bfa..000000000000 --- a/vendor/github.com/openshift/oc/images/deployer/Dockerfile.rhel +++ /dev/null @@ -1,8 +0,0 @@ -FROM registry.svc.ci.openshift.org/ocp/4.2:cli - -LABEL io.k8s.display-name="OpenShift Deployer" \ - io.k8s.description="This is a component of OpenShift and executes the user deployment process to roll out new containers. It may be used as a base image for building your own custom deployer image." \ - io.openshift.tags="openshift,deployer" -# The deployer doesn't require a root user. -USER 1001 -ENTRYPOINT ["/usr/bin/openshift-deploy"] diff --git a/vendor/github.com/openshift/oc/images/recycler/Dockerfile.rhel b/vendor/github.com/openshift/oc/images/recycler/Dockerfile.rhel deleted file mode 100644 index 59da4b4ca07a..000000000000 --- a/vendor/github.com/openshift/oc/images/recycler/Dockerfile.rhel +++ /dev/null @@ -1,6 +0,0 @@ -FROM registry.svc.ci.openshift.org/ocp/4.2:cli - -LABEL io.k8s.display-name="OpenShift Volume Recycler" \ - io.k8s.description="This is a component of OpenShift and is used to prepare persistent volumes for reuse after they are deleted." 
\ - io.openshift.tags="openshift,recycler" -ENTRYPOINT ["/usr/bin/openshift-recycle"] diff --git a/vendor/github.com/openshift/oc/oc.spec b/vendor/github.com/openshift/oc/oc.spec deleted file mode 100644 index a84087694a2c..000000000000 --- a/vendor/github.com/openshift/oc/oc.spec +++ /dev/null @@ -1,126 +0,0 @@ -#debuginfo not supported with Go -%global debug_package %{nil} -# modifying the Go binaries breaks the DWARF debugging -%global __os_install_post %{_rpmconfigdir}/brp-compress - -%global gopath %{_datadir}/gocode -%global import_path github.com/openshift/oc - -%global golang_version 1.12 -%global product_name OpenShift - -%{!?version: %global version 0.0.1} -%{!?release: %global release 1} - -Name: openshift-clients -Version: %{version} -Release: %{release}%{dist} -Summary: OpenShift client binaries -License: ASL 2.0 -URL: https://%{import_path} - -# If go_arches not defined fall through to implicit golang archs -%if 0%{?go_arches:1} -ExclusiveArch: %{go_arches} -%else -ExclusiveArch: x86_64 aarch64 ppc64le s390x -%endif - -#BuildRequires: bsdtar -BuildRequires: golang >= %{golang_version} -BuildRequires: krb5-devel -BuildRequires: rsync - -Provides: atomic-openshift-clients -Obsoletes: atomic-openshift-clients -Requires: bash-completion - -%description -%{summary} - -%package redistributable -Summary: OpenShift Client binaries for Linux, Mac OSX, and Windows -Provides: atomic-openshift-clients-redistributable -Obsoletes: atomic-openshift-clients-redistributable - -%description redistributable -%{summary} - -%prep - -%build -%ifarch x86_64 - # Create Binaries for all supported arches - make build cross-build -%else - %ifarch %{ix86} - GOOS=linux - GOARCH=386 - %endif - %ifarch ppc64le - GOOS=linux - GOARCH=ppc64le - %endif - %ifarch %{arm} aarch64 - GOOS=linux - GOARCH=arm64 - %endif - %ifarch s390x - GOOS=linux - GOARCH=s390x - %endif - %{source_git_vars} make build -%endif - -%install -install -d %{buildroot}%{_bindir} - - # Install for the local platform -install -p -m 755 oc %{buildroot}%{_bindir}/oc - -%ifarch x86_64 -# Install client executable for windows and mac -install -d %{buildroot}%{_datadir}/%{name}/{linux,macosx,windows} -install -p -m 755 ./oc %{buildroot}%{_datadir}/%{name}/linux/oc -install -p -m 755 ./_output/bin/darwin_amd64/oc %{buildroot}/%{_datadir}/%{name}/macosx/oc -install -p -m 755 ./_output/bin/windows_amd64/oc.exe %{buildroot}/%{_datadir}/%{name}/windows/oc.exe -%endif - -ln -s ./oc %{buildroot}%{_bindir}/kubectl - -# Install man1 man pages -install -d -m 0755 %{buildroot}%{_mandir}/man1 -./genman %{buildroot}%{_mandir}/man1 oc - - # Install bash completions -install -d -m 755 %{buildroot}%{_sysconfdir}/bash_completion.d/ -for bin in oc #kubectl -do - echo "+++ INSTALLING BASH COMPLETIONS FOR ${bin} " - %{buildroot}%{_bindir}/${bin} completion bash > %{buildroot}%{_sysconfdir}/bash_completion.d/${bin} - chmod 644 %{buildroot}%{_sysconfdir}/bash_completion.d/${bin} -done - -%files -%license LICENSE -%{_bindir}/oc -%{_bindir}/kubectl -%{_sysconfdir}/bash_completion.d/oc -#%{_sysconfdir}/bash_completion.d/kubectl -%{_mandir}/man1/oc* - -%ifarch x86_64 -%files redistributable -%license LICENSE -%dir %{_datadir}/%{name}/linux/ -%dir %{_datadir}/%{name}/macosx/ -%dir %{_datadir}/%{name}/windows/ -%{_datadir}/%{name}/linux/oc -#%{_datadir}/%{name}/linux/kubectl -%{_datadir}/%{name}/macosx/oc -#%{_datadir}/%{name}/macosx/kubectl -%{_datadir}/%{name}/windows/oc.exe -#%{_datadir}/%{name}/windows/kubectl.exe -%endif - -%changelog diff --git 
a/vendor/github.com/openshift/oc/pkg/cli/admin/admin.go b/vendor/github.com/openshift/oc/pkg/cli/admin/admin.go deleted file mode 100644 index 3dd6e6d03bdf..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/admin.go +++ /dev/null @@ -1,141 +0,0 @@ -package admin - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/kubectl/cmd/certificates" - "k8s.io/kubernetes/pkg/kubectl/cmd/taint" - - "github.com/spf13/cobra" - - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/cmd/drain" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - ktemplates "k8s.io/kubernetes/pkg/kubectl/util/templates" - - "github.com/openshift/oc/pkg/cli/admin/buildchain" - "github.com/openshift/oc/pkg/cli/admin/cert" - "github.com/openshift/oc/pkg/cli/admin/createbootstrapprojecttemplate" - "github.com/openshift/oc/pkg/cli/admin/createerrortemplate" - "github.com/openshift/oc/pkg/cli/admin/createkubeconfig" - "github.com/openshift/oc/pkg/cli/admin/createlogintemplate" - "github.com/openshift/oc/pkg/cli/admin/createproviderselectiontemplate" - "github.com/openshift/oc/pkg/cli/admin/groups" - "github.com/openshift/oc/pkg/cli/admin/migrate" - migrateetcd "github.com/openshift/oc/pkg/cli/admin/migrate/etcd" - migrateimages "github.com/openshift/oc/pkg/cli/admin/migrate/images" - migratehpa "github.com/openshift/oc/pkg/cli/admin/migrate/legacyhpa" - migratestorage "github.com/openshift/oc/pkg/cli/admin/migrate/storage" - migratetemplateinstances "github.com/openshift/oc/pkg/cli/admin/migrate/templateinstances" - "github.com/openshift/oc/pkg/cli/admin/mustgather" - "github.com/openshift/oc/pkg/cli/admin/network" - "github.com/openshift/oc/pkg/cli/admin/node" - "github.com/openshift/oc/pkg/cli/admin/policy" - "github.com/openshift/oc/pkg/cli/admin/project" - "github.com/openshift/oc/pkg/cli/admin/prune" - "github.com/openshift/oc/pkg/cli/admin/release" - "github.com/openshift/oc/pkg/cli/admin/top" - "github.com/openshift/oc/pkg/cli/admin/upgrade" - "github.com/openshift/oc/pkg/cli/admin/verifyimagesignature" - "github.com/openshift/oc/pkg/cli/kubectlwrappers" - "github.com/openshift/oc/pkg/cli/options" - cmdutil "github.com/openshift/oc/pkg/helpers/cmd" -) - -var adminLong = ktemplates.LongDesc(` - Administrative Commands - - Actions for administering an OpenShift cluster are exposed here.`) - -func NewCommandAdmin(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - // Main command - cmds := &cobra.Command{ - Use: name, - Short: "Tools for managing a cluster", - Long: fmt.Sprintf(adminLong), - Run: kcmdutil.DefaultSubCommandRun(streams.ErrOut), - } - - groups := ktemplates.CommandGroups{ - { - Message: "Cluster Management:", - Commands: []*cobra.Command{ - upgrade.New(f, fullName, streams), - top.NewCommandTop(top.TopRecommendedName, fullName+" "+top.TopRecommendedName, f, streams), - mustgather.NewMustGatherCommand(f, streams), - }, - }, - { - Message: "Node Management:", - Commands: []*cobra.Command{ - cmdutil.ReplaceCommandName("kubectl", fullName, drain.NewCmdDrain(f, streams)), - cmdutil.ReplaceCommandName("kubectl", fullName, ktemplates.Normalize(drain.NewCmdCordon(f, streams))), - cmdutil.ReplaceCommandName("kubectl", fullName, ktemplates.Normalize(drain.NewCmdUncordon(f, streams))), - cmdutil.ReplaceCommandName("kubectl", fullName, ktemplates.Normalize(taint.NewCmdTaint(f, streams))), - node.NewCmdLogs(fullName, f, streams), - }, - }, - { - Message: "Security and Policy:", - Commands: []*cobra.Command{ - 
project.NewCmdNewProject(project.NewProjectRecommendedName, fullName+" "+project.NewProjectRecommendedName, f, streams), - policy.NewCmdPolicy(policy.PolicyRecommendedName, fullName+" "+policy.PolicyRecommendedName, f, streams), - groups.NewCmdGroups(groups.GroupsRecommendedName, fullName+" "+groups.GroupsRecommendedName, f, streams), - withShortDescription(certificates.NewCmdCertificate(f, streams), "Approve or reject certificate requests"), - network.NewCmdPodNetwork(network.PodNetworkCommandName, fullName+" "+network.PodNetworkCommandName, f, streams), - }, - }, - { - Message: "Maintenance:", - Commands: []*cobra.Command{ - prune.NewCommandPrune(prune.PruneRecommendedName, fullName+" "+prune.PruneRecommendedName, f, streams), - migrate.NewCommandMigrate( - migrate.MigrateRecommendedName, fullName+" "+migrate.MigrateRecommendedName, f, streams, - // Migration commands - migrateimages.NewCmdMigrateImageReferences("image-references", fullName+" "+migrate.MigrateRecommendedName+" image-references", f, streams), - migratestorage.NewCmdMigrateAPIStorage("storage", fullName+" "+migrate.MigrateRecommendedName+" storage", f, streams), - migrateetcd.NewCmdMigrateTTLs("etcd-ttl", fullName+" "+migrate.MigrateRecommendedName+" etcd-ttl", f, streams), - migratehpa.NewCmdMigrateLegacyHPA("legacy-hpa", fullName+" "+migrate.MigrateRecommendedName+" legacy-hpa", f, streams), - migratetemplateinstances.NewCmdMigrateTemplateInstances("template-instances", fullName+" "+migrate.MigrateRecommendedName+" template-instances", f, streams), - ), - }, - }, - { - Message: "Configuration:", - Commands: []*cobra.Command{ - createkubeconfig.NewCommandCreateKubeConfig(createkubeconfig.CreateKubeConfigCommandName, fullName+" "+createkubeconfig.CreateKubeConfigCommandName, streams), - - createbootstrapprojecttemplate.NewCommandCreateBootstrapProjectTemplate(f, createbootstrapprojecttemplate.CreateBootstrapProjectTemplateCommand, fullName+" "+createbootstrapprojecttemplate.CreateBootstrapProjectTemplateCommand, streams), - - createlogintemplate.NewCommandCreateLoginTemplate(f, createlogintemplate.CreateLoginTemplateCommand, fullName+" "+createlogintemplate.CreateLoginTemplateCommand, streams), - createproviderselectiontemplate.NewCommandCreateProviderSelectionTemplate(f, createproviderselectiontemplate.CreateProviderSelectionTemplateCommand, fullName+" "+createproviderselectiontemplate.CreateProviderSelectionTemplateCommand, streams), - createerrortemplate.NewCommandCreateErrorTemplate(f, createerrortemplate.CreateErrorTemplateCommand, fullName+" "+createerrortemplate.CreateErrorTemplateCommand, streams), - }, - }, - } - - cmds.AddCommand(cert.NewCmdCert(cert.CertRecommendedName, fullName+" "+cert.CertRecommendedName, streams)) - - groups.Add(cmds) - cmdutil.ActsAsRootCommand(cmds, []string{"options"}, groups...) 
- - cmds.AddCommand( - release.NewCmd(f, fullName, streams), - buildchain.NewCmdBuildChain(name, fullName+" "+buildchain.BuildChainRecommendedCommandName, f, streams), - verifyimagesignature.NewCmdVerifyImageSignature(name, fullName+" "+verifyimagesignature.VerifyRecommendedName, f, streams), - - // part of every root command - kubectlwrappers.NewCmdConfig(fullName, "config", f, streams), - kubectlwrappers.NewCmdCompletion(fullName, streams), - - // hidden - options.NewCmdOptions(streams), - ) - - return cmds -} - -func withShortDescription(cmd *cobra.Command, desc string) *cobra.Command { - cmd.Short = desc - return cmd -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/buildchain/buildchain.go b/vendor/github.com/openshift/oc/pkg/cli/admin/buildchain/buildchain.go deleted file mode 100644 index 75fb22e14eb7..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/buildchain/buildchain.go +++ /dev/null @@ -1,211 +0,0 @@ -package buildchain - -import ( - "fmt" - "io" - "strings" - - "github.com/openshift/library-go/pkg/image/imageutil" - - "github.com/spf13/cobra" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/klog" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/util/templates" - - "github.com/openshift/api/image" - buildv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" - imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" - projectv1client "github.com/openshift/client-go/project/clientset/versioned/typed/project/v1" - osutil "github.com/openshift/oc/pkg/helpers/cmd" - "github.com/openshift/oc/pkg/helpers/describe" - imagegraph "github.com/openshift/oc/pkg/helpers/graph/imagegraph/nodes" -) - -// BuildChainRecommendedCommandName is the recommended command name -const BuildChainRecommendedCommandName = "build-chain" - -var ( - buildChainLong = templates.LongDesc(` - Output the inputs and dependencies of your builds - - Supported formats for the generated graph are dot and a human-readable output. 
- Tag and namespace are optional and if they are not specified, 'latest' and the - default namespace will be used respectively.`) - - buildChainExample = templates.Examples(` - # Build the dependency tree for the 'latest' tag in <image-stream> - %[1]s <image-stream> - - # Build the dependency tree for 'v2' tag in dot format and visualize it via the dot utility - %[1]s <image-stream>:v2 -o dot | dot -T svg -o deps.svg - - # Build the dependency tree across all namespaces for the specified image stream tag found in 'test' namespace - %[1]s <image-stream> -n test --all`) -) - -// BuildChainOptions contains all the options needed for build-chain -type BuildChainOptions struct { - name string - - defaultNamespace string - namespaces sets.String - allNamespaces bool - triggerOnly bool - reverse bool - - output string - - buildClient buildv1client.BuildV1Interface - imageClient imagev1client.ImageV1Interface - projectClient projectv1client.ProjectV1Interface -} - -// NewCmdBuildChain implements the OpenShift experimental build-chain command -func NewCmdBuildChain(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - options := &BuildChainOptions{ - namespaces: sets.NewString(), - } - cmd := &cobra.Command{ - Use: "build-chain IMAGESTREAMTAG", - Short: "Output the inputs and dependencies of your builds", - Long: buildChainLong, - Example: fmt.Sprintf(buildChainExample, fullName), - Run: func(cmd *cobra.Command, args []string) { - kcmdutil.CheckErr(options.Complete(f, cmd, args, streams.Out)) - kcmdutil.CheckErr(options.Validate()) - kcmdutil.CheckErr(options.RunBuildChain()) - }, - } - - cmd.Flags().BoolVar(&options.allNamespaces, "all", false, "If true, build dependency tree for the specified image stream tag across all namespaces") - cmd.Flags().BoolVar(&options.triggerOnly, "trigger-only", true, "If true, only include dependencies based on build triggers. If false, include all dependencies.") - cmd.Flags().BoolVar(&options.reverse, "reverse", false, "If true, show the istags dependencies instead of its dependants.") - cmd.Flags().StringVarP(&options.output, "output", "o", "", "Output format of dependency tree") - return cmd -} - -// Complete completes the required options for build-chain -func (o *BuildChainOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string, out io.Writer) error { - if len(args) != 1 { - return kcmdutil.UsageErrorf(cmd, "Must pass an image stream tag.
If only an image stream name is specified, 'latest' will be used for the tag.") - } - - clientConfig, err := f.ToRESTConfig() - if err != nil { - return err - } - o.buildClient, err = buildv1client.NewForConfig(clientConfig) - if err != nil { - return err - } - o.imageClient, err = imagev1client.NewForConfig(clientConfig) - if err != nil { - return err - } - o.projectClient, err = projectv1client.NewForConfig(clientConfig) - if err != nil { - return err - } - - resource := schema.GroupResource{} - mapper, err := f.ToRESTMapper() - if err != nil { - return err - } - resource, o.name, err = osutil.ResolveResource(image.Resource("imagestreamtags"), args[0], mapper) - if err != nil { - return err - } - - switch resource { - case image.Resource("imagestreamtags"): - o.name = normalizeImageStreamTag(o.name) - klog.V(4).Infof("Using %q as the image stream tag to look dependencies for", o.name) - default: - return fmt.Errorf("invalid resource provided: %v", resource) - } - - // Setup namespace - if o.allNamespaces { - // TODO: Handle different uses of build-chain; user and admin - projectList, err := o.projectClient.Projects().List(metav1.ListOptions{}) - if err != nil { - return err - } - for _, project := range projectList.Items { - klog.V(4).Infof("Found namespace %q", project.Name) - o.namespaces.Insert(project.Name) - } - } - - o.defaultNamespace, _, err = f.ToRawKubeConfigLoader().Namespace() - if err != nil { - return err - } - klog.V(4).Infof("Using %q as the namespace for %q", o.defaultNamespace, o.name) - o.namespaces.Insert(o.defaultNamespace) - klog.V(4).Infof("Will look for deps in %s", strings.Join(o.namespaces.List(), ",")) - - return nil -} - -// normalizeImageStreamTag normalizes an image stream tag by defaulting to 'latest' -// if no tag has been specified. 
-func normalizeImageStreamTag(name string) string { - stripped, tag, ok := imageutil.SplitImageStreamTag(name) - if !ok { - // Default to latest - return imageutil.JoinImageStreamTag(stripped, tag) - } - return name -} - -// Validate returns validation errors regarding build-chain -func (o *BuildChainOptions) Validate() error { - if len(o.name) == 0 { - return fmt.Errorf("image stream tag cannot be empty") - } - if len(o.defaultNamespace) == 0 { - return fmt.Errorf("default namespace cannot be empty") - } - if o.output != "" && o.output != "dot" { - return fmt.Errorf("output must be either empty or 'dot'") - } - if o.buildClient == nil { - return fmt.Errorf("buildConfig client must not be nil") - } - if o.imageClient == nil { - return fmt.Errorf("imageStreamTag client must not be nil") - } - if o.projectClient == nil { - return fmt.Errorf("project client must not be nil") - } - return nil -} - -// RunBuildChain contains all the necessary functionality for the OpenShift -// experimental build-chain command -func (o *BuildChainOptions) RunBuildChain() error { - ist := imagegraph.MakeImageStreamTagObjectMeta2(o.defaultNamespace, o.name) - - desc, err := describe.NewChainDescriber(o.buildClient, o.namespaces, o.output).Describe(ist, !o.triggerOnly, o.reverse) - if err != nil { - if _, isNotFoundErr := err.(describe.NotFoundErr); isNotFoundErr { - // Try to get the imageStreamTag via a direct GET - if _, getErr := o.imageClient.ImageStreamTags(o.defaultNamespace).Get(o.name, metav1.GetOptions{}); getErr != nil { - return getErr - } - fmt.Printf("Image stream tag %q in %q doesn't have any dependencies.\n", o.name, o.defaultNamespace) - return nil - } - return err - } - - fmt.Println(desc) - - return nil -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/buildchain/test/multiple-namespaces-bcs.yaml b/vendor/github.com/openshift/oc/pkg/cli/admin/buildchain/test/multiple-namespaces-bcs.yaml deleted file mode 100644 index 41d71f10a2b7..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/buildchain/test/multiple-namespaces-bcs.yaml +++ /dev/null @@ -1,147 +0,0 @@ -apiVersion: v1 -items: -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - creationTimestamp: 2015-07-22T11:25:00Z - name: ruby-hello-world - namespace: default - resourceVersion: "1125" - selfLink: /apia/build.openshift.io/v1/namespaces/default/buildconfigs/ruby-hello-world - uid: 4a10e762-3064-11e5-8da2-080027c5bfa9 - spec: - output: - to: - kind: ImageStreamTag - name: ruby-hello-world:latest - namespace: test - resources: {} - source: - git: - uri: https://github.com/openshift/ruby-hello-world - type: Git - strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: ruby-22-centos7:latest - namespace: master - type: Docker - triggers: - - github: - secret: q_ZtlnBcu7ca48ie8dNi - type: GitHub - - generic: - secret: 3kYKtANjVRCOPoM0uLNp - type: Generic - - imageChange: - lastTriggeredImageID: centos/ruby-22-centos7:latest - type: ImageChange - status: - lastVersion: 1 -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - creationTimestamp: 2015-07-24T07:41:19Z - labels: - name: ruby-sample-build - template: application-template-stibuild - name: ruby-sample-build - namespace: test - resourceVersion: "9848" - selfLink: /apia/build.openshift.io/v1/namespaces/test/buildconfigs/ruby-sample-build - uid: 5f52f442-31d7-11e5-868e-080027c5bfa9 - spec: - output: - to: - kind: ImageStreamTag - name: origin-ruby-sample:latest - namespace: another - resources: {} - source: - git: - uri: 
https://github.com/openshift/ruby-hello-world.git - type: Git - strategy: - sourceStrategy: - from: - kind: ImageStreamTag - name: ruby-22-centos7:latest - namespace: master - type: Source - triggers: - - github: - secret: secret101 - type: GitHub - - generic: - secret: secret101 - type: Generic - - imageChange: - lastTriggeredImageID: centos/ruby-22-centos7:latest - type: ImageChange - status: - lastVersion: 1 -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - creationTimestamp: 2015-07-22T12:07:57Z - name: ruby-sample-build-invalidtag - namespace: test - resourceVersion: "1605" - selfLink: /apia/build.openshift.io/v1/namespaces/test/buildconfigs/ruby-sample-build-invalidtag - uid: 4a633dd5-306a-11e5-8da2-080027c5bfa9 - spec: - output: - to: - kind: ImageStreamTag - name: origin-ruby-sample:latest - resources: {} - source: - git: - uri: git://github.com/openshift/ruby-hello-world.git - type: Git - strategy: - sourceStrategy: - from: - kind: DockerImage - name: centos/ruby-22-centos7 - incremental: true - type: Source - triggers: - - imageChange: {} - type: ImageChange - status: - lastVersion: 0 -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - creationTimestamp: 2015-07-22T12:07:57Z - name: ruby-sample-build-validtag - namespace: test - resourceVersion: "1604" - selfLink: /apia/build.openshift.io/v1/namespaces/test/buildconfigs/ruby-sample-build-validtag - uid: 4a623b89-306a-11e5-8da2-080027c5bfa9 - spec: - output: - to: - kind: ImageStreamTag - name: origin-ruby-sample:latest - resources: {} - source: - git: - uri: git://github.com/openshift/ruby-hello-world.git - type: Git - strategy: - sourceStrategy: - from: - kind: DockerImage - name: centos/ruby-22-centos7 - incremental: true - type: Source - triggers: - - imageChange: {} - type: ImageChange - status: - lastVersion: 0 -kind: List -metadata: {} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/buildchain/test/multiple-trigger-bcs.yaml b/vendor/github.com/openshift/oc/pkg/cli/admin/buildchain/test/multiple-trigger-bcs.yaml deleted file mode 100644 index 6d8eb1b41992..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/buildchain/test/multiple-trigger-bcs.yaml +++ /dev/null @@ -1,220 +0,0 @@ -# Sets up a multi-parent build config tree -# centos/ruby-22-centos7:latest -# -> bc - parent1 (input, trigger) -# -> parent1img:latest -# -> bc - child1 (input) -# -> bc - child2 (input, trigger) -# -> bc - child3 (input) -# -> bc - parent2 (input, trigger) -# -> parent2img:latest -# -> bc - child1 (trigger) -# -> bc - child3 (trigger) -# -> bc - parent3 (input, trigger) -# -> parent3img:latest -# -> bc - child2 (trigger) -# -> bc - child3 (trigger) -# -# bc child1 has [input] parent1img, and [trigger] parent2img -# bc child2 has [input, trigger] parent1img, and [trigger] parent3img -# bc child3 has [input] parent1img, [trigger] parent2img, [trigger] parent3img -# -apiVersion: v1 -items: -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - name: parent1 - namespace: test - spec: - output: - to: - kind: ImageStreamTag - name: parent1img:latest - resources: {} - source: - git: - uri: https://github.com/openshift/ruby-hello-world.git - type: Git - strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: ruby-22-centos7:latest - type: Docker - triggers: - - github: - secret: q_ZtlnBcu7ca48ie8dNi - type: GitHub - - generic: - secret: 3kYKtANjVRCOPoM0uLNp - type: Generic - - imageChange: {} - type: ImageChange -- apiVersion: build.openshift.io/v1 - kind: 
BuildConfig - metadata: - name: parent2 - namespace: test - spec: - output: - to: - kind: ImageStreamTag - name: parent2img:latest - resources: {} - source: - git: - uri: https://github.com/openshift/ruby-hello-world.git - type: Git - strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: ruby-22-centos7:latest - type: Docker - triggers: - - github: - secret: q_ZtlnBcu7ca48ie8dNi - type: GitHub - - generic: - secret: 3kYKtANjVRCOPoM0uLNp - type: Generic - - imageChange: {} - type: ImageChange -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - name: parent3 - namespace: test - spec: - output: - to: - kind: ImageStreamTag - name: parent3img:latest - resources: {} - source: - git: - uri: https://github.com/openshift/ruby-hello-world.git - type: Git - strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: ruby-22-centos7:latest - type: Docker - triggers: - - github: - secret: q_ZtlnBcu7ca48ie8dNi - type: GitHub - - generic: - secret: 3kYKtANjVRCOPoM0uLNp - type: Generic - - imageChange: {} - type: ImageChange -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - name: child1 - namespace: test - spec: - output: - to: - kind: ImageStreamTag - name: child1img:latest - source: - git: - uri: https://github.com/openshift/ruby-hello-world.git - type: Git - strategy: - sourceStrategy: - from: - kind: ImageStreamTag - name: parent1img:latest - type: Source - triggers: - - github: - secret: secret101 - type: GitHub - - generic: - secret: secret101 - type: Generic - - imageChange: - from: - name: parent2img:latest - kind: ImageStreamTag - type: ImageChange -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - name: child2 - namespace: test - spec: - output: - to: - kind: ImageStreamTag - name: child2img:latest - source: - git: - uri: https://github.com/openshift/ruby-hello-world.git - type: Git - strategy: - sourceStrategy: - from: - kind: DockerImage - name: centos/ruby-22-centos7:latest - type: Source - triggers: - - github: - secret: secret101 - type: GitHub - - generic: - secret: secret101 - type: Generic - - imageChange: - from: - name: parent1img:latest - kind: ImageStreamTag - type: ImageChange - - imageChange: - from: - name: parent3img:latest - kind: ImageStreamTag - type: ImageChange -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - name: child3 - namespace: test - spec: - output: - to: - kind: ImageStreamTag - name: child3img:latest - source: - git: - uri: https://github.com/openshift/ruby-hello-world.git - type: Git - strategy: - sourceStrategy: - from: - kind: ImageStreamTag - name: parent1img:latest - type: Source - triggers: - - github: - secret: secret101 - type: GitHub - - generic: - secret: secret101 - type: Generic - - imageChange: - from: - name: parent2img:latest - kind: ImageStreamTag - type: ImageChange - - imageChange: - from: - name: parent3img:latest - kind: ImageStreamTag - type: ImageChange -kind: List -metadata: {} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/buildchain/test/single-namespace-bcs.yaml b/vendor/github.com/openshift/oc/pkg/cli/admin/buildchain/test/single-namespace-bcs.yaml deleted file mode 100644 index 187125d29829..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/buildchain/test/single-namespace-bcs.yaml +++ /dev/null @@ -1,143 +0,0 @@ -apiVersion: v1 -items: -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - creationTimestamp: 2015-07-22T11:25:00Z - name: ruby-hello-world - namespace: test - resourceVersion: 
"1125" - selfLink: /apis/build.openshift.io/v1/namespaces/test/buildconfigs/ruby-hello-world - uid: 4a10e762-3064-11e5-8da2-080027c5bfa9 - spec: - output: - to: - kind: ImageStreamTag - name: ruby-hello-world:latest - resources: {} - source: - git: - uri: https://github.com/openshift/ruby-hello-world - type: Git - strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: ruby-22-centos7:latest - type: Docker - triggers: - - github: - secret: q_ZtlnBcu7ca48ie8dNi - type: GitHub - - generic: - secret: 3kYKtANjVRCOPoM0uLNp - type: Generic - - imageChange: - lastTriggeredImageID: centos/ruby-22-centos7:latest - type: ImageChange - status: - lastVersion: 1 -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - creationTimestamp: 2015-07-24T07:41:19Z - labels: - name: ruby-sample-build - template: application-template-stibuild - name: ruby-sample-build - namespace: test - resourceVersion: "9848" - selfLink: /apis/build.openshift.io/v1/namespaces/test/buildconfigs/ruby-sample-build - uid: 5f52f442-31d7-11e5-868e-080027c5bfa9 - spec: - output: - to: - kind: ImageStreamTag - name: origin-ruby-sample:latest - resources: {} - source: - git: - uri: https://github.com/openshift/ruby-hello-world.git - type: Git - strategy: - sourceStrategy: - from: - kind: ImageStreamTag - name: ruby-22-centos7:latest - type: Source - triggers: - - github: - secret: secret101 - type: GitHub - - generic: - secret: secret101 - type: Generic - - imageChange: - lastTriggeredImageID: centos/ruby-22-centos7:latest - type: ImageChange - status: - lastVersion: 1 -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - creationTimestamp: 2015-07-22T12:07:57Z - name: ruby-sample-build-invalidtag - namespace: test - resourceVersion: "1605" - selfLink: /apis/build.openshift.io/v1/namespaces/test/buildconfigs/ruby-sample-build-invalidtag - uid: 4a633dd5-306a-11e5-8da2-080027c5bfa9 - spec: - output: - to: - kind: ImageStreamTag - name: origin-ruby-sample:latest - resources: {} - source: - git: - uri: git://github.com/openshift/ruby-hello-world.git - type: Git - strategy: - sourceStrategy: - from: - kind: DockerImage - name: centos/ruby-22-centos7 - incremental: true - type: Source - triggers: - - imageChange: {} - type: ImageChange - status: - lastVersion: 0 -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - creationTimestamp: 2015-07-22T12:07:57Z - name: ruby-sample-build-validtag - namespace: test - resourceVersion: "1604" - selfLink: /apis/build.openshift.io/v1/namespaces/test/buildconfigs/ruby-sample-build-validtag - uid: 4a623b89-306a-11e5-8da2-080027c5bfa9 - spec: - output: - to: - kind: ImageStreamTag - name: origin-ruby-sample:latest - resources: {} - source: - git: - uri: git://github.com/openshift/ruby-hello-world.git - type: Git - strategy: - sourceStrategy: - from: - kind: DockerImage - name: centos/ruby-22-centos7 - incremental: true - type: Source - triggers: - - imageChange: {} - type: ImageChange - status: - lastVersion: 0 -kind: List -metadata: {} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/cert/cert.go b/vendor/github.com/openshift/oc/pkg/cli/admin/cert/cert.go deleted file mode 100644 index ee928feed233..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/cert/cert.go +++ /dev/null @@ -1,37 +0,0 @@ -package cert - -import ( - "github.com/spf13/cobra" - - "k8s.io/cli-runtime/pkg/genericclioptions" - cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" -) - -const CertRecommendedName = "ca" - -// NewCmdCert implements the OpenShift cli 
ca command -func NewCmdCert(name, fullName string, streams genericclioptions.IOStreams) *cobra.Command { - // Parent command to which all subcommands are added. - cmds := &cobra.Command{ - Use: name, - Long: "Manage certificates and keys", - Short: "", - Run: cmdutil.DefaultSubCommandRun(streams.ErrOut), - Deprecated: "and will be removed in the future version", - Hidden: true, - } - - subCommands := []*cobra.Command{ - NewCommandEncrypt(EncryptCommandName, fullName+" "+EncryptCommandName, streams), - NewCommandDecrypt(DecryptCommandName, fullName+" "+DecryptCommandName, fullName+" "+EncryptCommandName, streams), - } - - for _, cmd := range subCommands { - // Unsetting Short description will not show this command in help - cmd.Short = "" - cmd.Deprecated = "and will be removed in the future version" - cmds.AddCommand(cmd) - } - - return cmds -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/cert/decrypt.go b/vendor/github.com/openshift/oc/pkg/cli/admin/cert/decrypt.go deleted file mode 100644 index 5f7a0c49ceea..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/cert/decrypt.go +++ /dev/null @@ -1,157 +0,0 @@ -package cert - -import ( - "crypto/x509" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/openshift/library-go/pkg/certs" - "github.com/openshift/oc/pkg/helpers/term" - "github.com/spf13/cobra" - - "k8s.io/cli-runtime/pkg/genericclioptions" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/util/templates" -) - -const DecryptCommandName = "decrypt" - -type DecryptOptions struct { - // EncryptedFile is a file containing an encrypted PEM block. - EncryptedFile string - // EncryptedData is a byte slice containing an encrypted PEM block. - EncryptedData []byte - // EncryptedReader is used to read an encrypted PEM block if no EncryptedFile or EncryptedData is provided. Cannot be a terminal reader. - EncryptedReader io.Reader - - // DecryptedFile is a destination file to write decrypted data to. - DecryptedFile string - // DecryptedWriter is used to write decrypted data to if no DecryptedFile is provided - DecryptedWriter io.Writer - - // KeyFile is a file containing a PEM block with the password to use to decrypt the data - KeyFile string -} - -var decryptExample = templates.Examples(` - # Decrypt an encrypted file to a cleartext file: - %[1]s --key=secret.key --in=secret.encrypted --out=secret.decrypted - - # Decrypt from stdin to stdout: - %[1]s --key=secret.key < secret2.encrypted > secret2.decrypted`) - -func NewDecryptOptions(streams genericclioptions.IOStreams) *DecryptOptions { - return &DecryptOptions{ - EncryptedReader: streams.In, - DecryptedWriter: streams.Out, - } -} - -func NewCommandDecrypt(commandName string, fullName, encryptFullName string, streams genericclioptions.IOStreams) *cobra.Command { - o := NewDecryptOptions(streams) - cmd := &cobra.Command{ - Use: commandName, - Short: fmt.Sprintf("Decrypt data encrypted with %q", encryptFullName), - Example: fmt.Sprintf(decryptExample, fullName), - Run: func(cmd *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Validate(args)) - kcmdutil.CheckErr(o.Decrypt()) - }, - } - - cmd.Flags().StringVar(&o.EncryptedFile, "in", o.EncryptedFile, fmt.Sprintf("File containing encrypted data, in the format written by %q.", encryptFullName)) - cmd.Flags().StringVar(&o.DecryptedFile, "out", o.DecryptedFile, "File to write the decrypted data to. 
Written to stdout if omitted.") - - cmd.Flags().StringVar(&o.KeyFile, "key", o.KeyFile, fmt.Sprintf("The file to read the decrypting key from. Must be a PEM file in the format written by %q.", encryptFullName)) - - // autocompletion hints - cmd.MarkFlagFilename("in") - cmd.MarkFlagFilename("out") - cmd.MarkFlagFilename("key") - - return cmd -} - -func (o *DecryptOptions) Validate(args []string) error { - if len(args) != 0 { - return errors.New("no arguments are supported") - } - - if len(o.EncryptedFile) == 0 && len(o.EncryptedData) == 0 && (o.EncryptedReader == nil || term.IsTerminalReader(o.EncryptedReader)) { - return errors.New("no input data specified") - } - if len(o.EncryptedFile) > 0 && len(o.EncryptedData) > 0 { - return errors.New("cannot specify both an input file and data") - } - - if len(o.KeyFile) == 0 { - return errors.New("no key specified") - } - - return nil -} - -func (o *DecryptOptions) Decrypt() error { - // Get PEM data block - var data []byte - switch { - case len(o.EncryptedFile) > 0: - if d, err := ioutil.ReadFile(o.EncryptedFile); err != nil { - return err - } else { - data = d - } - case len(o.EncryptedData) > 0: - data = o.EncryptedData - case o.EncryptedReader != nil && !term.IsTerminalReader(o.EncryptedReader): - if d, err := ioutil.ReadAll(o.EncryptedReader); err != nil { - return err - } else { - data = d - } - } - if len(data) == 0 { - return fmt.Errorf("no input data specified") - } - dataBlock, ok := certs.BlockFromBytes(data, certs.StringSourceEncryptedBlockType) - if !ok { - return fmt.Errorf("input does not contain a valid PEM block of type %q", certs.StringSourceEncryptedBlockType) - } - - // Get password - keyBlock, ok, err := certs.BlockFromFile(o.KeyFile, certs.StringSourceKeyBlockType) - if err != nil { - return err - } - if !ok { - return fmt.Errorf("%s does not contain a valid PEM block of type %q", o.KeyFile, certs.StringSourceKeyBlockType) - } - if len(keyBlock.Bytes) == 0 { - return fmt.Errorf("%s does not contain a key", o.KeyFile) - } - password := keyBlock.Bytes - - // Decrypt - plaintext, err := x509.DecryptPEMBlock(dataBlock, password) - if err != nil { - return err - } - - // Write decrypted data - switch { - case len(o.DecryptedFile) > 0: - if err := ioutil.WriteFile(o.DecryptedFile, plaintext, os.FileMode(0600)); err != nil { - return err - } - case o.DecryptedWriter != nil: - fmt.Fprint(o.DecryptedWriter, string(plaintext)) - if term.IsTerminalWriter(o.DecryptedWriter) { - fmt.Fprintln(o.DecryptedWriter) - } - } - - return nil -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/cert/encrypt.go b/vendor/github.com/openshift/oc/pkg/cli/admin/cert/encrypt.go deleted file mode 100644 index 39c51ed5ef15..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/cert/encrypt.go +++ /dev/null @@ -1,208 +0,0 @@ -package cert - -import ( - "crypto/rand" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "unicode" - "unicode/utf8" - - "github.com/openshift/library-go/pkg/certs" - - "github.com/openshift/oc/pkg/helpers/term" - "github.com/spf13/cobra" - - "k8s.io/cli-runtime/pkg/genericclioptions" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/util/templates" -) - -const EncryptCommandName = "encrypt" - -type EncryptOptions struct { - // CleartextFile contains cleartext data to encrypt. - CleartextFile string - // CleartextData is cleartext data to encrypt. 
- CleartextData []byte - // CleartextReader reads cleartext data to encrypt if CleartextReader and CleartextFile are unspecified. - CleartextReader io.Reader - - // EncryptedFile has encrypted data written to it. - EncryptedFile string - // EncryptedWriter has encrypted data written to it if EncryptedFile is unspecified. - EncryptedWriter io.Writer - - // KeyFile contains the password in PEM format (as previously written by GenKeyFile) - KeyFile string - // GenKeyFile indicates a key should be generated and written - GenKeyFile string - - // PromptWriter is used to write status and prompt messages - PromptWriter io.Writer -} - -var encryptExample = templates.Examples(` - # Encrypt the content of secret.txt with a generated key: - %[1]s --genkey=secret.key --in=secret.txt --out=secret.encrypted - - # Encrypt the content of secret2.txt with an existing key: - %[1]s --key=secret.key < secret2.txt > secret2.encrypted`) - -func NewEncryptOptions(streams genericclioptions.IOStreams) *EncryptOptions { - return &EncryptOptions{ - CleartextReader: streams.In, - EncryptedWriter: streams.Out, - PromptWriter: streams.ErrOut, - } -} - -func NewCommandEncrypt(commandName string, fullName string, streams genericclioptions.IOStreams) *cobra.Command { - o := NewEncryptOptions(streams) - cmd := &cobra.Command{ - Use: commandName, - Short: "Encrypt data with AES-256-CBC encryption", - Example: fmt.Sprintf(encryptExample, fullName), - Run: func(cmd *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Validate(args)) - kcmdutil.CheckErr(o.Encrypt()) - }, - } - - cmd.Flags().StringVar(&o.CleartextFile, "in", o.CleartextFile, "File containing the data to encrypt. Read from stdin if omitted.") - cmd.Flags().StringVar(&o.EncryptedFile, "out", o.EncryptedFile, "File to write the encrypted data to. 
Written to stdout if omitted.")
-
-	cmd.Flags().StringVar(&o.KeyFile, "key", o.KeyFile, "File containing the encrypting key, in the format written by --genkey.")
-	cmd.Flags().StringVar(&o.GenKeyFile, "genkey", o.GenKeyFile, "File to write a randomly generated key to.")
-
-	// autocompletion hints
-	cmd.MarkFlagFilename("in")
-	cmd.MarkFlagFilename("out")
-	cmd.MarkFlagFilename("key")
-	cmd.MarkFlagFilename("genkey")
-
-	return cmd
-}
-
-func (o *EncryptOptions) Validate(args []string) error {
-	if len(args) != 0 {
-		return errors.New("no arguments are supported")
-	}
-
-	if len(o.CleartextFile) == 0 && len(o.CleartextData) == 0 && o.CleartextReader == nil {
-		return errors.New("an input file, data, or reader is required")
-	}
-	if len(o.CleartextFile) > 0 && len(o.CleartextData) > 0 {
-		return errors.New("cannot specify both an input file and data")
-	}
-
-	if len(o.EncryptedFile) == 0 && o.EncryptedWriter == nil {
-		return errors.New("an output file or writer is required")
-	}
-
-	if len(o.GenKeyFile) > 0 && len(o.KeyFile) > 0 {
-		return errors.New("either --genkey or --key may be specified, not both")
-	}
-	if len(o.GenKeyFile) == 0 && len(o.KeyFile) == 0 {
-		return errors.New("--genkey or --key is required")
-	}
-
-	return nil
-}
-
-func (o *EncryptOptions) Encrypt() error {
-	// Get data
-	var data []byte
-	var warnWhitespace = true
-	switch {
-	case len(o.CleartextFile) > 0:
-		if d, err := ioutil.ReadFile(o.CleartextFile); err != nil {
-			return err
-		} else {
-			data = d
-		}
-	case len(o.CleartextData) > 0:
-		// Don't warn in cases where we're explicitly being given the data to use
-		warnWhitespace = false
-		data = o.CleartextData
-	case o.CleartextReader != nil && term.IsTerminalReader(o.CleartextReader) && o.PromptWriter != nil:
-		// Read a single line from stdin with prompting
-		data = []byte(term.PromptForString(o.CleartextReader, o.PromptWriter, "Data to encrypt: "))
-	case o.CleartextReader != nil:
-		// Read data from stdin without prompting (allows binary data and piping)
-		if d, err := ioutil.ReadAll(o.CleartextReader); err != nil {
-			return err
-		} else {
-			data = d
-		}
-	}
-	if warnWhitespace && (o.PromptWriter != nil) && (len(data) > 0) {
-		r1, _ := utf8.DecodeRune(data)
-		r2, _ := utf8.DecodeLastRune(data)
-		if unicode.IsSpace(r1) || unicode.IsSpace(r2) {
-			fmt.Fprintln(o.PromptWriter, "Warning: Data includes leading or trailing whitespace, which will be included in the encrypted value")
-		}
-	}
-
-	// Get key
-	var key []byte
-	switch {
-	case len(o.KeyFile) > 0:
-		if block, ok, err := certs.BlockFromFile(o.KeyFile, certs.StringSourceKeyBlockType); err != nil {
-			return err
-		} else if !ok {
-			return fmt.Errorf("%s does not contain a valid PEM block of type %q", o.KeyFile, certs.StringSourceKeyBlockType)
-		} else if len(block.Bytes) == 0 {
-			return fmt.Errorf("%s does not contain a key", o.KeyFile)
-		} else {
-			key = block.Bytes
-		}
-	case len(o.GenKeyFile) > 0:
-		key = make([]byte, 32)
-		if _, err := rand.Read(key); err != nil {
-			return err
-		}
-	}
-	if len(key) == 0 {
-		return errors.New("--genkey or --key is required")
-	}
-
-	// Encrypt
-	dataBlock, err := x509.EncryptPEMBlock(rand.Reader, certs.StringSourceEncryptedBlockType, data, key, x509.PEMCipherAES256)
-	if err != nil {
-		return err
-	}
-
-	// Write data
-	if len(o.EncryptedFile) > 0 {
-		if err := certs.BlockToFile(o.EncryptedFile, dataBlock, os.FileMode(0644)); err != nil {
-			return err
-		}
-	} else if o.EncryptedWriter != nil {
-		encryptedBytes, err := certs.BlockToBytes(dataBlock)
-		if err != nil {
return err - } - n, err := o.EncryptedWriter.Write(encryptedBytes) - if err != nil { - return err - } - if n != len(encryptedBytes) { - return fmt.Errorf("could not completely write encrypted data") - } - } - - // Write key - if len(o.GenKeyFile) > 0 { - keyBlock := &pem.Block{Bytes: key, Type: certs.StringSourceKeyBlockType} - if err := certs.BlockToFile(o.GenKeyFile, keyBlock, os.FileMode(0600)); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/createbootstrapprojecttemplate/create_bootstrap_project_template.go b/vendor/github.com/openshift/oc/pkg/cli/admin/createbootstrapprojecttemplate/create_bootstrap_project_template.go deleted file mode 100644 index dd172abcc9e3..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/createbootstrapprojecttemplate/create_bootstrap_project_template.go +++ /dev/null @@ -1,79 +0,0 @@ -package createbootstrapprojecttemplate - -import ( - "errors" - - "github.com/spf13/cobra" - - "k8s.io/cli-runtime/pkg/genericclioptions" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/scheme" - "k8s.io/kubernetes/pkg/printers" -) - -const CreateBootstrapProjectTemplateCommand = "create-bootstrap-project-template" - -type CreateBootstrapProjectTemplateOptions struct { - PrintFlags *genericclioptions.PrintFlags - - Name string - Args []string - - Printer printers.ResourcePrinter - - genericclioptions.IOStreams -} - -func NewCreateBootstrapProjectTemplateOptions(streams genericclioptions.IOStreams) *CreateBootstrapProjectTemplateOptions { - return &CreateBootstrapProjectTemplateOptions{ - PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme).WithDefaultOutput("json"), - Name: DefaultTemplateName, - IOStreams: streams, - } -} - -func NewCommandCreateBootstrapProjectTemplate(f kcmdutil.Factory, commandName string, fullName string, streams genericclioptions.IOStreams) *cobra.Command { - o := NewCreateBootstrapProjectTemplateOptions(streams) - cmd := &cobra.Command{ - Use: commandName, - Short: "Create a bootstrap project template", - Run: func(cmd *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Complete(args)) - kcmdutil.CheckErr(o.Validate()) - kcmdutil.CheckErr(o.Run()) - }, - } - - cmd.Flags().StringVar(&o.Name, "name", o.Name, "The name of the template to output.") - o.PrintFlags.AddFlags(cmd) - - return cmd -} - -func (o *CreateBootstrapProjectTemplateOptions) Complete(args []string) error { - o.Args = args - var err error - o.Printer, err = o.PrintFlags.ToPrinter() - if err != nil { - return err - } - return nil -} - -func (o *CreateBootstrapProjectTemplateOptions) Validate() error { - if len(o.Args) != 0 { - return errors.New("no arguments are supported") - } - if len(o.Name) == 0 { - return errors.New("--name must be provided") - } - - return nil -} - -func (o *CreateBootstrapProjectTemplateOptions) Run() error { - template := DefaultTemplate() - template.Name = o.Name - - return o.Printer.PrintObj(template, o.Out) -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/createbootstrapprojecttemplate/sample_template.go b/vendor/github.com/openshift/oc/pkg/cli/admin/createbootstrapprojecttemplate/sample_template.go deleted file mode 100644 index c7ef29837ca6..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/createbootstrapprojecttemplate/sample_template.go +++ /dev/null @@ -1,72 +0,0 @@ -package createbootstrapprojecttemplate - -import ( - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/runtime" - 
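
Editor's note: the deleted encrypt/decrypt commands above are thin wrappers over Go's PEM encryption helpers. A minimal sketch of the same round-trip, assuming only the standard library (x509.EncryptPEMBlock/DecryptPEMBlock are deprecated in current Go but are what the removed code used); the block type string here is illustrative, not the certs.StringSourceEncryptedBlockType constant:

    package main

    import (
    	"bytes"
    	"crypto/rand"
    	"crypto/x509"
    	"fmt"
    )

    func main() {
    	// 32-byte random key, as the deleted --genkey path generated.
    	key := make([]byte, 32)
    	if _, err := rand.Read(key); err != nil {
    		panic(err)
    	}

    	plaintext := []byte("secret data")

    	// Encrypt into a PEM block with AES-256-CBC.
    	block, err := x509.EncryptPEMBlock(rand.Reader, "ENCRYPTED STRING", plaintext, key, x509.PEMCipherAES256)
    	if err != nil {
    		panic(err)
    	}

    	// Decrypt with the same key and check the round-trip.
    	out, err := x509.DecryptPEMBlock(block, key)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(bytes.Equal(out, plaintext)) // true
    }
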
"k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" - - "github.com/openshift/api/annotations" - projectv1 "github.com/openshift/api/project/v1" - templatev1 "github.com/openshift/api/template/v1" -) - -const ( - DefaultTemplateName = "project-request" - - AdminRoleName = "admin" - - ProjectNameParam = "PROJECT_NAME" - ProjectDisplayNameParam = "PROJECT_DISPLAYNAME" - ProjectDescriptionParam = "PROJECT_DESCRIPTION" - ProjectAdminUserParam = "PROJECT_ADMIN_USER" - ProjectRequesterParam = "PROJECT_REQUESTING_USER" - - ProjectRequester = "openshift.io/requester" -) - -var ( - parameters = []string{ProjectNameParam, ProjectDisplayNameParam, ProjectDescriptionParam, ProjectAdminUserParam, ProjectRequesterParam} -) - -func DefaultTemplate() *templatev1.Template { - scheme := runtime.NewScheme() - utilruntime.Must(rbacv1.AddToScheme(scheme)) - utilruntime.Must(projectv1.Install(scheme)) - utilruntime.Must(templatev1.Install(scheme)) - codec := serializer.NewCodecFactory(scheme).LegacyCodec(scheme.PrioritizedVersionsAllGroups()...) - - ret := &templatev1.Template{} - ret.Name = DefaultTemplateName - - ns := "${" + ProjectNameParam + "}" - - project := &projectv1.Project{} - project.Name = ns - project.Annotations = map[string]string{ - annotations.OpenShiftDescription: "${" + ProjectDescriptionParam + "}", - annotations.OpenShiftDisplayName: "${" + ProjectDisplayNameParam + "}", - ProjectRequester: "${" + ProjectRequesterParam + "}", - } - objBytes, err := runtime.Encode(codec, project) - if err != nil { - panic(err) - } - ret.Objects = append(ret.Objects, runtime.RawExtension{Raw: objBytes}) - - binding := rbacv1helpers.NewRoleBindingForClusterRole(AdminRoleName, ns).Users("${" + ProjectAdminUserParam + "}").BindingOrDie() - objBytes, err = runtime.Encode(codec, &binding) - if err != nil { - panic(err) - } - ret.Objects = append(ret.Objects, runtime.RawExtension{Raw: objBytes}) - - for _, parameterName := range parameters { - parameter := templatev1.Parameter{} - parameter.Name = parameterName - ret.Parameters = append(ret.Parameters, parameter) - } - - return ret -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/createerrortemplate/create_error_template.go b/vendor/github.com/openshift/oc/pkg/cli/admin/createerrortemplate/create_error_template.go deleted file mode 100644 index bb204c8461cc..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/createerrortemplate/create_error_template.go +++ /dev/null @@ -1,114 +0,0 @@ -package createerrortemplate - -import ( - "errors" - "io" - - "github.com/spf13/cobra" - - "k8s.io/cli-runtime/pkg/genericclioptions" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/util/templates" -) - -const CreateErrorTemplateCommand = "create-error-template" - -var errorLongDescription = templates.LongDesc(` - Create a template for customizing the error page - - This command creates a basic template to use as a starting point for - customizing the authentication error page. Save the output to a file and edit - the template to change the look and feel or add content. - - To use the template, set oauthConfig.templates.error in the master - configuration to point to the template file. 
For example,
-
-	    oauthConfig:
-	      templates:
-	        error: templates/error.html
-	`)
-
-type CreateErrorTemplateOptions struct {
-	genericclioptions.IOStreams
-}
-
-func NewCreateErrorTemplateOptions(streams genericclioptions.IOStreams) *CreateErrorTemplateOptions {
-	return &CreateErrorTemplateOptions{
-		IOStreams: streams,
-	}
-}
-
-func NewCommandCreateErrorTemplate(f kcmdutil.Factory, commandName string, fullName string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewCreateErrorTemplateOptions(streams)
-	cmd := &cobra.Command{
-		Use:   commandName,
-		Short: "Create an error page template",
-		Long:  errorLongDescription,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Validate(args))
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	return cmd
-}
-
-func (o *CreateErrorTemplateOptions) Validate(args []string) error {
-	if len(args) != 0 {
-		return errors.New("no arguments are supported")
-	}
-
-	return nil
-}
-
-func (o *CreateErrorTemplateOptions) Run() error {
-	_, err := io.WriteString(o.Out, ErrorPageTemplateExample)
-	return err
-}
-
-// ErrorPageTemplateExample is a basic template for customizing the error page.
-const ErrorPageTemplateExample = `
-	[HTML preamble lost in extraction: doctype, html and head tags, the title "Error", inline styles, and the opening body tag]
-	{{ if eq .ErrorCode "mapping_claim_error" }}
-	Could not create your user. Contact your administrator to resolve this issue.
-	{{ else }}
-	{{ .Error }}
-	{{ end }}
-	[closing markup lost in extraction: body and html closing tags]
-`
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/createkubeconfig/create_kubeconfig.go b/vendor/github.com/openshift/oc/pkg/cli/admin/createkubeconfig/create_kubeconfig.go
deleted file mode 100644
index a9ce2c9d74c9..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/createkubeconfig/create_kubeconfig.go
+++ /dev/null
@@ -1,249 +0,0 @@
-package createkubeconfig
-
-import (
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-
-	"github.com/spf13/cobra"
-	"k8s.io/klog"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/client-go/util/cert"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	"github.com/openshift/library-go/pkg/crypto"
-	"github.com/openshift/oc/pkg/helpers/kubeconfig"
-
-	"k8s.io/client-go/tools/clientcmd"
-	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-)
-
-const CreateKubeConfigCommandName = "create-kubeconfig"
-
-var createKubeConfigLongDesc = templates.LongDesc(`
-	Creates a .kubeconfig file at <--kubeconfig> that looks like this:
-
-	    clusters:
-	    - cluster:
-	        certificate-authority-data:
-	        server: <--master>
-	        name: <--cluster>
-	    - cluster:
-	        certificate-authority-data:
-	        server: <--public-master>
-	        name: public-<--cluster>
-	    contexts:
-	    - context:
-	        cluster: <--cluster>
-	        user: <--user>
-	        namespace: <--namespace>
-	        name: <--context>
-	    - context:
-	        cluster: public-<--cluster>
-	        user: <--user>
-	        namespace: <--namespace>
-	        name: public-<--context>
-	    current-context: <--context>
-	    kind: Config
-	    users:
-	    - name: <--user>
-	      user:
-	        client-certificate-data:
-	        client-key-data: `)
-
-type CreateKubeConfigOptions struct {
-	APIServerURL       string
-	PublicAPIServerURL string
-	APIServerCAFiles   []string
-
-	CertFile string
-	KeyFile  string
-
-	ContextNamespace string
-
-	KubeConfigFile string
-
-	genericclioptions.IOStreams
-}
-
-func NewCreateKubeConfigOptions(streams genericclioptions.IOStreams) *CreateKubeConfigOptions {
-	return &CreateKubeConfigOptions{
-		APIServerURL:     "https://localhost:8443",
-		APIServerCAFiles: []string{"openshift.local.config/master/ca.crt"},
-		ContextNamespace: metav1.NamespaceDefault,
-		KubeConfigFile:   ".kubeconfig",
-		IOStreams:        streams,
-	}
-}
-
-func NewCommandCreateKubeConfig(commandName string, fullName string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewCreateKubeConfigOptions(streams)
-	cmd := &cobra.Command{
-		Use:   commandName,
-		Short: "Create a basic .kubeconfig file from client certs",
-		Long:  createKubeConfigLongDesc,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Validate(args))
-			if _, err := o.CreateKubeConfig(); err != nil {
-				kcmdutil.CheckErr(err)
-			}
-		},
-	}
-
-	cmd.Flags().StringVar(&o.APIServerURL, "master", o.APIServerURL, "The API server's URL.")
-	cmd.Flags().StringVar(&o.PublicAPIServerURL, "public-master", o.PublicAPIServerURL, "The public-facing API server's URL (if applicable).")
-	cmd.Flags().StringSliceVar(&o.APIServerCAFiles, "certificate-authority", o.APIServerCAFiles, "Files containing signing authorities to use to verify the API server's serving certificate.")
-	cmd.Flags().StringVar(&o.CertFile, "client-certificate", o.CertFile, "The client cert file.")
-	cmd.Flags().StringVar(&o.KeyFile, "client-key", o.KeyFile, "The client key file.")
-	cmd.Flags().StringVar(&o.ContextNamespace, "namespace", o.ContextNamespace, "Namespace for this context in .kubeconfig.")
-	cmd.Flags().StringVar(&o.KubeConfigFile, "kubeconfig",
o.KubeConfigFile, "Path for the resulting .kubeconfig file.") - - // autocompletion hints - cmd.MarkFlagFilename("certificate-authority") - cmd.MarkFlagFilename("client-certificate") - cmd.MarkFlagFilename("client-key") - cmd.MarkFlagFilename("kubeconfig") - - return cmd -} - -func (o CreateKubeConfigOptions) Validate(args []string) error { - if len(args) != 0 { - return errors.New("no arguments are supported") - } - if len(o.KubeConfigFile) == 0 { - return errors.New("kubeconfig must be provided") - } - if len(o.CertFile) == 0 { - return errors.New("client-certificate must be provided") - } - if len(o.KeyFile) == 0 { - return errors.New("client-key must be provided") - } - if len(o.APIServerCAFiles) == 0 { - return errors.New("certificate-authority must be provided") - } else { - for _, caFile := range o.APIServerCAFiles { - if _, err := cert.NewPool(caFile); err != nil { - return fmt.Errorf("certificate-authority must be a valid certificate file: %v", err) - } - } - } - if len(o.ContextNamespace) == 0 { - return errors.New("namespace must be provided") - } - if len(o.APIServerURL) == 0 { - return errors.New("master must be provided") - } - - return nil -} - -func (o CreateKubeConfigOptions) CreateKubeConfig() (*clientcmdapi.Config, error) { - klog.V(4).Infof("creating a .kubeconfig with: %#v", o) - - // read all the referenced filenames - caData, err := readFiles(o.APIServerCAFiles, []byte("\n")) - if err != nil { - return nil, err - } - certData, err := ioutil.ReadFile(o.CertFile) - if err != nil { - return nil, err - } - keyData, err := ioutil.ReadFile(o.KeyFile) - if err != nil { - return nil, err - } - certConfig, err := crypto.GetTLSCertificateConfig(o.CertFile, o.KeyFile) - if err != nil { - return nil, err - } - - // determine all the nicknames - clusterNick, err := kubeconfig.GetClusterNicknameFromURL(o.APIServerURL) - if err != nil { - return nil, err - } - userNick, err := kubeconfig.GetUserNicknameFromCert(clusterNick, certConfig.Certs...) 
- if err != nil { - return nil, err - } - contextNick := kubeconfig.GetContextNickname(o.ContextNamespace, clusterNick, userNick) - - credentials := make(map[string]*clientcmdapi.AuthInfo) - credentials[userNick] = &clientcmdapi.AuthInfo{ - ClientCertificateData: certData, - ClientKeyData: keyData, - } - - // normalize the provided server to a format expected by config - o.APIServerURL, err = kubeconfig.NormalizeServerURL(o.APIServerURL) - if err != nil { - return nil, err - } - - clusters := make(map[string]*clientcmdapi.Cluster) - clusters[clusterNick] = &clientcmdapi.Cluster{ - Server: o.APIServerURL, - CertificateAuthorityData: caData, - } - - contexts := make(map[string]*clientcmdapi.Context) - contexts[contextNick] = &clientcmdapi.Context{Cluster: clusterNick, AuthInfo: userNick, Namespace: o.ContextNamespace} - - createPublic := (len(o.PublicAPIServerURL) > 0) && o.APIServerURL != o.PublicAPIServerURL - if createPublic { - publicClusterNick, err := kubeconfig.GetClusterNicknameFromURL(o.PublicAPIServerURL) - if err != nil { - return nil, err - } - publicContextNick := kubeconfig.GetContextNickname(o.ContextNamespace, publicClusterNick, userNick) - - clusters[publicClusterNick] = &clientcmdapi.Cluster{ - Server: o.PublicAPIServerURL, - CertificateAuthorityData: caData, - } - contexts[publicContextNick] = &clientcmdapi.Context{Cluster: publicClusterNick, AuthInfo: userNick, Namespace: o.ContextNamespace} - } - - kubeConfig := &clientcmdapi.Config{ - Clusters: clusters, - AuthInfos: credentials, - Contexts: contexts, - CurrentContext: contextNick, - } - - klog.V(3).Infof("Generating '%s' API client config as %s\n", userNick, o.KubeConfigFile) - // Ensure the parent dir exists - if err := os.MkdirAll(filepath.Dir(o.KubeConfigFile), os.FileMode(0755)); err != nil { - return nil, err - } - if err := clientcmd.WriteToFile(*kubeConfig, o.KubeConfigFile); err != nil { - return nil, err - } - - return kubeConfig, nil -} - -// readFiles returns a byte array containing the contents of all the given filenames, -// optionally separated by a delimiter, or an error if any of the files cannot be read -func readFiles(srcFiles []string, separator []byte) ([]byte, error) { - data := []byte{} - for _, srcFile := range srcFiles { - fileData, err := ioutil.ReadFile(srcFile) - if err != nil { - return nil, err - } - if len(data) > 0 && len(separator) > 0 { - data = append(data, separator...) - } - data = append(data, fileData...) - } - return data, nil -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/createlogintemplate/create_login_template.go b/vendor/github.com/openshift/oc/pkg/cli/admin/createlogintemplate/create_login_template.go deleted file mode 100644 index 68d6cbfdbe56..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/createlogintemplate/create_login_template.go +++ /dev/null @@ -1,136 +0,0 @@ -package createlogintemplate - -import ( - "errors" - "io" - - "github.com/spf13/cobra" - - "k8s.io/cli-runtime/pkg/genericclioptions" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/util/templates" -) - -const CreateLoginTemplateCommand = "create-login-template" - -var longDescription = templates.LongDesc(` - Create a template for customizing the login page - - This command creates a basic template to use as a starting point for - customizing the login page. Save the output to a file and edit the template to - change the look and feel or add content. Be careful not to remove any parameter - values inside curly braces. 
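
Editor's note: most of the deleted create-kubeconfig command above is assembling a clientcmdapi.Config. A condensed sketch of that shape, assuming k8s.io/client-go is on the module path; all names and values are placeholders:

    package main

    import (
    	"k8s.io/client-go/tools/clientcmd"
    	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    )

    func main() {
    	cfg := clientcmdapi.NewConfig()
    	cfg.Clusters["local"] = &clientcmdapi.Cluster{
    		Server:                   "https://localhost:8443",
    		CertificateAuthorityData: []byte("placeholder CA PEM"),
    	}
    	cfg.AuthInfos["admin"] = &clientcmdapi.AuthInfo{
    		ClientCertificateData: []byte("placeholder cert PEM"),
    		ClientKeyData:         []byte("placeholder key PEM"),
    	}
    	cfg.Contexts["default/local/admin"] = &clientcmdapi.Context{
    		Cluster:   "local",
    		AuthInfo:  "admin",
    		Namespace: "default",
    	}
    	cfg.CurrentContext = "default/local/admin"

    	// Persist the file the same way the deleted code did.
    	if err := clientcmd.WriteToFile(*cfg, ".kubeconfig"); err != nil {
    		panic(err)
    	}
    }
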
-
-	To use the template, set oauthConfig.templates.login in the master
-	configuration to point to the template file. For example,
-
-	    oauthConfig:
-	      templates:
-	        login: templates/login.html
-	`)
-
-type CreateLoginTemplateOptions struct {
-	genericclioptions.IOStreams
-}
-
-func NewCreateLoginTemplateOptions(streams genericclioptions.IOStreams) *CreateLoginTemplateOptions {
-	return &CreateLoginTemplateOptions{
-		IOStreams: streams,
-	}
-}
-
-func NewCommandCreateLoginTemplate(f kcmdutil.Factory, commandName string, fullName string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewCreateLoginTemplateOptions(streams)
-	cmd := &cobra.Command{
-		Use:   commandName,
-		Short: "Create a login template",
-		Long:  longDescription,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Validate(args))
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	return cmd
-}
-
-func (o CreateLoginTemplateOptions) Validate(args []string) error {
-	if len(args) != 0 {
-		return errors.New("no arguments are supported")
-	}
-
-	return nil
-}
-
-func (o *CreateLoginTemplateOptions) Run() error {
-	_, err := io.WriteString(o.Out, LoginTemplateExample)
-	return err
-}
-
-// LoginTemplateExample is a basic template for customizing the login page.
-const LoginTemplateExample = `
-	[HTML preamble lost in extraction: doctype, html and head tags, the title "Login", inline styles, and the opening body tag]
-	{{ if .Error }}
-	{{ .Error }}
-	{{ end }}
-
-	[login form markup lost in extraction: the original form element, its input fields, and submit control are not recoverable here]
-	[closing markup lost in extraction: body and html closing tags]
-`
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/createproviderselectiontemplate/create_provider_selection_template.go b/vendor/github.com/openshift/oc/pkg/cli/admin/createproviderselectiontemplate/create_provider_selection_template.go
deleted file mode 100644
index c8a564bb082a..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/createproviderselectiontemplate/create_provider_selection_template.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package createproviderselectiontemplate
-
-import (
-	"errors"
-	"io"
-
-	"github.com/spf13/cobra"
-
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-)
-
-const CreateProviderSelectionTemplateCommand = "create-provider-selection-template"
-
-var providerSelectionLongDescription = templates.LongDesc(`
-	Create a template for customizing the provider selection page
-
-	This command creates a basic template to use as a starting point for
-	customizing the login provider selection page. Save the output to a file and edit
-	the template to change the look and feel or add content. Be careful not to remove
-	any parameter values inside curly braces.
-
-	To use the template, set oauthConfig.templates.providerSelection in the master
-	configuration to point to the template file. For example,
-
-	    oauthConfig:
-	      templates:
-	        providerSelection: templates/provider-selection.html
-	`)
-
-type CreateProviderSelectionTemplateOptions struct {
-	genericclioptions.IOStreams
-}
-
-func NewCreateProviderSelectionTemplateOptions(streams genericclioptions.IOStreams) *CreateProviderSelectionTemplateOptions {
-	return &CreateProviderSelectionTemplateOptions{
-		IOStreams: streams,
-	}
-}
-
-func NewCommandCreateProviderSelectionTemplate(f kcmdutil.Factory, commandName string, fullName string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewCreateProviderSelectionTemplateOptions(streams)
-	cmd := &cobra.Command{
-		Use:   commandName,
-		Short: "Create a provider selection template",
-		Long:  providerSelectionLongDescription,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Validate(args))
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	return cmd
-}
-
-func (o CreateProviderSelectionTemplateOptions) Validate(args []string) error {
-	if len(args) != 0 {
-		return errors.New("no arguments are supported")
-	}
-
-	return nil
-}
-
-func (o *CreateProviderSelectionTemplateOptions) Run() error {
-	_, err := io.WriteString(o.Out, SelectProviderTemplateExample)
-	return err
-}
-
-// SelectProviderTemplateExample is a basic template for customizing the provider selection page.
-const SelectProviderTemplateExample = `
-	[HTML preamble lost in extraction: doctype, html and head tags, the title "Login", inline styles, and the opening body tag]
-	{{ range $provider := .Providers }}
-	[anchor markup for each provider link lost in extraction]
-	{{ if eq $provider.Name "anypassword" }}
-	Log in with any username and password
-	{{ else }}
-	{{$provider.Name}}
-	{{ end }}
- {{ end }} - - - -` diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/examples/examples_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/groups/examples/examples_test.go deleted file mode 100644 index 91fbcd538098..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/examples/examples_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package examples - -import ( - "bytes" - "io/ioutil" - "testing" - - legacyconfigv1 "github.com/openshift/api/legacyconfig/v1" - "github.com/openshift/library-go/pkg/config/helpers" - "github.com/openshift/oc/pkg/helpers/groupsync/ldap" -) - -func TestLDAPSyncConfigFixtures(t *testing.T) { - fixtures := []string{} - - // build a list of common configurations for all schemas - schemas := []string{"rfc2307", "ad", "augmented-ad"} - for _, schema := range schemas { - fixtures = append(fixtures, schema+"/sync-config.yaml") - fixtures = append(fixtures, schema+"/sync-config-dn-everywhere.yaml") - fixtures = append(fixtures, schema+"/sync-config-partially-user-defined.yaml") - fixtures = append(fixtures, schema+"/sync-config-user-defined.yaml") - fixtures = append(fixtures, schema+"/sync-config-paging.yaml") - } - fixtures = append(fixtures, "rfc2307/sync-config-tolerating.yaml") - - for _, fixture := range fixtures { - yamlConfig, err := ioutil.ReadFile("./../../../../../testdata/ldap/" + fixture) - if err != nil { - t.Errorf("could not read fixture at %q: %v", fixture, err) - continue - } - - uncast, err := helpers.ReadYAML(bytes.NewBuffer([]byte(yamlConfig)), legacyconfigv1.InstallLegacy) - if err != nil { - t.Error(err) - } - ldapConfig := uncast.(*legacyconfigv1.LDAPSyncConfig) - - if results := ldap.ValidateLDAPSyncConfig(ldapConfig); len(results.Errors) > 0 { - t.Errorf("validation of fixture at %q failed with %d errors:", fixture, len(results.Errors)) - for _, err := range results.Errors { - t.Error(err) - } - } - } -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/groups.go b/vendor/github.com/openshift/oc/pkg/cli/admin/groups/groups.go deleted file mode 100644 index 236bb94a78ef..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/groups.go +++ /dev/null @@ -1,37 +0,0 @@ -package groups - -import ( - "github.com/spf13/cobra" - "k8s.io/cli-runtime/pkg/genericclioptions" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/util/templates" - - "github.com/openshift/oc/pkg/cli/admin/groups/new" - "github.com/openshift/oc/pkg/cli/admin/groups/sync" - "github.com/openshift/oc/pkg/cli/admin/groups/users" -) - -const GroupsRecommendedName = "groups" - -var groupLong = templates.LongDesc(` - Manage groups in your cluster - - Groups are sets of users that can be used when describing policy.`) - -func NewCmdGroups(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - // Parent command to which all subcommands are added. 
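
Editor's note: NewCmdGroups below follows the standard cobra pattern: a parent command with no behavior of its own that only routes to subcommands. A stripped-down sketch with stand-in names:

    package main

    import (
    	"fmt"

    	"github.com/spf13/cobra"
    )

    func main() {
    	parent := &cobra.Command{
    		Use:   "groups",
    		Short: "Manage groups",
    		// No Run function: invoking the bare parent prints help,
    		// similar in effect to kcmdutil.DefaultSubCommandRun in the deleted code.
    	}

    	parent.AddCommand(&cobra.Command{
    		Use:   "new GROUP [USER ...]",
    		Short: "Create a new group",
    		RunE: func(cmd *cobra.Command, args []string) error {
    			if len(args) < 1 {
    				return fmt.Errorf("you must specify at least one argument: GROUP [USER ...]")
    			}
    			fmt.Printf("would create group %q with users %v\n", args[0], args[1:])
    			return nil
    		},
    	})

    	if err := parent.Execute(); err != nil {
    		fmt.Println(err)
    	}
    }
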
- cmds := &cobra.Command{ - Use: name, - Short: "Manage groups", - Long: groupLong, - Run: kcmdutil.DefaultSubCommandRun(streams.ErrOut), - } - - cmds.AddCommand(new.NewCmdNewGroup(new.NewGroupRecommendedName, fullName+" "+new.NewGroupRecommendedName, f, streams)) - cmds.AddCommand(users.NewCmdAddUsers(users.AddRecommendedName, fullName+" "+users.AddRecommendedName, f, streams)) - cmds.AddCommand(users.NewCmdRemoveUsers(users.RemoveRecommendedName, fullName+" "+users.RemoveRecommendedName, f, streams)) - cmds.AddCommand(sync.NewCmdSync(sync.SyncRecommendedName, fullName+" "+sync.SyncRecommendedName, f, streams)) - cmds.AddCommand(sync.NewCmdPrune(sync.PruneRecommendedName, fullName+" "+sync.PruneRecommendedName, f, streams)) - - return cmds -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/new/new.go b/vendor/github.com/openshift/oc/pkg/cli/admin/groups/new/new.go deleted file mode 100644 index 50e8356bab43..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/new/new.go +++ /dev/null @@ -1,147 +0,0 @@ -package new - -import ( - "errors" - "fmt" - - "github.com/spf13/cobra" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/cli-runtime/pkg/printers" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/scheme" - "k8s.io/kubernetes/pkg/kubectl/util/templates" - - userv1 "github.com/openshift/api/user/v1" - userv1typedclient "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" -) - -const NewGroupRecommendedName = "new" - -var ( - newLong = templates.LongDesc(` - Create a new group. - - This command will create a new group with an optional list of users.`) - - newExample = templates.Examples(` - # Add a group with no users - %[1]s my-group - - # Add a group with two users - %[1]s my-group user1 user2 - - # Add a group with one user and shorter output - %[1]s my-group user1 -o name`) -) - -type NewGroupOptions struct { - PrintFlags *genericclioptions.PrintFlags - Printer printers.ResourcePrinter - - GroupClient userv1typedclient.GroupsGetter - - Group string - Users []string - - DryRun bool - - genericclioptions.IOStreams -} - -func NewNewGroupOptions(streams genericclioptions.IOStreams) *NewGroupOptions { - return &NewGroupOptions{ - PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), - IOStreams: streams, - } -} - -func NewCmdNewGroup(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - o := NewNewGroupOptions(streams) - cmd := &cobra.Command{ - Use: name + " GROUP [USER ...]", - Short: "Create a new group", - Long: newLong, - Example: fmt.Sprintf(newExample, fullName), - Run: func(cmd *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Complete(f, cmd, args)) - kcmdutil.CheckErr(o.Validate()) - kcmdutil.CheckErr(o.Run()) - }, - } - o.PrintFlags.AddFlags(cmd) - kcmdutil.AddDryRunFlag(cmd) - - return cmd -} - -func (o *NewGroupOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error { - if len(args) < 1 { - return errors.New("You must specify at least one argument: GROUP [USER ...]") - } - - o.Group = args[0] - if len(args) > 1 { - o.Users = append(o.Users, args[1:]...) 
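
Editor's note: the Run method further below deduplicates o.Users with sets.String while preserving argument order. A condensed sketch of that idiom, assuming k8s.io/apimachinery is available:

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/util/sets"
    )

    // dedupe keeps the first occurrence of each name, in input order.
    func dedupe(names []string) []string {
    	seen := sets.String{}
    	out := []string{}
    	for _, n := range names {
    		if seen.Has(n) {
    			continue
    		}
    		seen.Insert(n)
    		out = append(out, n)
    	}
    	return out
    }

    func main() {
    	fmt.Println(dedupe([]string{"user1", "user2", "user1"})) // [user1 user2]
    }
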
- } - - clientConfig, err := f.ToRESTConfig() - if err != nil { - return err - } - o.GroupClient, err = userv1typedclient.NewForConfig(clientConfig) - if err != nil { - return err - } - - o.DryRun = kcmdutil.GetDryRunFlag(cmd) - if o.DryRun { - o.PrintFlags.Complete("%s (dry run)") - } - o.Printer, err = o.PrintFlags.ToPrinter() - if err != nil { - return err - } - - return nil -} - -func (o *NewGroupOptions) Validate() error { - if len(o.Group) == 0 { - return fmt.Errorf("group is required") - } - - return nil -} - -func (o *NewGroupOptions) Run() error { - group := &userv1.Group{ - // this is ok because we know exactly how we want to be serialized - TypeMeta: metav1.TypeMeta{APIVersion: userv1.SchemeGroupVersion.String(), Kind: "Group"}, - ObjectMeta: metav1.ObjectMeta{ - Name: o.Group, - }, - } - - usedNames := sets.String{} - for _, user := range o.Users { - if usedNames.Has(user) { - continue - } - usedNames.Insert(user) - - group.Users = append(group.Users, user) - } - - if !o.DryRun { - var err error - group, err = o.GroupClient.Groups().Create(group) - if err != nil { - return err - } - } - - return o.Printer.PrintObj(group, o.Out) -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/ad.go b/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/ad.go deleted file mode 100644 index c049eea8d698..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/ad.go +++ /dev/null @@ -1,54 +0,0 @@ -package sync - -import ( - legacyconfigv1 "github.com/openshift/api/legacyconfig/v1" - "github.com/openshift/library-go/pkg/security/ldapclient" - ldapquery "github.com/openshift/library-go/pkg/security/ldapquery" - "github.com/openshift/oc/pkg/helpers/groupsync" - "github.com/openshift/oc/pkg/helpers/groupsync/ad" - "github.com/openshift/oc/pkg/helpers/groupsync/interfaces" -) - -var _ SyncBuilder = &ADBuilder{} -var _ PruneBuilder = &ADBuilder{} - -type ADBuilder struct { - ClientConfig ldapclient.Config - Config *legacyconfigv1.ActiveDirectoryConfig - - adLDAPInterface *ad.ADLDAPInterface -} - -func (b *ADBuilder) GetGroupLister() (interfaces.LDAPGroupLister, error) { - return b.getADLDAPInterface() -} - -func (b *ADBuilder) GetGroupNameMapper() (interfaces.LDAPGroupNameMapper, error) { - return &syncgroups.DNLDAPGroupNameMapper{}, nil -} - -func (b *ADBuilder) GetUserNameMapper() (interfaces.LDAPUserNameMapper, error) { - return syncgroups.NewUserNameMapper(b.Config.UserNameAttributes), nil -} - -func (b *ADBuilder) GetGroupMemberExtractor() (interfaces.LDAPMemberExtractor, error) { - return b.getADLDAPInterface() -} - -func (b *ADBuilder) getADLDAPInterface() (*ad.ADLDAPInterface, error) { - if b.adLDAPInterface != nil { - return b.adLDAPInterface, nil - } - - userQuery, err := ldapquery.NewLDAPQuery(ToLDAPQuery(b.Config.AllUsersQuery)) - if err != nil { - return nil, err - } - b.adLDAPInterface = ad.NewADLDAPInterface(b.ClientConfig, - userQuery, b.Config.GroupMembershipAttributes, b.Config.UserNameAttributes) - return b.adLDAPInterface, nil -} - -func (b *ADBuilder) GetGroupDetector() (interfaces.LDAPGroupDetector, error) { - return b.getADLDAPInterface() -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/augmented_ad.go b/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/augmented_ad.go deleted file mode 100644 index 01487d17258e..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/augmented_ad.go +++ /dev/null @@ -1,67 +0,0 @@ -package sync - -import ( - legacyconfigv1 
"github.com/openshift/api/legacyconfig/v1" - "github.com/openshift/library-go/pkg/security/ldapclient" - ldapquery "github.com/openshift/library-go/pkg/security/ldapquery" - "github.com/openshift/oc/pkg/helpers/groupsync" - "github.com/openshift/oc/pkg/helpers/groupsync/ad" - "github.com/openshift/oc/pkg/helpers/groupsync/interfaces" -) - -var _ SyncBuilder = &AugmentedADBuilder{} -var _ PruneBuilder = &AugmentedADBuilder{} - -type AugmentedADBuilder struct { - ClientConfig ldapclient.Config - Config *legacyconfigv1.AugmentedActiveDirectoryConfig - - augmentedADLDAPInterface *ad.AugmentedADLDAPInterface -} - -func (b *AugmentedADBuilder) GetGroupLister() (interfaces.LDAPGroupLister, error) { - return b.getAugmentedADLDAPInterface() -} - -func (b *AugmentedADBuilder) GetGroupNameMapper() (interfaces.LDAPGroupNameMapper, error) { - ldapInterface, err := b.getAugmentedADLDAPInterface() - if err != nil { - return nil, err - } - if b.Config.GroupNameAttributes != nil { - return syncgroups.NewEntryAttributeGroupNameMapper(b.Config.GroupNameAttributes, ldapInterface), nil - } - - return nil, nil -} - -func (b *AugmentedADBuilder) GetUserNameMapper() (interfaces.LDAPUserNameMapper, error) { - return syncgroups.NewUserNameMapper(b.Config.UserNameAttributes), nil -} - -func (b *AugmentedADBuilder) GetGroupMemberExtractor() (interfaces.LDAPMemberExtractor, error) { - return b.getAugmentedADLDAPInterface() -} - -func (b *AugmentedADBuilder) getAugmentedADLDAPInterface() (*ad.AugmentedADLDAPInterface, error) { - if b.augmentedADLDAPInterface != nil { - return b.augmentedADLDAPInterface, nil - } - - userQuery, err := ldapquery.NewLDAPQuery(ToLDAPQuery(b.Config.AllUsersQuery)) - if err != nil { - return nil, err - } - groupQuery, err := ldapquery.NewLDAPQueryOnAttribute(ToLDAPQuery(b.Config.AllGroupsQuery), b.Config.GroupUIDAttribute) - if err != nil { - return nil, err - } - b.augmentedADLDAPInterface = ad.NewAugmentedADLDAPInterface(b.ClientConfig, - userQuery, b.Config.GroupMembershipAttributes, b.Config.UserNameAttributes, - groupQuery, b.Config.GroupNameAttributes) - return b.augmentedADLDAPInterface, nil -} - -func (b *AugmentedADBuilder) GetGroupDetector() (interfaces.LDAPGroupDetector, error) { - return b.getAugmentedADLDAPInterface() -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/interfaces.go b/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/interfaces.go deleted file mode 100644 index 22be9c3909fa..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/interfaces.go +++ /dev/null @@ -1,52 +0,0 @@ -package sync - -import ( - legacyconfigv1 "github.com/openshift/api/legacyconfig/v1" - userv1client "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" - "github.com/openshift/library-go/pkg/security/ldapquery" - "github.com/openshift/oc/pkg/helpers/groupsync/interfaces" -) - -// SyncBuilder describes an object that can build all the schema-specific parts of an LDAPGroupSyncer -type SyncBuilder interface { - GetGroupLister() (interfaces.LDAPGroupLister, error) - GetGroupNameMapper() (interfaces.LDAPGroupNameMapper, error) - GetUserNameMapper() (interfaces.LDAPUserNameMapper, error) - GetGroupMemberExtractor() (interfaces.LDAPMemberExtractor, error) -} - -// PruneBuilder describes an object that can build all the schema-specific parts of an LDAPGroupPruner -type PruneBuilder interface { - GetGroupLister() (interfaces.LDAPGroupLister, error) - GetGroupNameMapper() (interfaces.LDAPGroupNameMapper, error) - GetGroupDetector() 
(interfaces.LDAPGroupDetector, error)
-}
-
-// GroupNameRestrictions describes an object that holds blacklists and whitelists
-type GroupNameRestrictions interface {
-	GetWhitelist() []string
-	GetBlacklist() []string
-}
-
-// OpenShiftGroupNameRestrictions describes an object that holds blacklists and whitelists as well as
-// a client that can retrieve OpenShift groups to satisfy those lists
-type OpenShiftGroupNameRestrictions interface {
-	GroupNameRestrictions
-	GetClient() userv1client.GroupInterface
-}
-
-// MappedNameRestrictions describes an object that holds user name mappings for a group sync job
-type MappedNameRestrictions interface {
-	GetGroupNameMappings() map[string]string
-}
-
-func ToLDAPQuery(in legacyconfigv1.LDAPQuery) ldapquery.SerializeableLDAPQuery {
-	return ldapquery.SerializeableLDAPQuery{
-		BaseDN:       in.BaseDN,
-		Scope:        in.Scope,
-		DerefAliases: in.DerefAliases,
-		TimeLimit:    in.TimeLimit,
-		Filter:       in.Filter,
-		PageSize:     in.PageSize,
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/prune.go b/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/prune.go
deleted file mode 100644
index 52a5344192bd..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/prune.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package sync
-
-import (
-	"errors"
-	"fmt"
-
-	"github.com/spf13/cobra"
-
-	kerrs "k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/apimachinery/pkg/util/validation/field"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	legacyconfigv1 "github.com/openshift/api/legacyconfig/v1"
-	userv1typedclient "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1"
-	"github.com/openshift/library-go/pkg/security/ldapclient"
-	"github.com/openshift/oc/pkg/helpers/groupsync"
-	"github.com/openshift/oc/pkg/helpers/groupsync/ldap"
-)
-
-const PruneRecommendedName = "prune"
-
-var (
-	pruneLong = templates.LongDesc(`
-	Prune OpenShift Groups referencing missing records on an external provider.
-
-	In order to prune OpenShift Group records using those from an external provider, determine which Groups you wish
-	to prune. For instance, all or some groups may be selected from the current Groups stored in OpenShift that have
-	been synced previously. Any combination of a literal whitelist, a whitelist file and a blacklist file is supported.
-	The path to a sync configuration file that was used for syncing the groups in question is required in order to
-	describe how data is requested from the external record store.
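
Editor's note: the deleted interfaces.go above pairs each LDAP schema with a builder; prune.go below selects one with a switch over the sync config, and each concrete builder carries a compile-time guard like `var _ SyncBuilder = &ADBuilder{}`. A condensed sketch of the pattern with invented stand-in types:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // pruneBuilder mirrors the shape of the deleted PruneBuilder, trimmed to one method.
    type pruneBuilder interface {
    	GetGroupLister() ([]string, error)
    }

    type adBuilder struct{}

    func (adBuilder) GetGroupLister() ([]string, error) { return []string{"cn=admins"}, nil }

    // Compile-time interface guard, the same idiom used by the deleted builders.
    var _ pruneBuilder = adBuilder{}

    // syncConfig stands in for legacyconfigv1.LDAPSyncConfig's union of schema configs.
    type syncConfig struct {
    	ActiveDirectory *struct{}
    }

    func buildPruneBuilder(cfg syncConfig) (pruneBuilder, error) {
    	switch {
    	case cfg.ActiveDirectory != nil:
    		return adBuilder{}, nil
    	default:
    		return nil, errors.New("invalid sync config type")
    	}
    }

    func main() {
    	b, err := buildPruneBuilder(syncConfig{ActiveDirectory: &struct{}{}})
    	if err != nil {
    		panic(err)
    	}
    	groups, _ := b.GetGroupLister()
    	fmt.Println(groups) // [cn=admins]
    }
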
Default behavior is to indicate all OpenShift groups
-	for which the external record does not exist. To run the pruning process and commit the results, use the --confirm
-	flag.`)
-
-	pruneExamples = templates.Examples(`
-	# Prune all orphaned groups
-	%[1]s --sync-config=/path/to/ldap-sync-config.yaml --confirm
-
-	# Prune all orphaned groups except the ones from the blacklist file
-	%[1]s --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
-
-	# Prune all orphaned groups from a list of specific groups specified in a whitelist file
-	%[1]s --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
-
-	# Prune all orphaned groups from a list of specific groups specified in a whitelist
-	%[1]s groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm`)
-)
-
-type PruneOptions struct {
-	// Config is the LDAP sync config read from file
-	Config     *legacyconfigv1.LDAPSyncConfig
-	ConfigFile string
-
-	// Whitelist are the names of OpenShift group or LDAP group UIDs to use for syncing
-	Whitelist     []string
-	WhitelistFile string
-
-	// Blacklist are the names of OpenShift group or LDAP group UIDs to exclude
-	Blacklist     []string
-	BlacklistFile string
-
-	// Confirm determines whether or not to write to OpenShift
-	Confirm bool
-
-	// GroupClient is the interface used to interact with OpenShift Group objects
-	GroupClient userv1typedclient.GroupsGetter
-
-	genericclioptions.IOStreams
-}
-
-func NewPruneOptions(streams genericclioptions.IOStreams) *PruneOptions {
-	return &PruneOptions{
-		Whitelist: []string{},
-		IOStreams: streams,
-	}
-}
-
-func NewCmdPrune(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewPruneOptions(streams)
-	cmd := &cobra.Command{
-		Use:     fmt.Sprintf("%s [WHITELIST] [--whitelist=WHITELIST-FILE] [--blacklist=BLACKLIST-FILE] --sync-config=CONFIG-SOURCE", name),
-		Short:   "Remove old OpenShift groups referencing missing records on an external provider",
-		Long:    pruneLong,
-		Example: fmt.Sprintf(pruneExamples, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Validate())
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	cmd.Flags().StringVar(&o.WhitelistFile, "whitelist", o.WhitelistFile, "path to the group whitelist file")
-	cmd.MarkFlagFilename("whitelist", "txt")
-	cmd.Flags().StringVar(&o.BlacklistFile, "blacklist", o.BlacklistFile, "path to the group blacklist file")
-	cmd.MarkFlagFilename("blacklist", "txt")
-	// TODO(deads): enable this once we're able to support string slice elements that have commas
-	// cmd.Flags().StringSliceVar(&o.Blacklist, "blacklist-group", o.Blacklist, "group to blacklist")
-	cmd.Flags().StringVar(&o.ConfigFile, "sync-config", o.ConfigFile, "path to the sync config")
-	cmd.MarkFlagFilename("sync-config", "yaml", "yml")
-	cmd.Flags().BoolVar(&o.Confirm, "confirm", o.Confirm, "if true, modify OpenShift groups; if false, display groups")
-
-	return cmd
-}
-
-func (o *PruneOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	var err error
-	o.Config, err = decodeSyncConfigFromFile(o.ConfigFile)
-	if err != nil {
-		return err
-	}
-
-	o.Whitelist, err = buildOpenShiftGroupNameList(args, o.WhitelistFile, o.Config.LDAPGroupUIDToOpenShiftGroupNameMapping)
-	if err != nil {
-		return err
-	}
-
-	o.Blacklist, err = buildOpenShiftGroupNameList([]string{}, o.BlacklistFile,
o.Config.LDAPGroupUIDToOpenShiftGroupNameMapping) - if err != nil { - return err - } - - clientConfig, err := f.ToRESTConfig() - if err != nil { - return err - } - o.GroupClient, err = userv1typedclient.NewForConfig(clientConfig) - if err != nil { - return err - } - - return nil -} - -func (o *PruneOptions) Validate() error { - results := ldap.ValidateLDAPSyncConfig(o.Config) - if o.GroupClient == nil { - results.Errors = append(results.Errors, field.Required(field.NewPath("groupInterface"), "")) - } - // TODO(skuznets): pretty-print validation results - if len(results.Errors) > 0 { - return fmt.Errorf("validation of LDAP sync config failed: %v", results.Errors.ToAggregate()) - } - return nil -} - -// Run creates the GroupSyncer specified and runs it to sync groups -// the arguments are only here because its the only way to get the printer we need -func (o *PruneOptions) Run() error { - bindPassword, err := ldap.ResolveStringValue(o.Config.BindPassword) - if err != nil { - return err - } - clientConfig, err := ldapclient.NewLDAPClientConfig(o.Config.URL, o.Config.BindDN, bindPassword, o.Config.CA, o.Config.Insecure) - if err != nil { - return fmt.Errorf("could not determine LDAP client configuration: %v", err) - } - - pruneBuilder, err := buildPruneBuilder(clientConfig, o.Config) - if err != nil { - return err - } - - // populate schema-independent pruner fields - pruner := &syncgroups.LDAPGroupPruner{ - Host: clientConfig.Host(), - GroupClient: o.GroupClient.Groups(), - DryRun: !o.Confirm, - - Out: o.Out, - Err: o.ErrOut, - } - - listerMapper, err := getOpenShiftGroupListerMapper(clientConfig.Host(), o) - if err != nil { - return err - } - pruner.GroupLister = listerMapper - pruner.GroupNameMapper = listerMapper - - pruner.GroupDetector, err = pruneBuilder.GetGroupDetector() - if err != nil { - return err - } - - // Now we run the pruner and report any errors - pruneErrors := pruner.Prune() - return kerrs.NewAggregate(pruneErrors) - -} - -func buildPruneBuilder(clientConfig ldapclient.Config, pruneConfig *legacyconfigv1.LDAPSyncConfig) (PruneBuilder, error) { - switch { - case pruneConfig.RFC2307Config != nil: - return &RFC2307Builder{ClientConfig: clientConfig, Config: pruneConfig.RFC2307Config}, nil - case pruneConfig.ActiveDirectoryConfig != nil: - return &ADBuilder{ClientConfig: clientConfig, Config: pruneConfig.ActiveDirectoryConfig}, nil - case pruneConfig.AugmentedActiveDirectoryConfig != nil: - return &AugmentedADBuilder{ClientConfig: clientConfig, Config: pruneConfig.AugmentedActiveDirectoryConfig}, nil - default: - return nil, errors.New("invalid sync config type") - } -} - -// The following getters ensure that PruneOptions satisfies the name restriction interfaces - -func (o *PruneOptions) GetWhitelist() []string { - return o.Whitelist -} - -func (o *PruneOptions) GetBlacklist() []string { - return o.Blacklist -} - -func (o *PruneOptions) GetClient() userv1typedclient.GroupInterface { - return o.GroupClient.Groups() -} - -func (o *PruneOptions) GetGroupNameMappings() map[string]string { - return o.Config.LDAPGroupUIDToOpenShiftGroupNameMapping -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/rfc2307.go b/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/rfc2307.go deleted file mode 100644 index d3239b22ca79..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/rfc2307.go +++ /dev/null @@ -1,70 +0,0 @@ -package sync - -import ( - legacyconfigv1 "github.com/openshift/api/legacyconfig/v1" - 
"github.com/openshift/library-go/pkg/security/ldapclient" - "github.com/openshift/library-go/pkg/security/ldapquery" - "github.com/openshift/oc/pkg/helpers/groupsync" - "github.com/openshift/oc/pkg/helpers/groupsync/interfaces" - "github.com/openshift/oc/pkg/helpers/groupsync/rfc2307" - "github.com/openshift/oc/pkg/helpers/groupsync/syncerror" -) - -var _ SyncBuilder = &RFC2307Builder{} -var _ PruneBuilder = &RFC2307Builder{} - -type RFC2307Builder struct { - ClientConfig ldapclient.Config - Config *legacyconfigv1.RFC2307Config - - rfc2307LDAPInterface *rfc2307.LDAPInterface - - ErrorHandler syncerror.Handler -} - -func (b *RFC2307Builder) GetGroupLister() (interfaces.LDAPGroupLister, error) { - return b.getRFC2307LDAPInterface() -} - -func (b *RFC2307Builder) GetGroupNameMapper() (interfaces.LDAPGroupNameMapper, error) { - ldapInterface, err := b.getRFC2307LDAPInterface() - if err != nil { - return nil, err - } - if b.Config.GroupNameAttributes != nil { - return syncgroups.NewEntryAttributeGroupNameMapper(b.Config.GroupNameAttributes, ldapInterface), nil - } - - return nil, nil -} - -func (b *RFC2307Builder) GetUserNameMapper() (interfaces.LDAPUserNameMapper, error) { - return syncgroups.NewUserNameMapper(b.Config.UserNameAttributes), nil -} - -func (b *RFC2307Builder) GetGroupMemberExtractor() (interfaces.LDAPMemberExtractor, error) { - return b.getRFC2307LDAPInterface() -} - -func (b *RFC2307Builder) getRFC2307LDAPInterface() (*rfc2307.LDAPInterface, error) { - if b.rfc2307LDAPInterface != nil { - return b.rfc2307LDAPInterface, nil - } - - groupQuery, err := ldapquery.NewLDAPQueryOnAttribute(ToLDAPQuery(b.Config.AllGroupsQuery), b.Config.GroupUIDAttribute) - if err != nil { - return nil, err - } - userQuery, err := ldapquery.NewLDAPQueryOnAttribute(ToLDAPQuery(b.Config.AllUsersQuery), b.Config.UserUIDAttribute) - if err != nil { - return nil, err - } - b.rfc2307LDAPInterface = rfc2307.NewLDAPInterface(b.ClientConfig, - groupQuery, b.Config.GroupNameAttributes, b.Config.GroupMembershipAttributes, - userQuery, b.Config.UserNameAttributes, b.ErrorHandler) - return b.rfc2307LDAPInterface, nil -} - -func (b *RFC2307Builder) GetGroupDetector() (interfaces.LDAPGroupDetector, error) { - return b.getRFC2307LDAPInterface() -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/sync.go b/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/sync.go deleted file mode 100644 index 3d1a62ca3820..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/sync/sync.go +++ /dev/null @@ -1,507 +0,0 @@ -package sync - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "strings" - - "github.com/openshift/library-go/pkg/config/helpers" - - "github.com/spf13/cobra" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - kerrs "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/cli-runtime/pkg/printers" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/scheme" - "k8s.io/kubernetes/pkg/kubectl/util/templates" - - legacyconfigv1 "github.com/openshift/api/legacyconfig/v1" - userv1typedclient "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" - "github.com/openshift/library-go/pkg/security/ldapclient" - "github.com/openshift/oc/pkg/helpers/groupsync" - "github.com/openshift/oc/pkg/helpers/groupsync/interfaces" - 
"github.com/openshift/oc/pkg/helpers/groupsync/ldap" - "github.com/openshift/oc/pkg/helpers/groupsync/syncerror" -) - -const SyncRecommendedName = "sync" - -var ( - syncLong = templates.LongDesc(` - Sync OpenShift Groups with records from an external provider. - - In order to sync OpenShift Group records with those from an external provider, determine which Groups you wish - to sync and where their records live. For instance, all or some groups may be selected from the current Groups - stored in OpenShift that have been synced previously, or similarly all or some groups may be selected from those - stored on an LDAP server. The path to a sync configuration file is required in order to describe how data is - requested from the external record store and migrated to OpenShift records. Default behavior is to do a dry-run - without changing OpenShift records. Passing '--confirm' will sync all groups from the LDAP server returned by the - LDAP query templates.`) - - syncExamples = templates.Examples(` - # Sync all groups from an LDAP server - %[1]s --sync-config=/path/to/ldap-sync-config.yaml --confirm - - # Sync all groups except the ones from the blacklist file from an LDAP server - %[1]s --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm - - # Sync specific groups specified in a whitelist file with an LDAP server - %[1]s --whitelist=/path/to/whitelist.txt --sync-config=/path/to/sync-config.yaml --confirm - - # Sync all OpenShift Groups that have been synced previously with an LDAP server - %[1]s --type=openshift --sync-config=/path/to/ldap-sync-config.yaml --confirm - - # Sync specific OpenShift Groups if they have been synced previously with an LDAP server - %[1]s groups/group1 groups/group2 groups/group3 --sync-config=/path/to/sync-config.yaml --confirm`) -) - -// GroupSyncSource determines the source of the groups to be synced -type GroupSyncSource string - -const ( - // GroupSyncSourceLDAP determines that the groups to be synced are determined from an LDAP record - GroupSyncSourceLDAP GroupSyncSource = "ldap" - // GroupSyncSourceOpenShift determines that the groups to be synced are determined from OpenShift records - GroupSyncSourceOpenShift GroupSyncSource = "openshift" -) - -var AllowedSourceTypes = []string{string(GroupSyncSourceLDAP), string(GroupSyncSourceOpenShift)} - -type SyncOptions struct { - PrintFlags *genericclioptions.PrintFlags - Printer printers.ResourcePrinter - - // Source determines the source of the list of groups to sync - Source GroupSyncSource - - // Config is the LDAP sync config read from file - Config *legacyconfigv1.LDAPSyncConfig - ConfigFile string - - // Whitelist are the names of OpenShift group or LDAP group UIDs to use for syncing - Whitelist []string - WhitelistFile string - - // Blacklist are the names of OpenShift group or LDAP group UIDs to exclude - Blacklist []string - BlacklistFile string - - Type string - - // Confirm determines whether or not to write to OpenShift - Confirm bool - - // GroupClient is the interface used to interact with OpenShift Group objects - GroupClient userv1typedclient.GroupsGetter - - genericclioptions.IOStreams -} - -func NewSyncOptions(streams genericclioptions.IOStreams) *SyncOptions { - return &SyncOptions{ - Whitelist: []string{}, - Type: string(GroupSyncSourceLDAP), - PrintFlags: genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme).WithDefaultOutput("yaml"), - IOStreams: streams, - } -} - -func NewCmdSync(name, fullName string, f kcmdutil.Factory, streams 
genericclioptions.IOStreams) *cobra.Command { - o := NewSyncOptions(streams) - cmd := &cobra.Command{ - Use: fmt.Sprintf("%s [--type=TYPE] [WHITELIST] [--whitelist=WHITELIST-FILE] --sync-config=CONFIG-FILE [--confirm]", name), - Short: "Sync OpenShift groups with records from an external provider.", - Long: syncLong, - Example: fmt.Sprintf(syncExamples, fullName), - Run: func(c *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Complete(f, args)) - kcmdutil.CheckErr(o.Validate()) - kcmdutil.CheckErr(o.Run()) - }, - } - - cmd.Flags().StringVar(&o.WhitelistFile, "whitelist", o.WhitelistFile, "path to the group whitelist file") - cmd.MarkFlagFilename("whitelist", "txt") - cmd.Flags().StringVar(&o.BlacklistFile, "blacklist", o.BlacklistFile, "path to the group blacklist file") - cmd.MarkFlagFilename("blacklist", "txt") - // TODO enable this when we're able to support string slice elements that have commas - // cmd.Flags().StringSliceVar(&options.Blacklist, "blacklist-group", options.Blacklist, "group to blacklist") - cmd.Flags().StringVar(&o.ConfigFile, "sync-config", o.ConfigFile, "path to the sync config") - cmd.MarkFlagFilename("sync-config", "yaml", "yml") - cmd.Flags().StringVar(&o.Type, "type", o.Type, "which groups white- and blacklist entries refer to: "+strings.Join(AllowedSourceTypes, ",")) - cmd.Flags().BoolVar(&o.Confirm, "confirm", o.Confirm, "if true, modify OpenShift groups; if false, display results of a dry-run") - - return cmd -} - -func (o *SyncOptions) Complete(f kcmdutil.Factory, args []string) error { - switch o.Type { - case string(GroupSyncSourceLDAP): - o.Source = GroupSyncSourceLDAP - case string(GroupSyncSourceOpenShift): - o.Source = GroupSyncSourceOpenShift - default: - return fmt.Errorf("unrecognized --type %q; allowed types %v", o.Type, strings.Join(AllowedSourceTypes, ",")) - } - - var err error - o.Config, err = decodeSyncConfigFromFile(o.ConfigFile) - if err != nil { - return err - } - - if o.Source == GroupSyncSourceOpenShift { - o.Whitelist, err = buildOpenShiftGroupNameList(args, o.WhitelistFile, o.Config.LDAPGroupUIDToOpenShiftGroupNameMapping) - if err != nil { - return err - } - o.Blacklist, err = buildOpenShiftGroupNameList([]string{}, o.BlacklistFile, o.Config.LDAPGroupUIDToOpenShiftGroupNameMapping) - if err != nil { - return err - } - } else { - o.Whitelist, err = buildNameList(args, o.WhitelistFile) - if err != nil { - return err - } - o.Blacklist, err = buildNameList([]string{}, o.BlacklistFile) - if err != nil { - return err - } - } - - clientConfig, err := f.ToRESTConfig() - if err != nil { - return err - } - o.GroupClient, err = userv1typedclient.NewForConfig(clientConfig) - if err != nil { - return err - } - if !o.Confirm { - o.PrintFlags.Complete("%s (dry run)") - } - o.Printer, err = o.PrintFlags.ToPrinter() - if err != nil { - return err - } - - return nil -} - -// buildOpenShiftGroupNameList builds a list of OpenShift names from file and args. -// nameMapping is used to override the OpenShift names built from file and args. -func buildOpenShiftGroupNameList(args []string, file string, nameMapping map[string]string) ([]string, error) { - rawList, err := buildNameList(args, file) - if err != nil { - return nil, err - } - - namesList, err := openshiftGroupNamesOnlyList(rawList) - if err != nil { - return nil, err - } - - // override items in namesList if present in mapping - if len(nameMapping) > 0 { - for i, name := range namesList { - if nameOverride, ok := nameMapping[name]; ok { - namesList[i] = nameOverride - } - } - } - - return namesList, 
nil -} - -// buildNameList builds a list from file and args -func buildNameList(args []string, file string) ([]string, error) { - var list []string - if len(args) > 0 { - list = append(list, args...) - } - - // unpack file from source - if len(file) != 0 { - listData, err := readLines(file) - if err != nil { - return nil, err - } - list = append(list, listData...) - } - - return list, nil -} - -func decodeSyncConfigFromFile(configFile string) (*legacyconfigv1.LDAPSyncConfig, error) { - yamlConfig, err := ioutil.ReadFile(configFile) - if err != nil { - return nil, fmt.Errorf("could not read file %s: %v", configFile, err) - } - uncast, err := helpers.ReadYAML(bytes.NewBuffer([]byte(yamlConfig)), legacyconfigv1.InstallLegacy) - if err != nil { - return nil, fmt.Errorf("could not parse file %s: %v", configFile, err) - } - ldapConfig := uncast.(*legacyconfigv1.LDAPSyncConfig) - - if err := helpers.ResolvePaths(ldap.GetStringSourceFileReferences(&ldapConfig.BindPassword), configFile); err != nil { - return nil, fmt.Errorf("could not relativize files %s: %v", configFile, err) - } - - return ldapConfig, nil -} - -// openshiftGroupNamesOnlyList returns a list that contains only the names of the groups. -// Since Group.Name cannot contain '/', the split is safe. Any resource ref that is not a group -// is skipped. -func openshiftGroupNamesOnlyList(list []string) ([]string, error) { - ret := []string{} - errs := []error{} - - for _, curr := range list { - tokens := strings.SplitN(curr, "/", 2) - if len(tokens) == 1 { - ret = append(ret, tokens[0]) - continue - } - - if tokens[0] != "group" && tokens[0] != "groups" { - errs = append(errs, fmt.Errorf("%q is not a valid OpenShift group", curr)) - continue - } - - ret = append(ret, tokens[1]) - } - - if len(errs) > 0 { - return nil, kerrs.NewAggregate(errs) - } - - return ret, nil -} - -// readLines interprets a file as plaintext and returns a string array of the lines of text in the file -func readLines(path string) ([]string, error) { - bytes, err := ioutil.ReadFile(path) - if err != nil { - return nil, fmt.Errorf("could not open file %s: %v", path, err) - } - rawLines := strings.Split(string(bytes), "\n") - var trimmedLines []string - for _, line := range rawLines { - if len(strings.TrimSpace(line)) > 0 { - trimmedLines = append(trimmedLines, strings.TrimSpace(line)) - } - } - return trimmedLines, nil -} - -func ValidateSource(source GroupSyncSource) bool { - return sets.NewString(AllowedSourceTypes...).Has(string(source)) -} - -func (o *SyncOptions) Validate() error { - if !ValidateSource(o.Source) { - return fmt.Errorf("sync source must be one of the following: %v", strings.Join(AllowedSourceTypes, ",")) - } - - results := ldap.ValidateLDAPSyncConfig(o.Config) - if o.GroupClient == nil { - results.Errors = append(results.Errors, field.Required(field.NewPath("groupInterface"), "")) - } - // TODO(skuznets): pretty-print validation results - if len(results.Errors) > 0 { - return fmt.Errorf("validation of LDAP sync config failed: %v", results.Errors.ToAggregate()) - } - return nil -} - -// CreateErrorHandler creates an error handler for the LDAP sync job -func (o *SyncOptions) CreateErrorHandler() syncerror.Handler { - components := []syncerror.Handler{} - if o.Config.RFC2307Config != nil { - if o.Config.RFC2307Config.TolerateMemberOutOfScopeErrors { - components = append(components, syncerror.NewMemberLookupOutOfBoundsSuppressor(o.ErrOut)) - } - if o.Config.RFC2307Config.TolerateMemberNotFoundErrors { - components = append(components, 
syncerror.NewMemberLookupMemberNotFoundSuppressor(o.ErrOut)) - } - } - - return syncerror.NewCompoundHandler(components...) -} - -// Run creates the GroupSyncer specified and runs it to sync groups. -// The arguments are only here because it's the only way to get the printer we need. -func (o *SyncOptions) Run() error { - bindPassword, err := ldap.ResolveStringValue(o.Config.BindPassword) - if err != nil { - return err - } - clientConfig, err := ldapclient.NewLDAPClientConfig(o.Config.URL, o.Config.BindDN, bindPassword, o.Config.CA, o.Config.Insecure) - if err != nil { - return fmt.Errorf("could not determine LDAP client configuration: %v", err) - } - - errorHandler := o.CreateErrorHandler() - - syncBuilder, err := buildSyncBuilder(clientConfig, o.Config, errorHandler) - if err != nil { - return err - } - - // populate schema-independent syncer fields - syncer := &syncgroups.LDAPGroupSyncer{ - Host: clientConfig.Host(), - GroupClient: o.GroupClient.Groups(), - DryRun: !o.Confirm, - - Out: o.Out, - Err: o.ErrOut, - } - - switch o.Source { - case GroupSyncSourceOpenShift: - // when your source of ldapGroupUIDs is from an openshift group, the mapping of ldapGroupUID to openshift group name is logically - // pinned by the existing mapping. - listerMapper, err := getOpenShiftGroupListerMapper(clientConfig.Host(), o) - if err != nil { - return err - } - syncer.GroupLister = listerMapper - syncer.GroupNameMapper = listerMapper - - case GroupSyncSourceLDAP: - syncer.GroupLister, err = getLDAPGroupLister(syncBuilder, o) - if err != nil { - return err - } - syncer.GroupNameMapper, err = getGroupNameMapper(syncBuilder, o) - if err != nil { - return err - } - - default: - return fmt.Errorf("invalid group source: %v", o.Source) - } - - syncer.GroupMemberExtractor, err = syncBuilder.GetGroupMemberExtractor() - if err != nil { - return err - } - - syncer.UserNameMapper, err = syncBuilder.GetUserNameMapper() - if err != nil { - return err - } - - // Now we run the Syncer and report any errors - openshiftGroups, syncErrors := syncer.Sync() - if !o.Confirm { - list := &unstructured.UnstructuredList{ - Object: map[string]interface{}{ - "kind": "List", - "apiVersion": "v1", - "metadata": map[string]interface{}{}, - }, - } - for _, item := range openshiftGroups { - unstructuredItem, err := runtime.DefaultUnstructuredConverter.ToUnstructured(item) - if err != nil { - return err - } - list.Items = append(list.Items, unstructured.Unstructured{Object: unstructuredItem}) - } - - if err := o.Printer.PrintObj(list, o.Out); err != nil { - return err - } - } - for _, err := range syncErrors { - fmt.Fprintf(o.ErrOut, "%s\n", err) - } - return kerrs.NewAggregate(syncErrors) -} - -func buildSyncBuilder(clientConfig ldapclient.Config, syncConfig *legacyconfigv1.LDAPSyncConfig, errorHandler syncerror.Handler) (SyncBuilder, error) { - switch { - case syncConfig.RFC2307Config != nil: - return &RFC2307Builder{ClientConfig: clientConfig, Config: syncConfig.RFC2307Config, ErrorHandler: errorHandler}, nil - case syncConfig.ActiveDirectoryConfig != nil: - return &ADBuilder{ClientConfig: clientConfig, Config: syncConfig.ActiveDirectoryConfig}, nil - case syncConfig.AugmentedActiveDirectoryConfig != nil: - return &AugmentedADBuilder{ClientConfig: clientConfig, Config: syncConfig.AugmentedActiveDirectoryConfig}, nil - default: - return nil, errors.New("invalid sync config type") - } -} - -func getOpenShiftGroupListerMapper(host string, info OpenShiftGroupNameRestrictions) (interfaces.LDAPGroupListerNameMapper, error) { - if 
len(info.GetWhitelist()) != 0 { - return syncgroups.NewOpenShiftGroupLister(info.GetWhitelist(), info.GetBlacklist(), host, info.GetClient()), nil - } else { - return syncgroups.NewAllOpenShiftGroupLister(info.GetBlacklist(), host, info.GetClient()), nil - } -} - -func getLDAPGroupLister(syncBuilder SyncBuilder, info GroupNameRestrictions) (interfaces.LDAPGroupLister, error) { - if len(info.GetWhitelist()) != 0 { - ldapWhitelist := syncgroups.NewLDAPWhitelistGroupLister(info.GetWhitelist()) - if len(info.GetBlacklist()) == 0 { - return ldapWhitelist, nil - } - return syncgroups.NewLDAPBlacklistGroupLister(info.GetBlacklist(), ldapWhitelist), nil - } - - syncLister, err := syncBuilder.GetGroupLister() - if err != nil { - return nil, err - } - if len(info.GetBlacklist()) == 0 { - return syncLister, nil - } - - return syncgroups.NewLDAPBlacklistGroupLister(info.GetBlacklist(), syncLister), nil -} - -func getGroupNameMapper(syncBuilder SyncBuilder, info MappedNameRestrictions) (interfaces.LDAPGroupNameMapper, error) { - syncNameMapper, err := syncBuilder.GetGroupNameMapper() - if err != nil { - return nil, err - } - - // if the mapping is specified, union the specified mapping with the default mapping. The specified mapping is checked first - if len(info.GetGroupNameMappings()) > 0 { - userDefinedMapper := syncgroups.NewUserDefinedGroupNameMapper(info.GetGroupNameMappings()) - if syncNameMapper == nil { - return userDefinedMapper, nil - } - return &syncgroups.UnionGroupNameMapper{GroupNameMappers: []interfaces.LDAPGroupNameMapper{userDefinedMapper, syncNameMapper}}, nil - } - return syncNameMapper, nil -} - -// The following getters ensure that SyncOptions satisfies the name restriction interfaces - -func (o *SyncOptions) GetWhitelist() []string { - return o.Whitelist -} - -func (o *SyncOptions) GetBlacklist() []string { - return o.Blacklist -} - -func (o *SyncOptions) GetClient() userv1typedclient.GroupInterface { - return o.GroupClient.Groups() -} - -func (o *SyncOptions) GetGroupNameMappings() map[string]string { - return o.Config.LDAPGroupUIDToOpenShiftGroupNameMapping -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/users/add.go b/vendor/github.com/openshift/oc/pkg/cli/admin/groups/users/add.go deleted file mode 100644 index 8346d0f56bdc..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/users/add.go +++ /dev/null @@ -1,146 +0,0 @@ -package users - -import ( - "errors" - "fmt" - - "github.com/spf13/cobra" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/cli-runtime/pkg/printers" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/scheme" - "k8s.io/kubernetes/pkg/kubectl/util/templates" - - userv1 "github.com/openshift/api/user/v1" - userv1typedclient "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" -) - -const AddRecommendedName = "add-users" - -var ( - addLong = templates.LongDesc(` - Add users to a group. 
- - This command will append unique users to the list of members for a group.`) - - addExample = templates.Examples(` - # Add user1 and user2 to my-group - %[1]s my-group user1 user2`) -) - -type AddUsersOptions struct { - GroupModificationOptions *GroupModificationOptions -} - -func NewCmdAddUsers(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - o := &AddUsersOptions{ - GroupModificationOptions: NewGroupModificationOptions(streams), - } - cmd := &cobra.Command{ - Use: name + " GROUP USER [USER ...]", - Short: "Add users to a group", - Long: addLong, - Example: fmt.Sprintf(addExample, fullName), - Run: func(cmd *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Complete(f, cmd, args)) - kcmdutil.CheckErr(o.Run()) - }, - } - o.GroupModificationOptions.PrintFlags.AddFlags(cmd) - kcmdutil.AddDryRunFlag(cmd) - - return cmd -} - -func (o *AddUsersOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error { - return o.GroupModificationOptions.Complete(f, cmd, args) -} - -func (o *AddUsersOptions) Run() error { - group, err := o.GroupModificationOptions.GroupClient.Groups().Get(o.GroupModificationOptions.Group, metav1.GetOptions{}) - if err != nil { - return err - } - - existingUsers := sets.NewString(group.Users...) - for _, user := range o.GroupModificationOptions.Users { - if existingUsers.Has(user) { - continue - } - - group.Users = append(group.Users, user) - } - - if !o.GroupModificationOptions.DryRun { - group, err = o.GroupModificationOptions.GroupClient.Groups().Update(group) - if err != nil { - return err - } - } - - return o.GroupModificationOptions.PrintObj("added", group) -} - -type GroupModificationOptions struct { - PrintFlags *genericclioptions.PrintFlags - ToPrinter func(string) (printers.ResourcePrinter, error) - - GroupClient userv1typedclient.GroupsGetter - - Group string - Users []string - DryRun bool - - genericclioptions.IOStreams -} - -func NewGroupModificationOptions(streams genericclioptions.IOStreams) *GroupModificationOptions { - return &GroupModificationOptions{ - PrintFlags: genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme), - IOStreams: streams, - } -} - -func (o *GroupModificationOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error { - if len(args) < 2 { - return errors.New("you must specify at least two arguments: GROUP USER [USER ...]") - } - - o.Group = args[0] - o.Users = append(o.Users, args[1:]...) 
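Aside: the add-users flow here is a plain get/mutate/update round trip against the user.openshift.io API. Below is a minimal sketch of that pattern, assuming a populated *rest.Config; addUsers is a hypothetical helper, not part of the vendored package.

```go
package groups

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/rest"

	userv1typedclient "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1"
)

// addUsers mirrors AddUsersOptions.Run: fetch the group, append only users
// that are not already members, then write the group back.
func addUsers(cfg *rest.Config, groupName string, users ...string) error {
	client, err := userv1typedclient.NewForConfig(cfg)
	if err != nil {
		return err
	}
	group, err := client.Groups().Get(groupName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	members := sets.NewString(group.Users...)
	for _, user := range users {
		if members.Has(user) {
			continue // already a member; keep the list unique
		}
		group.Users = append(group.Users, user)
		members.Insert(user)
	}
	_, err = client.Groups().Update(group)
	return err
}
```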
- - clientConfig, err := f.ToRESTConfig() - if err != nil { - return err - } - o.GroupClient, err = userv1typedclient.NewForConfig(clientConfig) - if err != nil { - return err - } - - o.DryRun = kcmdutil.GetDryRunFlag(cmd) - o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { - o.PrintFlags.NamePrintFlags.Operation = operation - if o.DryRun { - o.PrintFlags.Complete("%s (dry run)") - } - return o.PrintFlags.ToPrinter() - } - - return nil -} - -func (o *GroupModificationOptions) PrintObj(operation string, group *userv1.Group) error { - allTargets := fmt.Sprintf("%q", o.Users) - if len(o.Users) == 1 { - allTargets = fmt.Sprintf("%q", o.Users[0]) - } - printer, err := o.ToPrinter(fmt.Sprintf("%s: %s", operation, allTargets)) - if err != nil { - return err - } - return printer.PrintObj(group, o.Out) -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/users/remove.go b/vendor/github.com/openshift/oc/pkg/cli/admin/groups/users/remove.go deleted file mode 100644 index 952d2da5c90e..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/groups/users/remove.go +++ /dev/null @@ -1,81 +0,0 @@ -package users - -import ( - "fmt" - - "github.com/spf13/cobra" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/cli-runtime/pkg/genericclioptions" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/util/templates" -) - -const RemoveRecommendedName = "remove-users" - -var ( - removeLong = templates.LongDesc(` - Remove users from a group. - - This command will remove users from the list of members for a group.`) - - removeExample = templates.Examples(` - # Remove user1 and user2 from my-group - %[1]s my-group user1 user2`) -) - -type RemoveUsersOptions struct { - GroupModificationOptions *GroupModificationOptions -} - -func NewCmdRemoveUsers(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - o := &RemoveUsersOptions{ - GroupModificationOptions: NewGroupModificationOptions(streams), - } - cmd := &cobra.Command{ - Use: name + " GROUP USER [USER ...]", - Short: "Remove users from a group", - Long: removeLong, - Example: fmt.Sprintf(removeExample, fullName), - Run: func(cmd *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Complete(f, cmd, args)) - kcmdutil.CheckErr(o.Run()) - }, - } - o.GroupModificationOptions.PrintFlags.AddFlags(cmd) - kcmdutil.AddDryRunFlag(cmd) - - return cmd -} - -func (o *RemoveUsersOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error { - return o.GroupModificationOptions.Complete(f, cmd, args) -} - -func (o *RemoveUsersOptions) Run() error { - group, err := o.GroupModificationOptions.GroupClient.Groups().Get(o.GroupModificationOptions.Group, metav1.GetOptions{}) - if err != nil { - return err - } - - toDelete := sets.NewString(o.GroupModificationOptions.Users...) 
- newUsers := []string{} - for _, user := range group.Users { - if toDelete.Has(user) { - continue - } - - newUsers = append(newUsers, user) - } - group.Users = newUsers - - if !o.GroupModificationOptions.DryRun { - group, err = o.GroupModificationOptions.GroupClient.Groups().Update(group) - if err != nil { - return err - } - } - - return o.GroupModificationOptions.PrintObj("removed", group) -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/etcd/ttl.go b/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/etcd/ttl.go deleted file mode 100644 index a9a114bc6835..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/etcd/ttl.go +++ /dev/null @@ -1,180 +0,0 @@ -package etcd - -import ( - "fmt" - "strings" - "time" - - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/pkg/transport" - "github.com/spf13/cobra" - "golang.org/x/net/context" - "k8s.io/klog" - - "k8s.io/cli-runtime/pkg/genericclioptions" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/util/templates" -) - -var ( - internalMigrateTTLLong = templates.LongDesc(` - Attach etcd keys to v3 leases to assist in migration from etcd v2 - - This command updates keys to associate them with an etcd v3 lease. In etcd v2, keys have an - innate TTL field which has been altered in the new schema. This can be used to set a timeout - on keys migrated from the etcd v2 schema to etcd v3, and is intended to be used on events and - access tokens after that upgrade is complete. Keys that are already attached to a lease will be - ignored. If another user modifies a key while this command is running, you will need to re-run it. - - Any resource impacted by this command will be removed from etcd after the lease-duration - expires. Be VERY CAREFUL about which values you pass to --ttl-keys-prefix, and ensure you - have an up-to-date backup of your etcd database.`) - - internalMigrateTTLExample = templates.Examples(` - # Migrate TTLs for keys under /kubernetes.io/events to a 2 hour lease - %[1]s --etcd-address=localhost:2379 --ttl-keys-prefix=/kubernetes.io/events/ --lease-duration=2h`) - ) - -type MigrateTTLReferenceOptions struct { - etcdAddress string - ttlKeysPrefix string - leaseDuration time.Duration - certFile string - keyFile string - caFile string - - genericclioptions.IOStreams -} - -func NewMigrateTTLReferenceOptions(streams genericclioptions.IOStreams) *MigrateTTLReferenceOptions { - return &MigrateTTLReferenceOptions{ - IOStreams: streams, - } -} - -// NewCmdMigrateTTLs helps move etcd v2 TTL keys to etcd v3 lease keys. 
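The description above compresses the whole mechanism: grant a single lease sized by --lease-duration, then re-put each key under that lease guarded by a ModRevision compare, so a concurrent writer makes the transaction fail rather than being silently overwritten. Before the command implementation below, here is a minimal sketch of that pattern with etcd clientv3, assuming a reachable etcd v3 endpoint; the endpoint and key prefix are illustrative only.

```go
package etcddemo

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func attachLease(ctx context.Context, ttl time.Duration) error {
	client, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		return err
	}
	defer client.Close()

	// Keys attached to the lease are deleted when it expires.
	lease, err := client.Grant(ctx, int64(ttl/time.Second))
	if err != nil {
		return err
	}

	resp, err := client.Get(ctx, "/kubernetes.io/events/", clientv3.WithPrefix())
	if err != nil {
		return err
	}
	for _, kv := range resp.Kvs {
		// Re-put the key under the lease only if nothing modified it since
		// the read; otherwise report the conflict so the caller can re-run.
		txn, err := client.Txn(ctx).If(
			clientv3.Compare(clientv3.ModRevision(string(kv.Key)), "=", kv.ModRevision),
		).Then(
			clientv3.OpPut(string(kv.Key), string(kv.Value), clientv3.WithLease(lease.ID)),
		).Commit()
		if err != nil {
			return err
		}
		if !txn.Succeeded {
			fmt.Printf("conflict on %s, re-run required\n", kv.Key)
		}
	}
	return nil
}
```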
-func NewCmdMigrateTTLs(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - o := NewMigrateTTLReferenceOptions(streams) - cmd := &cobra.Command{ - Use: fmt.Sprintf("%s --etcd-address=HOST --ttl-keys-prefix=PATH", name), - Short: "Attach keys to etcd v3 leases to assist in etcd v2 migrations", - Long: internalMigrateTTLLong, - Example: fmt.Sprintf(internalMigrateTTLExample, fullName), - Run: func(cmd *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Run()) - }, - } - - cmd.Flags().StringVar(&o.etcdAddress, "etcd-address", o.etcdAddress, "Etcd address") - cmd.Flags().StringVar(&o.ttlKeysPrefix, "ttl-keys-prefix", o.ttlKeysPrefix, "Prefix for TTL keys") - cmd.Flags().DurationVar(&o.leaseDuration, "lease-duration", o.leaseDuration, "Lease duration (format: '2h', '120m', etc)") - cmd.Flags().StringVar(&o.certFile, "cert", o.certFile, "identify secure client using this TLS certificate file") - cmd.Flags().StringVar(&o.keyFile, "key", o.keyFile, "identify secure client using this TLS key file") - cmd.Flags().StringVar(&o.caFile, "cacert", o.caFile, "verify certificates of TLS-enabled secure servers using this CA bundle") - - return cmd -} - -func generateClientConfig(o *MigrateTTLReferenceOptions) (*clientv3.Config, error) { - if o.etcdAddress == "" { - return nil, fmt.Errorf("--etcd-address flag is required") - } - if o.ttlKeysPrefix == "" { - return nil, fmt.Errorf("--ttl-keys-prefix flag is required") - } - if o.leaseDuration < time.Second { - return nil, fmt.Errorf("--lease-duration must be at least one second") - } - - c := &clientv3.Config{ - Endpoints: []string{o.etcdAddress}, - DialTimeout: 5 * time.Second, - } - - var cfgtls *transport.TLSInfo - tlsinfo := transport.TLSInfo{} - if o.certFile != "" { - tlsinfo.CertFile = o.certFile - cfgtls = &tlsinfo - } - - if o.keyFile != "" { - tlsinfo.KeyFile = o.keyFile - cfgtls = &tlsinfo - } - - if o.caFile != "" { - tlsinfo.CAFile = o.caFile - cfgtls = &tlsinfo - } - - if cfgtls != nil { - klog.V(4).Infof("TLS configuration: %#v", cfgtls) - clientTLS, err := cfgtls.ClientConfig() - if err != nil { - return nil, err - } - c.TLS = clientTLS - } - return c, nil -} - -func (o *MigrateTTLReferenceOptions) Run() error { - c, err := generateClientConfig(o) - if err != nil { - return err - } - klog.V(4).Infof("Using client config: %#v", c) - - client, err := clientv3.New(*c) - if err != nil { - return fmt.Errorf("unable to create etcd client: %v", err) - } - - // Make sure that ttlKeysPrefix is ended with "/" so that we only get children "directories". 
- if !strings.HasSuffix(o.ttlKeysPrefix, "/") { - o.ttlKeysPrefix += "/" - } - ctx := context.Background() - - objectsResp, err := client.KV.Get(ctx, o.ttlKeysPrefix, clientv3.WithPrefix()) - if err != nil { - return fmt.Errorf("unable to get objects to attach to the lease: %v", err) - } - - lease, err := client.Lease.Grant(ctx, int64(o.leaseDuration/time.Second)) - if err != nil { - return fmt.Errorf("unable to create lease: %v", err) - } - fmt.Fprintf(o.Out, "info: Lease #%d with TTL %d created\n", lease.ID, lease.TTL) - - fmt.Fprintf(o.Out, "info: Attaching lease to %d entries\n", len(objectsResp.Kvs)) - errors := 0 - alreadyAttached := 0 - for _, kv := range objectsResp.Kvs { - if kv.Lease != 0 { - alreadyAttached++ - } - txnResp, err := client.KV.Txn(ctx).If( - clientv3.Compare(clientv3.ModRevision(string(kv.Key)), "=", kv.ModRevision), - ).Then( - clientv3.OpPut(string(kv.Key), string(kv.Value), clientv3.WithLease(lease.ID)), - ).Commit() - if err != nil { - fmt.Fprintf(o.ErrOut, "error: Unable to attach lease to %s: %v\n", string(kv.Key), err) - errors++ - continue - } - if !txnResp.Succeeded { - fmt.Fprintf(o.ErrOut, "error: Unable to attach lease to %s: another client is writing to etcd. You must re-run this script.\n", string(kv.Key)) - errors++ - } - } - if alreadyAttached > 0 { - fmt.Fprintf(o.Out, "info: Lease already attached to %d entries, no change made\n", alreadyAttached) - } - if errors != 0 { - return fmt.Errorf("unable to complete migration, encountered %d errors", errors) - } - return nil -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/images/imagerefs.go b/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/images/imagerefs.go deleted file mode 100644 index 99eac41fb228..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/images/imagerefs.go +++ /dev/null @@ -1,472 +0,0 @@ -package images - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "github.com/spf13/cobra" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/cli-runtime/pkg/resource" - "k8s.io/kubernetes/pkg/credentialprovider" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" - "k8s.io/kubernetes/pkg/kubectl/util/templates" - - buildv1 "github.com/openshift/api/build/v1" - imagev1 "github.com/openshift/api/image/v1" - imagev1typedclient "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" - "github.com/openshift/library-go/pkg/image/reference" - imageref "github.com/openshift/library-go/pkg/image/reference" - "github.com/openshift/oc/pkg/cli/admin/migrate" -) - -var ( - internalMigrateImagesLong = templates.LongDesc(` - Migrate references to Docker images - - This command updates embedded Docker image references on the server in place. By default it - will update image streams and images, and may be used to update resources with a pod template - (deployments, replication controllers, daemon sets). - - References are changed by providing a mapping between a source registry and name and the - desired registry and name. Either name or registry can be set to '*' to change all values. - The registry value "docker.io" is special and will handle any image reference that refers to - the DockerHub. You may pass multiple mappings - the first matching mapping will be applied - per resource. 
- - The following resource types may be migrated by this command: - - * buildconfigs - * daemonsets - * deploymentconfigs - * images - * imagestreams - * jobs - * pods - * replicationcontrollers - * secrets (docker) - - Only images, imagestreams, and secrets are updated by default. Updating images and image - streams requires administrative privileges.`) - - internalMigrateImagesExample = templates.Examples(` - # Perform a dry-run of migrating all "docker.io" references to "myregistry.com" - %[1]s docker.io/*=myregistry.com/* - - # To actually perform the migration, the confirm flag must be appended - %[1]s docker.io/*=myregistry.com/* --confirm - - # To see more details of what will be migrated, use the loglevel and output flags - %[1]s docker.io/*=myregistry.com/* --loglevel=2 -o yaml - - # Migrate from a service IP to an internal service DNS name - %[1]s 172.30.1.54/*=registry.openshift.svc.cluster.local/* - - # Migrate from a service IP to an internal service DNS name for all deployment configs and builds - %[1]s 172.30.1.54/*=registry.openshift.svc.cluster.local/* --include=buildconfigs,deploymentconfigs`) -) - -type MigrateImageReferenceOptions struct { - migrate.ResourceOptions - - Client imagev1typedclient.ImageStreamsGetter - Mappings ImageReferenceMappings - UpdatePodSpecFn polymorphichelpers.UpdatePodSpecForObjectFunc -} - -func NewMigrateImageReferenceOptions(streams genericclioptions.IOStreams) *MigrateImageReferenceOptions { - return &MigrateImageReferenceOptions{ - ResourceOptions: *migrate.NewResourceOptions(streams).WithIncludes([]string{"imagestream", "image", "secrets"}), - } -} - -// NewCmdMigrateImageReferences implements a MigrateImages command -func NewCmdMigrateImageReferences(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - o := NewMigrateImageReferenceOptions(streams) - cmd := &cobra.Command{ - Use: fmt.Sprintf("%s REGISTRY/NAME=REGISTRY/NAME [...]", name), - Short: "Update embedded Docker image references", - Long: internalMigrateImagesLong, - Example: fmt.Sprintf(internalMigrateImagesExample, fullName), - Run: func(cmd *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Complete(f, cmd, args)) - kcmdutil.CheckErr(o.Validate()) - kcmdutil.CheckErr(o.Run()) - }, - } - - o.ResourceOptions.Bind(cmd) - - return cmd -} - -func (o *MigrateImageReferenceOptions) Complete(f kcmdutil.Factory, c *cobra.Command, args []string) error { - var remainingArgs []string - for _, s := range args { - if !strings.Contains(s, "=") { - remainingArgs = append(remainingArgs, s) - continue - } - mapping, err := ParseMapping(s) - if err != nil { - return err - } - o.Mappings = append(o.Mappings, mapping) - } - - o.UpdatePodSpecFn = polymorphichelpers.UpdatePodSpecForObjectFn - - if len(remainingArgs) > 0 { - return fmt.Errorf("all arguments must be valid FROM=TO mappings") - } - - o.ResourceOptions.SaveFn = o.save - if err := o.ResourceOptions.Complete(f, c); err != nil { - return err - } - - clientConfig, err := f.ToRESTConfig() - if err != nil { - return err - } - o.Client, err = imagev1typedclient.NewForConfig(clientConfig) - if err != nil { - return err - } - - return nil -} - -func (o MigrateImageReferenceOptions) Validate() error { - if len(o.Mappings) == 0 { - return fmt.Errorf("at least one mapping argument must be specified: REGISTRY/NAME=REGISTRY/NAME") - } - return o.ResourceOptions.Validate() -} - -func (o MigrateImageReferenceOptions) Run() error { - return o.ResourceOptions.Visitor().Visit(func(info *resource.Info) 
(migrate.Reporter, error) { - return o.transform(info.Object) - }) -} - -// save invokes the API to alter an object. The reporter passed to this method is the same returned by -// the migration visitor method (for this type, transform). It should return an error -// if the input type cannot be saved. It returns migrate.ErrRecalculate if migration should be re-run -// on the provided object. -func (o *MigrateImageReferenceOptions) save(info *resource.Info, reporter migrate.Reporter) error { - switch t := info.Object.(type) { - case *imagev1.ImageStream: - // update status first so that a subsequent spec update won't pull incorrect values - if reporter.(imageChangeInfo).status { - updated, err := o.Client.ImageStreams(t.Namespace).UpdateStatus(t) - if err != nil { - return migrate.DefaultRetriable(info, err) - } - info.Refresh(updated, true) - return migrate.ErrRecalculate - } - if reporter.(imageChangeInfo).spec { - updated, err := o.Client.ImageStreams(t.Namespace).Update(t) - if err != nil { - return migrate.DefaultRetriable(info, err) - } - info.Refresh(updated, true) - } - return nil - default: - if _, err := resource.NewHelper(info.Client, info.Mapping).Replace(info.Namespace, info.Name, false, info.Object); err != nil { - return migrate.DefaultRetriable(info, err) - } - } - return nil -} - -// transform checks image references on the provided object and returns either a reporter (indicating -// that the object was recognized and whether it was updated) or an error. -func (o *MigrateImageReferenceOptions) transform(obj runtime.Object) (migrate.Reporter, error) { - fn := o.Mappings.MapReference - switch t := obj.(type) { - case *imagev1.Image: - var changed bool - if updated := fn(t.DockerImageReference); updated != t.DockerImageReference { - changed = true - t.DockerImageReference = updated - } - return migrate.ReporterBool(changed), nil - case *imagev1.ImageStream: - var info imageChangeInfo - if len(t.Spec.DockerImageRepository) > 0 { - info.spec = updateString(&t.Spec.DockerImageRepository, fn) - } - for _, ref := range t.Spec.Tags { - if ref.From == nil || ref.From.Kind != "DockerImage" { - continue - } - info.spec = updateString(&ref.From.Name, fn) || info.spec - } - for _, events := range t.Status.Tags { - for i := range events.Items { - info.status = updateString(&events.Items[i].DockerImageReference, fn) || info.status - } - } - return info, nil - case *corev1.Secret: - switch t.Type { - case corev1.SecretTypeDockercfg: - var v credentialprovider.DockerConfig - if err := json.Unmarshal(t.Data[corev1.DockerConfigKey], &v); err != nil { - return nil, err - } - if !updateDockerConfig(v, o.Mappings.MapDockerAuthKey) { - return migrate.ReporterBool(false), nil - } - data, err := json.Marshal(v) - if err != nil { - return nil, err - } - t.Data[corev1.DockerConfigKey] = data - return migrate.ReporterBool(true), nil - case corev1.SecretTypeDockerConfigJson: - var v credentialprovider.DockerConfigJson - if err := json.Unmarshal(t.Data[corev1.DockerConfigJsonKey], &v); err != nil { - return nil, err - } - if !updateDockerConfig(v.Auths, o.Mappings.MapDockerAuthKey) { - return migrate.ReporterBool(false), nil - } - data, err := json.Marshal(v) - if err != nil { - return nil, err - } - t.Data[corev1.DockerConfigJsonKey] = data - return migrate.ReporterBool(true), nil - default: - return migrate.ReporterBool(false), nil - } - case *buildv1.BuildConfig: - var changed bool - if to := t.Spec.Output.To; to != nil && to.Kind == "DockerImage" { - changed = updateString(&to.Name, 
fn) || changed - } - for i, image := range t.Spec.Source.Images { - if image.From.Kind == "DockerImage" { - changed = updateString(&t.Spec.Source.Images[i].From.Name, fn) || changed - } - } - if c := t.Spec.Strategy.CustomStrategy; c != nil && c.From.Kind == "DockerImage" { - changed = updateString(&c.From.Name, fn) || changed - } - if c := t.Spec.Strategy.DockerStrategy; c != nil && c.From != nil && c.From.Kind == "DockerImage" { - changed = updateString(&c.From.Name, fn) || changed - } - if c := t.Spec.Strategy.SourceStrategy; c != nil && c.From.Kind == "DockerImage" { - changed = updateString(&c.From.Name, fn) || changed - } - return migrate.ReporterBool(changed), nil - default: - if o.UpdatePodSpecFn != nil { - var changed bool - supports, err := o.UpdatePodSpecFn(obj, func(spec *corev1.PodSpec) error { - changed = updatePodSpec(spec, fn) - return nil - }) - if !supports { - return nil, nil - } - if err != nil { - return nil, err - } - return migrate.ReporterBool(changed), nil - } - } - // TODO: implement use of the generic PodTemplate accessor from the factory to handle - // any object with a pod template - return nil, nil -} - -// imageChangeInfo indicates whether the spec or status of an image stream was changed. -type imageChangeInfo struct { - spec, status bool -} - -func (i imageChangeInfo) Changed() bool { - return i.spec || i.status -} - -type TransformImageFunc func(in string) string - -func updateString(value *string, fn TransformImageFunc) bool { - result := fn(*value) - if result != *value { - *value = result - return true - } - return false -} - -func updatePodSpec(spec *corev1.PodSpec, fn TransformImageFunc) bool { - var changed bool - for i := range spec.Containers { - changed = updateString(&spec.Containers[i].Image, fn) || changed - } - return changed -} - -func updateDockerConfig(cfg credentialprovider.DockerConfig, fn TransformImageFunc) bool { - var changed bool - for k, v := range cfg { - original := k - if updateString(&k, fn) { - changed = true - delete(cfg, original) - cfg[k] = v - } - } - return changed -} - -// ImageReferenceMapping represents a transformation of an image reference. -type ImageReferenceMapping struct { - FromRegistry string - FromName string - ToRegistry string - ToName string -} - -// ParseMapping converts a string in the form "(REGISTRY|*)/(NAME|*)" to an ImageReferenceMapping -// or returns a user-facing error. REGISTRY is the image registry value (hostname) or "docker.io". -// NAME is the full repository name (the path relative to the registry root). 
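To make the mapping grammar concrete before the parser below: once a FROM=TO argument is parsed, MapReference rewrites only references whose registry and name match the FROM side. A sketch in Example form, assuming it sits in the same package as ImageReferenceMappings; the first two pairs mirror the package's own tests, while the quay.io line is simply an illustrative non-match.

```go
package images

import "fmt"

func ExampleImageReferenceMappings_MapReference() {
	m := ImageReferenceMappings{{FromRegistry: "docker.io", ToRegistry: "index.docker.io"}}
	fmt.Println(m.MapReference("mysql"))                          // index.docker.io/mysql (bare names default to docker.io)
	fmt.Println(m.MapReference("docker.io/default/mysql:latest")) // index.docker.io/default/mysql:latest
	fmt.Println(m.MapReference("quay.io/foo/bar"))                // quay.io/foo/bar, registry does not match so it is left alone
}
```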
-// TODO: handle v2 repository names, which can have multiple segments (must fix -// ParseDockerImageReference) -func ParseMapping(s string) (ImageReferenceMapping, error) { - parts := strings.SplitN(s, "=", 2) - from := strings.SplitN(parts[0], "/", 2) - to := strings.SplitN(parts[1], "/", 2) - if len(from) < 2 || len(to) < 2 { - return ImageReferenceMapping{}, fmt.Errorf("all arguments must be of the form REGISTRY/NAME=REGISTRY/NAME, where registry or name may be '*' or a value") - } - if len(from[0]) == 0 { - return ImageReferenceMapping{}, fmt.Errorf("%q is not a valid source: registry must be specified (may be '*')", parts[0]) - } - if len(from[1]) == 0 { - return ImageReferenceMapping{}, fmt.Errorf("%q is not a valid source: name must be specified (may be '*')", parts[0]) - } - if len(to[0]) == 0 { - return ImageReferenceMapping{}, fmt.Errorf("%q is not a valid target: registry must be specified (may be '*')", parts[1]) - } - if len(to[1]) == 0 { - return ImageReferenceMapping{}, fmt.Errorf("%q is not a valid target: name must be specified (may be '*')", parts[1]) - } - if from[0] == "*" { - from[0] = "" - } - if from[1] == "*" { - from[1] = "" - } - if to[0] == "*" { - to[0] = "" - } - if to[1] == "*" { - to[1] = "" - } - if to[0] == "" && to[1] == "" { - return ImageReferenceMapping{}, fmt.Errorf("%q is not a valid target: at least one change must be specified", parts[1]) - } - if from[0] == to[0] && from[1] == to[1] { - return ImageReferenceMapping{}, fmt.Errorf("%q is not valid: must target at least one field to change", s) - } - return ImageReferenceMapping{ - FromRegistry: from[0], - FromName: from[1], - ToRegistry: to[0], - ToName: to[1], - }, nil -} - -// ImageReferenceMappings provide a convenience method for transforming an input reference -type ImageReferenceMappings []ImageReferenceMapping - -// MapReference transforms the provided Docker image reference if any mapping matches the -// input. If the reference cannot be parsed, it will not be modified. -func (m ImageReferenceMappings) MapReference(in string) string { - ref, err := reference.Parse(in) - if err != nil { - return in - } - registry := ref.DockerClientDefaults().Registry - name := ref.RepositoryName() - for _, mapping := range m { - if len(mapping.FromRegistry) > 0 && mapping.FromRegistry != registry { - continue - } - if len(mapping.FromName) > 0 && mapping.FromName != name { - continue - } - if len(mapping.ToRegistry) > 0 { - ref.Registry = mapping.ToRegistry - } - if len(mapping.ToName) > 0 { - ref.Namespace = "" - ref.Name = mapping.ToName - } - return ref.Exact() - } - return in -} - -// MapDockerAuthKey transforms the provided Docker Config host key if any mapping matches -// the input. If the reference cannot be parsed, it will not be modified. -func (m ImageReferenceMappings) MapDockerAuthKey(in string) string { - value := in - if len(value) == 0 { - value = imageref.DockerDefaultV1Registry - } - if !strings.HasPrefix(value, "https://") && !strings.HasPrefix(value, "http://") { - value = "https://" + value - } - parsed, err := url.Parse(value) - if err != nil { - return in - } - // The docker client allows exact matches: - // foo.bar.com/namespace - // Or hostname matches: - // foo.bar.com - // It also considers /v2/ and /v1/ equivalent to the hostname - // See ResolveAuthConfig in docker/registry/auth.go. 
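The equivalences listed in that comment mean several spellings of the same registry collapse to one key before the mappings are consulted. A sketch in Example form, again assuming the same package; the input/output pairs are taken from the package's tests.

```go
package images

import "fmt"

func ExampleImageReferenceMappings_MapDockerAuthKey() {
	m := ImageReferenceMappings{{FromRegistry: "index.docker.io", ToRegistry: "another.registry"}}
	fmt.Println(m.MapDockerAuthKey("https://index.docker.io/v1/")) // another.registry
	fmt.Println(m.MapDockerAuthKey("index.docker.io/other"))       // another.registry/other
	fmt.Println(m.MapDockerAuthKey("other.docker.io:5000/names"))  // other.docker.io:5000/names (no match, unchanged)
}
```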
- registry := parsed.Host - name := parsed.Path - switch { - case name == "/": - name = "" - case strings.HasPrefix(name, "/v2/") || strings.HasPrefix(name, "/v1/"): - name = name[4:] - case strings.HasPrefix(name, "/"): - name = name[1:] - } - for _, mapping := range m { - if len(mapping.FromRegistry) > 0 && mapping.FromRegistry != registry { - continue - } - if len(mapping.FromName) > 0 && mapping.FromName != name { - continue - } - if len(mapping.ToRegistry) > 0 { - registry = mapping.ToRegistry - } - if len(mapping.ToName) > 0 { - name = mapping.ToName - } - if len(name) > 0 { - return registry + "/" + name - } - return registry - } - return in -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/images/imagerefs_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/images/imagerefs_test.go deleted file mode 100644 index 81f4e6dfa468..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/images/imagerefs_test.go +++ /dev/null @@ -1,601 +0,0 @@ -package images - -import ( - "testing" - - kappsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/diff" - kapihelper "k8s.io/kubernetes/pkg/apis/core/helper" - "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" - - appsv1 "github.com/openshift/api/apps/v1" - buildv1 "github.com/openshift/api/build/v1" - imagev1 "github.com/openshift/api/image/v1" - "github.com/openshift/oc/pkg/helpers/originpolymorphichelpers" -) - -func TestImageReferenceMappingsMapReference(t *testing.T) { - testCases := []struct { - mappings ImageReferenceMappings - results map[string]string - }{ - { - mappings: ImageReferenceMappings{{FromRegistry: "docker.io", ToRegistry: "index.docker.io"}}, - results: map[string]string{ - "mysql": "index.docker.io/mysql", - "mysql:latest": "index.docker.io/mysql:latest", - "default/mysql:latest": "index.docker.io/default/mysql:latest", - - "mysql@sha256:b2f400f4a5e003b0543decf61a0a010939f3fba07bafa226f11ed7b5f1e81237": "index.docker.io/mysql@sha256:b2f400f4a5e003b0543decf61a0a010939f3fba07bafa226f11ed7b5f1e81237", - - "docker.io/mysql": "index.docker.io/mysql", - "docker.io/mysql:latest": "index.docker.io/mysql:latest", - "docker.io/default/mysql:latest": "index.docker.io/default/mysql:latest", - - "docker.io/mysql@sha256:b2f400f4a5e003b0543decf61a0a010939f3fba07bafa226f11ed7b5f1e81237": "index.docker.io/mysql@sha256:b2f400f4a5e003b0543decf61a0a010939f3fba07bafa226f11ed7b5f1e81237", - }, - }, - { - mappings: ImageReferenceMappings{{FromName: "test/other", ToRegistry: "another.registry"}}, - results: map[string]string{ - "test/other": "another.registry/test/other", - "test/other:latest": "another.registry/test/other:latest", - "myregistry.com/test/other:latest": "another.registry/test/other:latest", - - "myregistry.com/b/test/other:latest": "myregistry.com/b/test/other:latest", - }, - }, - { - mappings: ImageReferenceMappings{{FromName: "test/other", ToName: "other/test"}}, - results: map[string]string{ - "test/other": "other/test", - "test/other:latest": "other/test:latest", - "myregistry.com/test/other:latest": "myregistry.com/other/test:latest", - - "test/other/b:latest": "test/other/b:latest", - }, - }, - } - - for i, test := range testCases { - for in, out := range test.results { - result := test.mappings.MapReference(in) - if result != out { - t.Errorf("%d: expect %s -> %s, got %q", i, in, out, result) - continue - } - } - } -} - -func TestImageReferenceMappingsMapDockerAuthKey(t 
*testing.T) { - testCases := []struct { - mappings ImageReferenceMappings - results map[string]string - }{ - { - mappings: ImageReferenceMappings{{FromRegistry: "docker.io", ToRegistry: "index.docker.io"}}, - results: map[string]string{ - "docker.io": "index.docker.io", - "index.docker.io": "index.docker.io", - "https://index.docker.io/v1/": "https://index.docker.io/v1/", - "https://docker.io/v1/": "index.docker.io", - - "other.docker.io": "other.docker.io", - "other.docker.io/names": "other.docker.io/names", - "other.docker.io:5000/names": "other.docker.io:5000/names", - "https://other.docker.io/v1/": "https://other.docker.io/v1/", - }, - }, - { - mappings: ImageReferenceMappings{{FromRegistry: "index.docker.io", ToRegistry: "another.registry"}}, - results: map[string]string{ - "index.docker.io": "another.registry", - "index.docker.io/other": "another.registry/other", - "https://index.docker.io/v1/other": "another.registry/other", - "https://index.docker.io/v1/": "another.registry", - "https://index.docker.io/": "another.registry", - "https://index.docker.io": "another.registry", - - "docker.io": "docker.io", - "https://docker.io/v1/": "https://docker.io/v1/", - "other.docker.io": "other.docker.io", - "other.docker.io/names": "other.docker.io/names", - "other.docker.io:5000/names": "other.docker.io:5000/names", - "https://other.docker.io/v1/": "https://other.docker.io/v1/", - }, - }, - { - mappings: ImageReferenceMappings{{FromRegistry: "index.docker.io", ToRegistry: "another.registry", ToName: "extra"}}, - results: map[string]string{ - "index.docker.io": "another.registry/extra", - "index.docker.io/other": "another.registry/extra", - "https://index.docker.io/v1/other": "another.registry/extra", - "https://index.docker.io/v1/": "another.registry/extra", - "https://index.docker.io/": "another.registry/extra", - - "docker.io": "docker.io", - "https://docker.io/v1/": "https://docker.io/v1/", - "other.docker.io": "other.docker.io", - "other.docker.io/names": "other.docker.io/names", - "other.docker.io:5000/names": "other.docker.io:5000/names", - "https://other.docker.io/v1/": "https://other.docker.io/v1/", - }, - }, - } - - for i, test := range testCases { - for in, out := range test.results { - result := test.mappings.MapDockerAuthKey(in) - if result != out { - t.Errorf("%d: expect %s -> %s, got %q", i, in, out, result) - continue - } - } - } -} - -func TestTransform(t *testing.T) { - type variant struct { - changed bool - nilReporter bool - err bool - obj, expected runtime.Object - } - testCases := []struct { - mappings ImageReferenceMappings - variants []variant - }{ - { - mappings: ImageReferenceMappings{{FromRegistry: "docker.io", ToRegistry: "index.docker.io"}}, - variants: []variant{ - { - obj: &corev1.Pod{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Image: "docker.io/foo/bar"}, - {Image: "foo/bar"}, - }, - }, - }, - changed: true, - expected: &corev1.Pod{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Image: "index.docker.io/foo/bar"}, - {Image: "index.docker.io/foo/bar"}, - }, - }, - }, - }, - { - obj: &corev1.ReplicationController{ - Spec: corev1.ReplicationControllerSpec{ - Template: &corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Image: "docker.io/foo/bar"}, - {Image: "foo/bar"}, - }, - }, - }, - }, - }, - changed: true, - expected: &corev1.ReplicationController{ - Spec: corev1.ReplicationControllerSpec{ - Template: &corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Image: 
"index.docker.io/foo/bar"}, - {Image: "index.docker.io/foo/bar"}, - }, - }, - }, - }, - }, - }, - { - obj: &kappsv1.Deployment{ - Spec: kappsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Image: "docker.io/foo/bar"}, - {Image: "foo/bar"}, - }, - }, - }, - }, - }, - changed: true, - expected: &kappsv1.Deployment{ - Spec: kappsv1.DeploymentSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Image: "index.docker.io/foo/bar"}, - {Image: "index.docker.io/foo/bar"}, - }, - }, - }, - }, - }, - }, - { - obj: &appsv1.DeploymentConfig{ - Spec: appsv1.DeploymentConfigSpec{ - Template: &corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Image: "docker.io/foo/bar"}, - {Image: "foo/bar"}, - }, - }, - }, - }, - }, - changed: true, - expected: &appsv1.DeploymentConfig{ - Spec: appsv1.DeploymentConfigSpec{ - Template: &corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Image: "index.docker.io/foo/bar"}, - {Image: "index.docker.io/foo/bar"}, - }, - }, - }, - }, - }, - }, - { - obj: &kappsv1.DaemonSet{ - Spec: kappsv1.DaemonSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Image: "docker.io/foo/bar"}, - {Image: "foo/bar"}, - }, - }, - }, - }, - }, - changed: true, - expected: &kappsv1.DaemonSet{ - Spec: kappsv1.DaemonSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Image: "index.docker.io/foo/bar"}, - {Image: "index.docker.io/foo/bar"}, - }, - }, - }, - }, - }, - }, - { - obj: &kappsv1.ReplicaSet{ - Spec: kappsv1.ReplicaSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Image: "docker.io/foo/bar"}, - {Image: "foo/bar"}, - }, - }, - }, - }, - }, - changed: true, - expected: &kappsv1.ReplicaSet{ - Spec: kappsv1.ReplicaSetSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Image: "index.docker.io/foo/bar"}, - {Image: "index.docker.io/foo/bar"}, - }, - }, - }, - }, - }, - }, - { - obj: &batchv1.Job{ - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Image: "docker.io/foo/bar"}, - {Image: "foo/bar"}, - }, - }, - }, - }, - }, - changed: true, - expected: &batchv1.Job{ - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - {Image: "index.docker.io/foo/bar"}, - {Image: "index.docker.io/foo/bar"}, - }, - }, - }, - }, - }, - }, - { - obj: &corev1.Node{}, - nilReporter: true, - }, - { - obj: &buildv1.BuildConfig{ - Spec: buildv1.BuildConfigSpec{ - CommonSpec: buildv1.CommonSpec{ - Output: buildv1.BuildOutput{To: &corev1.ObjectReference{Kind: "DockerImage", Name: "docker.io/foo/bar"}}, - Source: buildv1.BuildSource{ - Images: []buildv1.ImageSource{ - {From: corev1.ObjectReference{Kind: "DockerImage", Name: "docker.io/foo/bar"}}, - {From: corev1.ObjectReference{Kind: "DockerImage", Name: "foo/bar"}}, - }, - }, - Strategy: buildv1.BuildStrategy{ - DockerStrategy: &buildv1.DockerBuildStrategy{From: &corev1.ObjectReference{Kind: "DockerImage", Name: "docker.io/foo/bar"}}, - SourceStrategy: &buildv1.SourceBuildStrategy{From: corev1.ObjectReference{Kind: "DockerImage", Name: "docker.io/foo/bar"}}, - CustomStrategy: &buildv1.CustomBuildStrategy{From: corev1.ObjectReference{Kind: "DockerImage", 
Name: "docker.io/foo/bar"}}, - }, - }, - }, - }, - changed: true, - expected: &buildv1.BuildConfig{ - Spec: buildv1.BuildConfigSpec{ - CommonSpec: buildv1.CommonSpec{ - Output: buildv1.BuildOutput{To: &corev1.ObjectReference{Kind: "DockerImage", Name: "index.docker.io/foo/bar"}}, - Source: buildv1.BuildSource{ - Images: []buildv1.ImageSource{ - {From: corev1.ObjectReference{Kind: "DockerImage", Name: "index.docker.io/foo/bar"}}, - {From: corev1.ObjectReference{Kind: "DockerImage", Name: "index.docker.io/foo/bar"}}, - }, - }, - Strategy: buildv1.BuildStrategy{ - DockerStrategy: &buildv1.DockerBuildStrategy{From: &corev1.ObjectReference{Kind: "DockerImage", Name: "index.docker.io/foo/bar"}}, - SourceStrategy: &buildv1.SourceBuildStrategy{From: corev1.ObjectReference{Kind: "DockerImage", Name: "index.docker.io/foo/bar"}}, - CustomStrategy: &buildv1.CustomBuildStrategy{From: corev1.ObjectReference{Kind: "DockerImage", Name: "index.docker.io/foo/bar"}}, - }, - }, - }, - }, - }, - { - obj: &corev1.Secret{ - Type: corev1.SecretTypeDockercfg, - Data: map[string][]byte{ - corev1.DockerConfigKey: []byte(`{"docker.io":{"auth":"Og=="},"other.server":{"auth":"Og=="}}`), - "another": []byte(`{"auths":{"docker.io":{},"other.server":{}}}`), - }, - }, - changed: true, - expected: &corev1.Secret{ - Type: corev1.SecretTypeDockercfg, - Data: map[string][]byte{ - corev1.DockerConfigKey: []byte(`{"index.docker.io":{"auth":"Og=="},"other.server":{"auth":"Og=="}}`), - "another": []byte(`{"auths":{"docker.io":{},"other.server":{}}}`), - }, - }, - }, - { - obj: &corev1.Secret{ - Type: corev1.SecretTypeDockercfg, - Data: map[string][]byte{ - corev1.DockerConfigKey: []byte(`{"myserver.com":{"auth":"Og=="},"other.server":{"auth":"Og=="}}`), - "another": []byte(`{"auths":{"docker.io":{},"other.server":{}}}`), - }, - }, - expected: &corev1.Secret{ - Type: corev1.SecretTypeDockercfg, - Data: map[string][]byte{ - corev1.DockerConfigKey: []byte(`{"myserver.com":{"auth":"Og=="},"other.server":{"auth":"Og=="}}`), - "another": []byte(`{"auths":{"docker.io":{},"other.server":{}}}`), - }, - }, - }, - { - obj: &corev1.Secret{ - Type: corev1.SecretTypeDockerConfigJson, - Data: map[string][]byte{ - corev1.DockerConfigJsonKey: []byte(`{"auths":{"docker.io":{"auth":"Og=="},"other.server":{"auth":"Og=="}}}`), - "another": []byte(`{"auths":{"docker.io":{},"other.server":{}}}`), - }, - }, - changed: true, - expected: &corev1.Secret{ - Type: corev1.SecretTypeDockerConfigJson, - Data: map[string][]byte{ - corev1.DockerConfigJsonKey: []byte(`{"auths":{"index.docker.io":{"auth":"Og=="},"other.server":{"auth":"Og=="}}}`), - "another": []byte(`{"auths":{"docker.io":{},"other.server":{}}}`), - }, - }, - }, - { - obj: &corev1.Secret{ - Type: corev1.SecretTypeDockerConfigJson, - Data: map[string][]byte{ - corev1.DockerConfigJsonKey: []byte(`{"auths":{"myserver.com":{},"other.server":{}}}`), - "another": []byte(`{"auths":{"docker.io":{},"other.server":{}}}`), - }, - }, - expected: &corev1.Secret{ - Type: corev1.SecretTypeDockerConfigJson, - Data: map[string][]byte{ - corev1.DockerConfigJsonKey: []byte(`{"auths":{"myserver.com":{},"other.server":{}}}`), - "another": []byte(`{"auths":{"docker.io":{},"other.server":{}}}`), - }, - }, - }, - { - obj: &corev1.Secret{ - Type: corev1.SecretTypeDockercfg, - Data: map[string][]byte{ - corev1.DockerConfigKey: []byte(`{"auths":{`), - "another": []byte(`{"auths":{"docker.io":{},"other.server":{}}}`), - }, - }, - err: true, - expected: &corev1.Secret{ - Type: corev1.SecretTypeDockercfg, - Data: 
map[string][]byte{ - corev1.DockerConfigKey: []byte(`{"auths":{`), - "another": []byte(`{"auths":{"docker.io":{},"other.server":{}}}`), - }, - }, - }, - { - obj: &corev1.Secret{ - Type: corev1.SecretTypeDockerConfigJson, - Data: map[string][]byte{ - corev1.DockerConfigJsonKey: []byte(`{"auths":{`), - "another": []byte(`{"auths":{"docker.io":{},"other.server":{}}}`), - }, - }, - err: true, - expected: &corev1.Secret{ - Type: corev1.SecretTypeDockerConfigJson, - Data: map[string][]byte{ - corev1.DockerConfigJsonKey: []byte(`{"auths":{`), - "another": []byte(`{"auths":{"docker.io":{},"other.server":{}}}`), - }, - }, - }, - { - obj: &corev1.Secret{ - Type: corev1.SecretTypeOpaque, - Data: map[string][]byte{ - corev1.DockerConfigJsonKey: []byte(`{"auths":{"docker.io":{},"other.server":{}}}`), - }, - }, - expected: &corev1.Secret{ - Type: corev1.SecretTypeOpaque, - Data: map[string][]byte{ - corev1.DockerConfigJsonKey: []byte(`{"auths":{"docker.io":{},"other.server":{}}}`), - }, - }, - }, - { - obj: &imagev1.Image{ - DockerImageReference: "docker.io/foo/bar", - }, - changed: true, - expected: &imagev1.Image{ - DockerImageReference: "index.docker.io/foo/bar", - }, - }, - { - obj: &imagev1.Image{ - DockerImageReference: "other.docker.io/foo/bar", - }, - expected: &imagev1.Image{ - DockerImageReference: "other.docker.io/foo/bar", - }, - }, - { - obj: &imagev1.ImageStream{ - Spec: imagev1.ImageStreamSpec{ - Tags: []imagev1.TagReference{ - {Name: "foo", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "docker.io/foo/bar"}}, - {Name: "bar", From: &corev1.ObjectReference{Kind: "ImageStream", Name: "docker.io/foo/bar"}}, - {Name: "baz"}, - }, - DockerImageRepository: "docker.io/foo/bar", - }, - Status: imagev1.ImageStreamStatus{ - DockerImageRepository: "docker.io/foo/bar", - Tags: []imagev1.NamedTagEventList{ - {Tag: "bar", Items: []imagev1.TagEvent{ - {DockerImageReference: "docker.io/foo/bar"}, - {DockerImageReference: "docker.io/foo/bar"}, - }}, - {Tag: "baz", Items: []imagev1.TagEvent{ - {DockerImageReference: "some.other/reference"}, - {DockerImageReference: "docker.io/foo/bar"}, - }}, - }, - }, - }, - changed: true, - expected: &imagev1.ImageStream{ - Spec: imagev1.ImageStreamSpec{ - Tags: []imagev1.TagReference{ - {Name: "foo", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "index.docker.io/foo/bar"}}, - {Name: "bar", From: &corev1.ObjectReference{Kind: "ImageStream", Name: "docker.io/foo/bar"}}, - {Name: "baz"}, - }, - DockerImageRepository: "index.docker.io/foo/bar", - }, - Status: imagev1.ImageStreamStatus{ - DockerImageRepository: "docker.io/foo/bar", - Tags: []imagev1.NamedTagEventList{ - {Tag: "bar", Items: []imagev1.TagEvent{ - {DockerImageReference: "index.docker.io/foo/bar"}, - {DockerImageReference: "index.docker.io/foo/bar"}, - }}, - {Tag: "baz", Items: []imagev1.TagEvent{ - {DockerImageReference: "some.other/reference"}, - {DockerImageReference: "index.docker.io/foo/bar"}, - }}, - }, - }, - }, - }, - }, - }, - { - mappings: ImageReferenceMappings{{FromRegistry: "index.docker.io", ToRegistry: "another.registry"}}, - }, - { - mappings: ImageReferenceMappings{{FromRegistry: "index.docker.io", ToRegistry: "another.registry", ToName: "extra"}}, - }, - } - - for _, test := range testCases { - for i, v := range test.variants { - o := MigrateImageReferenceOptions{ - Mappings: test.mappings, - UpdatePodSpecFn: originpolymorphichelpers.NewUpdatePodSpecForObjectFn(polymorphichelpers.UpdatePodSpecForObjectFn), - } - reporter, err := o.transform(v.obj) - if (err != nil) != v.err 
{ - t.Errorf("%d: %v %t", i, err, v.err) - continue - } - if err != nil { - continue - } - if (reporter == nil) != v.nilReporter { - t.Errorf("%d: reporter %#v %t", i, reporter, v.nilReporter) - continue - } - if reporter == nil { - continue - } - if reporter.Changed() != v.changed { - t.Errorf("%d: changed %#v %t", i, reporter, v.changed) - continue - } - - if !kapihelper.Semantic.DeepEqual(v.expected, v.obj) { - t.Errorf("%d: object: %s", i, diff.ObjectDiff(v.expected, v.obj)) - continue - } - } - } -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/legacyhpa/hpa.go b/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/legacyhpa/hpa.go deleted file mode 100644 index 4cc0fdcb5b11..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/legacyhpa/hpa.go +++ /dev/null @@ -1,184 +0,0 @@ -package legacyhpa - -import ( - "fmt" - "sort" - "strings" - - "github.com/spf13/cobra" - - autoscalingv1 "k8s.io/api/autoscaling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/cli-runtime/pkg/resource" - autoscalingv1typedclient "k8s.io/client-go/kubernetes/typed/autoscaling/v1" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/util/templates" - - "github.com/openshift/oc/pkg/cli/admin/migrate" -) - -var ( - defaultMigrations = map[metav1.TypeMeta]metav1.TypeMeta{ - // legacy oapi group - {Kind: "DeploymentConfig", APIVersion: "v1"}: {Kind: "DeploymentConfig", APIVersion: "apps.openshift.io/v1"}, - // legacy oapi group, for the lazy - {Kind: "DeploymentConfig"}: {Kind: "DeploymentConfig", APIVersion: "apps.openshift.io/v1"}, - - // webconsole shenaniganry - {Kind: "DeploymentConfig", APIVersion: "extensions/v1beta1"}: {Kind: "DeploymentConfig", APIVersion: "apps.openshift.io/v1"}, - {Kind: "Deployment", APIVersion: "extensions/v1beta1"}: {Kind: "Deployment", APIVersion: "apps/v1"}, - {Kind: "ReplicaSet", APIVersion: "extensions/v1beta1"}: {Kind: "ReplicaSet", APIVersion: "apps/v1"}, - {Kind: "ReplicationController", APIVersion: "extensions/v1beta1"}: {Kind: "ReplicationController", APIVersion: "v1"}, - } - - internalMigrateLegacyHPALong = templates.LongDesc(fmt.Sprintf(` - Migrate Horizontal Pod Autoscalers to refer to new API groups - - This command locates and updates every Horizontal Pod Autoscaler which refers to a particular - group-version-kind to refer to some other, equivalent group-version-kind. - - The following transformations will occur: - -%s`, prettyPrintMigrations(defaultMigrations))) - - internalMigrateLegacyHPAExample = templates.Examples(` - # Perform a dry-run of updating all objects - %[1]s - - # To actually perform the update, the confirm flag must be appended - %[1]s --confirm`) -) - -func prettyPrintMigrations(versionKinds map[metav1.TypeMeta]metav1.TypeMeta) string { - lines := make([]string, 0, len(versionKinds)) - for initial, final := range versionKinds { - line := fmt.Sprintf(" - %s.%s --> %s.%s", initial.APIVersion, initial.Kind, final.APIVersion, final.Kind) - lines = append(lines, line) - } - sort.Strings(lines) - - return strings.Join(lines, "\n") -} - -type MigrateLegacyHPAOptions struct { - // maps initial gvks to final gvks in the same format - // as HPAs use (CrossVersionObjectReferences) for ease of access. 
- finalVersionKinds map[metav1.TypeMeta]metav1.TypeMeta - - hpaClient autoscalingv1typedclient.AutoscalingV1Interface - - migrate.ResourceOptions -} - -func NewMigrateLegacyHPAOptions(streams genericclioptions.IOStreams) *MigrateLegacyHPAOptions { - return &MigrateLegacyHPAOptions{ - ResourceOptions: *migrate.NewResourceOptions(streams).WithIncludes([]string{"horizontalpodautoscalers.autoscaling"}).WithAllNamespaces(), - } -} - -// NewCmdMigrateLegacyAPI implements a MigrateLegacyHPA command -func NewCmdMigrateLegacyHPA(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - o := NewMigrateLegacyHPAOptions(streams) - cmd := &cobra.Command{ - Use: name, - Short: "Update HPAs to point to the latest group-version-kinds", - Long: internalMigrateLegacyHPALong, - Example: fmt.Sprintf(internalMigrateLegacyHPAExample, fullName), - Run: func(cmd *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Complete(name, f, cmd, args)) - kcmdutil.CheckErr(o.Validate()) - kcmdutil.CheckErr(o.Run()) - }, - } - o.ResourceOptions.Bind(cmd) - - return cmd -} - -func (o *MigrateLegacyHPAOptions) Complete(name string, f kcmdutil.Factory, c *cobra.Command, args []string) error { - if len(args) != 0 { - return fmt.Errorf("%s takes no positional arguments", name) - } - - o.ResourceOptions.SaveFn = o.save - if err := o.ResourceOptions.Complete(f, c); err != nil { - return err - } - - o.finalVersionKinds = make(map[metav1.TypeMeta]metav1.TypeMeta) - - // copy all manual transformations in - for initial, final := range defaultMigrations { - o.finalVersionKinds[initial] = final - } - - config, err := f.ToRESTConfig() - if err != nil { - return err - } - - o.hpaClient, err = autoscalingv1typedclient.NewForConfig(config) - if err != nil { - return err - } - - return nil -} - -func (o MigrateLegacyHPAOptions) Validate() error { - if len(o.ResourceOptions.Include) != 1 || o.ResourceOptions.Include[0] != "horizontalpodautoscalers.autoscaling" { - return fmt.Errorf("the only supported resources are horizontalpodautoscalers") - } - return o.ResourceOptions.Validate() -} - -func (o MigrateLegacyHPAOptions) Run() error { - return o.ResourceOptions.Visitor().Visit(func(info *resource.Info) (migrate.Reporter, error) { - return o.checkAndTransform(info.Object) - }) -} - -func (o *MigrateLegacyHPAOptions) checkAndTransform(hpaRaw runtime.Object) (migrate.Reporter, error) { - hpa, wasHPA := hpaRaw.(*autoscalingv1.HorizontalPodAutoscaler) - if !wasHPA { - return nil, fmt.Errorf("unrecognized object %#v", hpaRaw) - } - - currentVersionKind := metav1.TypeMeta{ - APIVersion: hpa.Spec.ScaleTargetRef.APIVersion, - Kind: hpa.Spec.ScaleTargetRef.Kind, - } - - newVersionKind := o.latestVersionKind(currentVersionKind) - - if currentVersionKind != newVersionKind { - hpa.Spec.ScaleTargetRef.APIVersion = newVersionKind.APIVersion - hpa.Spec.ScaleTargetRef.Kind = newVersionKind.Kind - return migrate.ReporterBool(true), nil - } - - return migrate.ReporterBool(false), nil -} - -func (o *MigrateLegacyHPAOptions) latestVersionKind(current metav1.TypeMeta) metav1.TypeMeta { - if newVersionKind, isKnown := o.finalVersionKinds[current]; isKnown { - return newVersionKind - } - - return current -} - -// save invokes the API to alter an object. The reporter passed to this method is the same returned by -// the migration visitor method. It should return an error if the input type cannot be saved -// It returns migrate.ErrRecalculate if migration should be re-run on the provided object. 
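The lookup-with-fallback in latestVersionKind above is the whole migration policy: transformations are keyed by metav1.TypeMeta, and any reference not in the table passes through untouched. A minimal, self-contained sketch of that pattern follows; the single map entry is illustrative, not the command's full table.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// An illustrative one-entry migration table. TypeMeta is a comparable
// struct of two strings, so it works directly as a map key.
var migrations = map[metav1.TypeMeta]metav1.TypeMeta{
	{Kind: "DeploymentConfig", APIVersion: "v1"}: {Kind: "DeploymentConfig", APIVersion: "apps.openshift.io/v1"},
}

// rewrite mirrors the lookup-with-fallback used by latestVersionKind:
// return the mapped target if one exists, otherwise the input unchanged.
func rewrite(current metav1.TypeMeta) (metav1.TypeMeta, bool) {
	if next, ok := migrations[current]; ok {
		return next, next != current
	}
	return current, false
}

func main() {
	next, changed := rewrite(metav1.TypeMeta{Kind: "DeploymentConfig", APIVersion: "v1"})
	fmt.Printf("%s.%s changed=%v\n", next.APIVersion, next.Kind, changed) // apps.openshift.io/v1.DeploymentConfig changed=true
}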
-func (o *MigrateLegacyHPAOptions) save(info *resource.Info, reporter migrate.Reporter) error { - hpa, wasHPA := info.Object.(*autoscalingv1.HorizontalPodAutoscaler) - if !wasHPA { - return fmt.Errorf("unrecognized object %#v", info.Object) - } - - _, err := o.hpaClient.HorizontalPodAutoscalers(hpa.Namespace).Update(hpa) - return migrate.DefaultRetriable(info, err) -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/legacyhpa/hpa_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/legacyhpa/hpa_test.go deleted file mode 100644 index dabebae0a2d0..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/legacyhpa/hpa_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package legacyhpa - -import ( - "testing" - - autoscalingv1 "k8s.io/api/autoscaling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestDefaultMigrations(t *testing.T) { - testCases := []struct { - name string - input metav1.TypeMeta - output metav1.TypeMeta - }{ - { - name: "legacy-dc", - input: metav1.TypeMeta{Kind: "DeploymentConfig", APIVersion: "v1"}, - output: metav1.TypeMeta{Kind: "DeploymentConfig", APIVersion: "apps.openshift.io/v1"}, - }, - { - name: "console-dc", - input: metav1.TypeMeta{Kind: "DeploymentConfig", APIVersion: "extensions/v1beta1"}, - output: metav1.TypeMeta{Kind: "DeploymentConfig", APIVersion: "apps.openshift.io/v1"}, - }, - { - name: "console-rc", - input: metav1.TypeMeta{Kind: "ReplicationController", APIVersion: "extensions/v1beta1"}, - output: metav1.TypeMeta{Kind: "ReplicationController", APIVersion: "v1"}, - }, - { - name: "console-deploy", - input: metav1.TypeMeta{Kind: "Deployment", APIVersion: "extensions/v1beta1"}, - output: metav1.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"}, - }, - { - name: "console-rs", - input: metav1.TypeMeta{Kind: "ReplicaSet", APIVersion: "extensions/v1beta1"}, - output: metav1.TypeMeta{Kind: "ReplicaSet", APIVersion: "apps/v1"}, - }, - { - name: "ok-dc", - input: metav1.TypeMeta{Kind: "DeploymentConfig", APIVersion: "apps.openshift.io/v1"}, - output: metav1.TypeMeta{Kind: "DeploymentConfig", APIVersion: "apps.openshift.io/v1"}, - }, - { - name: "other", - input: metav1.TypeMeta{Kind: "Cheddar", APIVersion: "cheese/v1alpha1"}, - output: metav1.TypeMeta{Kind: "Cheddar", APIVersion: "cheese/v1alpha1"}, - }, - } - - opts := MigrateLegacyHPAOptions{ - finalVersionKinds: defaultMigrations, - } - - for _, tc := range testCases { - tc := tc // copy the iteration variable to a non-iteration memory location - t.Run(tc.name, func(t *testing.T) { - oldHPA := &autoscalingv1.HorizontalPodAutoscaler{ - Spec: autoscalingv1.HorizontalPodAutoscalerSpec{ - ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{ - APIVersion: tc.input.APIVersion, - Kind: tc.input.Kind, - Name: tc.name, - }, - }, - } - - reporter, err := opts.checkAndTransform(oldHPA) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - expectedChanged := tc.input != tc.output - if reporter.Changed() != expectedChanged { - indicator := "" - if expectedChanged { - indicator = " not" - } - t.Errorf("expected the HPA%s to have been changed, but it had%s", indicator, indicator) - } - newVersionKind := metav1.TypeMeta{ - APIVersion: oldHPA.Spec.ScaleTargetRef.APIVersion, - Kind: oldHPA.Spec.ScaleTargetRef.Kind, - } - if newVersionKind != tc.output { - t.Errorf("expected the HPA to be updated to %v, yet it ended up as %v", tc.output, newVersionKind) - } - }) - - } -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/migrate.go 
b/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/migrate.go deleted file mode 100644 index d8fdaa712821..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/migrate.go +++ /dev/null @@ -1,28 +0,0 @@ -package migrate - -import ( - "github.com/spf13/cobra" - - "k8s.io/cli-runtime/pkg/genericclioptions" - cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/util/templates" -) - -const MigrateRecommendedName = "migrate" - -var migrateLong = templates.LongDesc(` - Migrate resources on the cluster - - These commands assist administrators in performing preventative maintenance on a cluster.`) - -func NewCommandMigrate(name, fullName string, f cmdutil.Factory, streams genericclioptions.IOStreams, cmds ...*cobra.Command) *cobra.Command { - // Parent command to which all subcommands are added. - cmd := &cobra.Command{ - Use: name, - Short: "Migrate data in the cluster", - Long: migrateLong, - Run: cmdutil.DefaultSubCommandRun(streams.ErrOut), - } - cmd.AddCommand(cmds...) - return cmd -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/migrator.go b/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/migrator.go deleted file mode 100644 index d4356c8c3236..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/migrator.go +++ /dev/null @@ -1,827 +0,0 @@ -package migrate - -import ( - "fmt" - "io" - "sort" - "strings" - "sync" - "time" - - "github.com/spf13/cobra" - "k8s.io/klog" - - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/cli-runtime/pkg/printers" - "k8s.io/cli-runtime/pkg/resource" - "k8s.io/client-go/discovery" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/scheme" -) - -// MigrateVisitFunc is invoked for each returned object, and may return a -// Reporter that can contain info to be used by save. -type MigrateVisitFunc func(info *resource.Info) (Reporter, error) - -// MigrateActionFunc is expected to persist the altered info.Object. The -// Reporter returned from Visit is passed to this function and may be used -// to carry additional information about what to save on an object. -type MigrateActionFunc func(info *resource.Info, reporter Reporter) error - -// MigrateFilterFunc can return false to skip an item, or an error. -type MigrateFilterFunc func(info *resource.Info) (bool, error) - -// Reporter indicates whether a resource requires migration. -type Reporter interface { - // Changed returns true if the resource requires migration. - Changed() bool -} - -// ReporterBool implements the Reporter interface for a boolean. 
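The function types above keep visiting and persisting strictly separated: a MigrateVisitFunc may mutate the object and returns a Reporter, and the MigrateActionFunc runs only afterwards, receiving that Reporter. Because Reporter is a one-method interface, a migrator can carry richer context to its save step by returning a concrete type of its own. A hedged sketch of that idea, with invented names, assuming nothing beyond the interface shown above:

package main

import "fmt"

// Reporter restates the one-method contract from the package above.
type Reporter interface {
	Changed() bool
}

// fieldReporter is a hypothetical Reporter that also records which
// fields were rewritten, so the action func can report them on save.
type fieldReporter struct{ fields []string }

func (r *fieldReporter) Changed() bool { return len(r.fields) > 0 }

// save plays the role of a MigrateActionFunc: it acts only on changed
// objects, and downcasts to recover the extra context when present.
func save(reporter Reporter) error {
	if !reporter.Changed() {
		return nil
	}
	if fr, ok := reporter.(*fieldReporter); ok {
		fmt.Println("persisting rewrite of:", fr.fields)
	}
	return nil
}

func main() {
	_ = save(&fieldReporter{fields: []string{"spec.scaleTargetRef"}})
}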
-type ReporterBool bool - -func (r ReporterBool) Changed() bool { - return bool(r) -} - -func AlwaysRequiresMigration(_ *resource.Info) (Reporter, error) { - return ReporterBool(true), nil -} - -// timeStampNow returns the current time in the same format as glog -func timeStampNow() string { - return time.Now().Format("0102 15:04:05.000000") -} - -// used to check if an io.Writer is a *bufio.Writer or similar -type flusher interface { - Flush() error -} - -// used to check if an io.Writer is a *os.File or similar -type syncer interface { - Sync() error -} - -var _ io.Writer = &syncedWriter{} - -// syncedWriter makes the given writer goroutine safe -// it will attempt to flush and sync on each write -type syncedWriter struct { - lock sync.Mutex - writer io.Writer -} - -func (w *syncedWriter) Write(p []byte) (int, error) { - w.lock.Lock() - n, err := w.write(p) - w.lock.Unlock() - return n, err -} - -// must only be called when w.lock is held -func (w *syncedWriter) write(p []byte) (int, error) { - n, err := w.writer.Write(p) - // attempt to flush buffered IO - if f, ok := w.writer.(flusher); ok { - f.Flush() // ignore error - } - // attempt to sync file - if s, ok := w.writer.(syncer); ok { - s.Sync() // ignore error - } - return n, err -} - -// ResourceOptions assists in performing migrations on any object that -// can be retrieved via the API. -type ResourceOptions struct { - PrintFlags *genericclioptions.PrintFlags - - Printer printers.ResourcePrinter - - Unstructured bool - AllNamespaces bool - Include []string - Filenames []string - Confirm bool - Output string - FromKey string - ToKey string - - OverlappingResources []sets.String - DefaultExcludes []schema.GroupResource - - Builder *resource.Builder - SaveFn MigrateActionFunc - PrintFn MigrateActionFunc - FilterFn MigrateFilterFunc - DryRun bool - Summarize bool - - // Number of parallel workers to use. - // Any migrate command that sets this must make sure that - // its SaveFn, PrintFn and FilterFn are goroutine safe. - // If multiple workers may attempt to write to Out or ErrOut - // at the same time, SyncOut must also be set to true. - // This should not be exposed as a CLI flag. Instead it - // should have a fixed value that is high enough to saturate - // the desired bandwidth when parallel processing is desired. - Workers int - // If true, Out and ErrOut will be wrapped to make them goroutine safe. - SyncOut bool - - genericclioptions.IOStreams -} - -func NewResourceOptions(streams genericclioptions.IOStreams) *ResourceOptions { - return &ResourceOptions{ - PrintFlags: genericclioptions.NewPrintFlags("migrated").WithTypeSetter(scheme.Scheme), - IOStreams: streams, - AllNamespaces: true, - } -} - -func (o *ResourceOptions) WithIncludes(include []string) *ResourceOptions { - o.Include = include - return o -} - -func (o *ResourceOptions) WithExcludes(defaultExcludes []schema.GroupResource) *ResourceOptions { - o.DefaultExcludes = defaultExcludes - return o -} - -func (o *ResourceOptions) WithOverlappingResources(resources []sets.String) *ResourceOptions { - o.OverlappingResources = resources - return o -} - -func (o *ResourceOptions) WithUnstructured() *ResourceOptions { - o.Unstructured = true - return o -} - -func (o *ResourceOptions) WithAllNamespaces() *ResourceOptions { - o.AllNamespaces = true - return o -} - -func (o *ResourceOptions) Bind(c *cobra.Command) { - c.Flags().StringSliceVar(&o.Include, "include", o.Include, "Resource types to migrate. 
Passing --filename will override this flag.") - c.Flags().BoolVarP(&o.AllNamespaces, "all-namespaces", "A", o.AllNamespaces, "Migrate objects in all namespaces. Defaults to true.") - c.Flags().BoolVar(&o.Confirm, "confirm", o.Confirm, "If true, all requested objects will be migrated. Defaults to false.") - - c.Flags().StringVar(&o.FromKey, "from-key", o.FromKey, "If specified, only migrate items with a key (namespace/name or name) greater than or equal to this value") - c.Flags().StringVar(&o.ToKey, "to-key", o.ToKey, "If specified, only migrate items with a key (namespace/name or name) less than this value") - - o.PrintFlags.AddFlags(c) - - usage := "Filename, directory, or URL to docker-compose.yml file to use" - kcmdutil.AddJsonFilenameFlag(c.Flags(), &o.Filenames, usage) -} - -func (o *ResourceOptions) Complete(f kcmdutil.Factory, c *cobra.Command) error { - o.Output = kcmdutil.GetFlagString(c, "output") - - if o.DryRun { - o.PrintFlags.Complete("%s (dry run)") - } - - var err error - o.Printer, err = o.PrintFlags.ToPrinter() - if err != nil { - return err - } - - switch { - case len(o.Output) > 0: - first := true - o.PrintFn = func(info *resource.Info, _ Reporter) error { - // We would normally pass an API list to the printer, however this command is special - // and does not have all of the infos it wants to print at the same time. - if o.Output == "yaml" && !first { - fmt.Fprintln(o.Out, "---") - } - first = false - return o.Printer.PrintObj(info.Object, o.Out) - } - o.DryRun = true - case o.Confirm: - o.DryRun = false - default: - o.DryRun = true - } - - namespace, explicitNamespace, err := f.ToRawKubeConfigLoader().Namespace() - if err != nil { - return err - } - allNamespaces := !explicitNamespace && o.AllNamespaces - - if len(o.FromKey) > 0 || len(o.ToKey) > 0 { - o.FilterFn = func(info *resource.Info) (bool, error) { - var key string - if info.Mapping.Scope.Name() == meta.RESTScopeNameNamespace { - key = info.Namespace + "/" + info.Name - } else { - if !allNamespaces { - return false, nil - } - key = info.Name - } - if len(o.FromKey) > 0 && o.FromKey > key { - return false, nil - } - if len(o.ToKey) > 0 && o.ToKey <= key { - return false, nil - } - return true, nil - } - } - - // use the factory's caching discovery client - discoveryClient, err := f.ToDiscoveryClient() - if err != nil { - return err - } - // but invalidate its cache to force it to fetch the latest data - discoveryClient.Invalidate() - // and do a no-op call to cause the latest data to be written to disk - _, _ = discoveryClient.ServerResources() - // so that the REST mapper will never use stale discovery data - mapper, err := f.ToRESTMapper() - if err != nil { - return err - } - - // if o.Include has * we need to update it via discovery and o.DefaultExcludes and o.OverlappingResources - resourceNames := sets.NewString() - for i, s := range o.Include { - if resourceNames.Has(s) { - continue - } - if s != "*" { - resourceNames.Insert(s) - continue - } - - exclude := sets.NewString() - for _, gr := range o.DefaultExcludes { - if len(o.OverlappingResources) > 0 { - for _, others := range o.OverlappingResources { - if !others.Has(gr.String()) { - continue - } - exclude.Insert(others.List()...) 
- break - } - } - exclude.Insert(gr.String()) - } - - candidate := sets.NewString() - - // keep this logic as close to the point of use as possible so that we limit our dependency on discovery - // since discovery is cached this does not repeatedly call out to the API - all, err := FindAllCanonicalResources(discoveryClient, mapper) - if err != nil { - return fmt.Errorf("could not calculate the list of available resources: %v", err) - } - - for _, gr := range all { - // if the user specifies a resource that matches resource or resource+group, skip it - if resourceNames.Has(gr.Resource) || resourceNames.Has(gr.String()) || exclude.Has(gr.String()) { - continue - } - candidate.Insert(gr.String()) - } - candidate.Delete(exclude.List()...) - include := candidate - if len(o.OverlappingResources) > 0 { - include = sets.NewString() - for _, k := range candidate.List() { - reduce := k - for _, others := range o.OverlappingResources { - if !others.Has(k) { - continue - } - reduce = others.List()[0] - break - } - include.Insert(reduce) - } - } - klog.V(4).Infof("Found the following resources from the server: %v", include.List()) - last := o.Include[i+1:] - o.Include = append([]string{}, o.Include[:i]...) - o.Include = append(o.Include, include.List()...) - o.Include = append(o.Include, last...) - break - } - - // we need at least one worker - if o.Workers == 0 { - o.Workers = 1 - } - - // make sure we do not print to std out / err from multiple workers at once - if len(o.Output) > 0 && o.Workers > 1 { - o.SyncOut = true - } - // the command requires synchronized output - if o.SyncOut { - o.Out = &syncedWriter{writer: o.Out} - o.ErrOut = &syncedWriter{writer: o.ErrOut} - } - - o.Builder = f.NewBuilder(). - AllNamespaces(allNamespaces). - FilenameParam(false, &resource.FilenameOptions{Recursive: false, Filenames: o.Filenames}). - ContinueOnError(). - DefaultNamespace(). - RequireObject(true). - SelectAllParam(true). - Flatten(). - RequestChunksOf(500) - - if o.Unstructured { - o.Builder.Unstructured() - } else { - o.Builder.WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...) - } - - if !allNamespaces { - o.Builder.NamespaceParam(namespace) - } - - if len(o.Filenames) == 0 { - o.Builder.ResourceTypes(o.Include...) - } - - return nil -} - -func (o *ResourceOptions) Validate() error { - if len(o.Filenames) == 0 && len(o.Include) == 0 { - return fmt.Errorf("you must specify at least one resource or resource type to migrate with --include or --filenames") - } - if o.Workers < 1 { - return fmt.Errorf("invalid value %d for workers, must be at least 1", o.Workers) - } - return nil -} - -func (o *ResourceOptions) Visitor() *ResourceVisitor { - return &ResourceVisitor{ - Out: o.Out, - Builder: &resourceBuilder{builder: o.Builder}, - SaveFn: o.SaveFn, - PrintFn: o.PrintFn, - FilterFn: o.FilterFn, - DryRun: o.DryRun, - Workers: o.Workers, - } -} - -// Builder allows for mocking of resource.Builder -type Builder interface { - // Visitor returns a resource.Visitor that ignores errors that match the given resource.ErrMatchFuncs - Visitor(fns ...resource.ErrMatchFunc) (resource.Visitor, error) -} - -type resourceBuilder struct { - builder *resource.Builder -} - -func (r *resourceBuilder) Visitor(fns ...resource.ErrMatchFunc) (resource.Visitor, error) { - result := r.builder.Do().IgnoreErrors(fns...) 
- return result, result.Err() -} - -type ResourceVisitor struct { - Out io.Writer - - Builder Builder - - SaveFn MigrateActionFunc - PrintFn MigrateActionFunc - FilterFn MigrateFilterFunc - - DryRun bool - - Workers int -} - -func (o *ResourceVisitor) Visit(fn MigrateVisitFunc) error { - dryRun := o.DryRun - summarize := true - actionFn := o.SaveFn - switch { - case o.PrintFn != nil: - actionFn = o.PrintFn - dryRun = true - summarize = false - case dryRun: - actionFn = nil - } - out := o.Out - - // Ignore any resource that does not support GET - visitor, err := o.Builder.Visitor(errors.IsMethodNotSupported, errors.IsNotFound) - if err != nil { - return err - } - - // the producer (result.Visit) uses this to send data to the workers - work := make(chan workData, 10*o.Workers) // 10 slots per worker - // the workers use this to send processed work to the consumer (migrateTracker) - results := make(chan resultData, 10*o.Workers) // 10 slots per worker - - // migrateTracker tracks stats for this migrate run - t := &migrateTracker{ - out: out, - dryRun: dryRun, - resourcesWithErrors: sets.NewString(), - results: results, - } - - // use a wait group to track when workers have finished processing - workersWG := sync.WaitGroup{} - // spawn and track all workers - for w := 0; w < o.Workers; w++ { - workersWG.Add(1) - go func() { - defer workersWG.Done() - worker := &migrateWorker{ - retries: 10, // how many times should this worker retry per resource - work: work, - results: results, - migrateFn: fn, - actionFn: actionFn, - filterFn: o.FilterFn, - } - worker.run() - }() - } - - // use another wait group to track when the consumer (migrateTracker) has finished tracking stats - consumerWG := sync.WaitGroup{} - consumerWG.Add(1) - go func() { - defer consumerWG.Done() - t.run() - }() - - err = visitor.Visit(func(info *resource.Info, err error) error { - // send data from producer visitor to workers - work <- workData{info: info, err: err} - return nil - }) - - // signal that we are done sending work - close(work) - // wait for the workers to finish processing - workersWG.Wait() - // signal that all workers have processed and sent completed work - close(results) - // wait for the consumer to finish recording the results from processing - consumerWG.Wait() - - if summarize { - if dryRun { - fmt.Fprintf(out, "summary (dry run): total=%d errors=%d ignored=%d unchanged=%d migrated=%d\n", t.found, t.errors, t.ignored, t.unchanged, t.found-t.errors-t.unchanged-t.ignored) - } else { - fmt.Fprintf(out, "summary: total=%d errors=%d ignored=%d unchanged=%d migrated=%d\n", t.found, t.errors, t.ignored, t.unchanged, t.found-t.errors-t.unchanged-t.ignored) - } - } - - if t.resourcesWithErrors.Len() > 0 { - fmt.Fprintf(out, "info: to rerun only failing resources, add --include=%s\n", strings.Join(t.resourcesWithErrors.List(), ",")) - } - - switch { - case err != nil: - fmt.Fprintf(out, "error: exited without processing all resources: %v\n", err) - err = kcmdutil.ErrExit - case t.errors > 0: - fmt.Fprintf(out, "error: %d resources failed to migrate\n", t.errors) - err = kcmdutil.ErrExit - } - return err -} - -// ErrUnchanged may be returned by MigrateActionFunc to indicate that the object -// did not need migration (but that could only be determined when the action was taken). -var ErrUnchanged = fmt.Errorf("migration was not necessary") - -// ErrRecalculate may be returned by MigrateActionFunc to indicate that the object -// has changed and needs to have its information recalculated prior to being saved. 
-// Use when a resource requires multiple API operations to persist (for instance, -// both status and spec must be changed). -var ErrRecalculate = fmt.Errorf("recalculate migration") - -// MigrateError is an exported alias to error to allow external packages to use ErrRetriable and ErrNotRetriable -type MigrateError error - -// ErrRetriable is a wrapper for an error that a migrator may use to indicate the -// specific error can be retried. -type ErrRetriable struct { - MigrateError -} - -func (ErrRetriable) Temporary() bool { return true } - -// ErrNotRetriable is a wrapper for an error that a migrator may use to indicate the -// specific error cannot be retried. -type ErrNotRetriable struct { - MigrateError -} - -func (ErrNotRetriable) Temporary() bool { return false } - -// TemporaryError is a wrapper interface that is used to determine if an error can be retried. -type TemporaryError interface { - error - // Temporary should return true if this is a temporary error - Temporary() bool -} - -// attemptResult is an enumeration of the result of a migration -type attemptResult int - -const ( - attemptResultSuccess attemptResult = iota - attemptResultError - attemptResultUnchanged - attemptResultIgnore -) - -// workData stores a single item of work that needs to be processed by a worker -type workData struct { - info *resource.Info - err error -} - -// resultData stores the processing result from a worker -// note that in the case of retries, a single workData can produce multiple resultData -type resultData struct { - found bool - retry bool - result attemptResult - data workData -} - -// migrateTracker abstracts transforming and saving resources and can be used to keep track -// of how many total resources have been updated. -type migrateTracker struct { - out io.Writer - - dryRun bool - - found, ignored, unchanged, errors int - - resourcesWithErrors sets.String - - results <-chan resultData -} - -// report prints a message to out that includes info about the current resource. If the optional error is -// provided it will be written as well. 
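workData and resultData above are the two halves of the pipeline that Visit assembles: one producer goroutine feeds a buffered work channel, a pool of workers turns workData into resultData, and a single migrateTracker goroutine consumes the results, so the stats fields need no locking. The shutdown ordering is the subtle part: close(work), wait for the workers, close(results), wait for the consumer. A minimal sketch of the same shape with the migration specifics stripped out:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 4
	work := make(chan int, 10*workers)    // producer -> workers
	results := make(chan int, 10*workers) // workers -> single consumer

	var workersWG sync.WaitGroup
	for w := 0; w < workers; w++ {
		workersWG.Add(1)
		go func() {
			defer workersWG.Done()
			for item := range work {
				results <- item * item // stand-in for migrate-and-save
			}
		}()
	}

	var consumerWG sync.WaitGroup
	consumerWG.Add(1)
	total := 0 // only the consumer goroutine touches this
	go func() {
		defer consumerWG.Done()
		for r := range results {
			total += r
		}
	}()

	for i := 1; i <= 100; i++ { // the producer; in Visit this is the resource visitor
		work <- i
	}

	close(work)       // no more work will be produced
	workersWG.Wait()  // every worker has drained and exited
	close(results)    // therefore nothing can send on results anymore
	consumerWG.Wait() // the consumer has recorded every result
	fmt.Println("sum of squares:", total)
}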
-func (t *migrateTracker) report(prefix string, info *resource.Info, err error) { - ns := info.Namespace - if len(ns) > 0 { - ns = " -n " + ns - } - groupResource := info.Mapping.Resource.GroupResource() - groupResourceStr := (&groupResource).String() - if err != nil { - fmt.Fprintf(t.out, "E%s %-10s%s %s/%s: %v\n", timeStampNow(), prefix, ns, groupResourceStr, info.Name, err) - } else { - fmt.Fprintf(t.out, "I%s %-10s%s %s/%s\n", timeStampNow(), prefix, ns, groupResourceStr, info.Name) - } -} - -// run executes until t.results is closed -// it processes each result and updates its stats as appropriate -func (t *migrateTracker) run() { - for r := range t.results { - if r.found { - t.found++ - } - if r.retry { - t.report("retry:", r.data.info, r.data.err) - continue // retry attempts do not have results to process - } - - switch r.result { - case attemptResultError: - t.report("error:", r.data.info, r.data.err) - t.errors++ - groupResource := r.data.info.Mapping.Resource.GroupResource() - t.resourcesWithErrors.Insert((&groupResource).String()) - case attemptResultIgnore: - t.ignored++ - if klog.V(2) { - t.report("ignored:", r.data.info, nil) - } - case attemptResultUnchanged: - t.unchanged++ - if klog.V(2) { - t.report("unchanged:", r.data.info, nil) - } - case attemptResultSuccess: - if klog.V(1) { - if t.dryRun { - t.report("migrated (dry run):", r.data.info, nil) - } else { - t.report("migrated:", r.data.info, nil) - } - } - } - } -} - -// migrateWorker processes data sent from t.work and sends the results to t.results -type migrateWorker struct { - retries int - work <-chan workData - results chan<- resultData - migrateFn MigrateVisitFunc - actionFn MigrateActionFunc - filterFn MigrateFilterFunc -} - -// run processes data until t.work is closed -func (t *migrateWorker) run() { - for data := range t.work { - // if we have no error and a filter func, determine if we need to ignore this resource - if data.err == nil && t.filterFn != nil { - ok, err := t.filterFn(data.info) - // error if we cannot figure out how to filter this resource - if err != nil { - t.results <- resultData{found: true, result: attemptResultError, data: workData{info: data.info, err: err}} - continue - } - // we want to ignore this resource - if !ok { - t.results <- resultData{found: true, result: attemptResultIgnore, data: data} - continue - } - } - - // there was an error so do not attempt to process this data - if data.err != nil { - t.results <- resultData{result: attemptResultError, data: data} - continue - } - - // we have no error and the resource was not ignored, so attempt to process it - // try to invoke the migrateFn and saveFn on info, retrying any recalculation requests up to t.retries times - result, err := t.try(data.info, t.retries) - t.results <- resultData{found: true, result: result, data: workData{info: data.info, err: err}} - } -} - -// try will mutate the info and attempt to save, recalculating if there are any retries left. -// The result of the attempt or an error will be returned. 
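The recursion described in the comment above bottoms out after a fixed budget, so try is effectively a bounded retry loop: run the action, and if the failure is classified as retriable, recalculate and go again. The same control flow as a flat, self-contained sketch:

package main

import (
	"errors"
	"fmt"
)

var errRecalculate = errors.New("recalculate migration")

// attemptWithRetries mirrors the bounded-retry shape of try below:
// re-run the action while the error is retriable and budget remains.
func attemptWithRetries(retries int, action func() error, retriable func(error) bool) error {
	for {
		err := action()
		if err == nil || !retriable(err) || retries == 0 {
			return err
		}
		retries--
	}
}

func main() {
	calls := 0
	err := attemptWithRetries(10, func() error {
		calls++
		if calls < 3 {
			return errRecalculate // simulate two recalculation requests
		}
		return nil
	}, func(err error) bool { return errors.Is(err, errRecalculate) })
	fmt.Println(calls, err) // 3 <nil>
}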
-func (t *migrateWorker) try(info *resource.Info, retries int) (attemptResult, error) { - reporter, err := t.migrateFn(info) - if err != nil { - return attemptResultError, err - } - if reporter == nil { - return attemptResultIgnore, nil - } - if !reporter.Changed() { - return attemptResultUnchanged, nil - } - if t.actionFn != nil { - if err := t.actionFn(info, reporter); err != nil { - if err == ErrUnchanged { - return attemptResultUnchanged, nil - } - if canRetry(err) { - if retries > 0 { - if bool(klog.V(1)) && err != ErrRecalculate { - // signal that we had to retry on this resource - t.results <- resultData{retry: true, data: workData{info: info, err: err}} - } - result, err := t.try(info, retries-1) - switch result { - case attemptResultUnchanged, attemptResultIgnore: - result = attemptResultSuccess - } - return result, err - } - } - return attemptResultError, err - } - } - return attemptResultSuccess, nil -} - -// canRetry returns true if the provided error indicates a retry is possible. -func canRetry(err error) bool { - if temp, ok := err.(TemporaryError); ok && temp.Temporary() { - return true - } - return err == ErrRecalculate -} - -// DefaultRetriable adds retry information to the provided error, and will refresh the -// info if the client info is stale. If the refresh fails the error is made fatal. -// All other errors are left in their natural state - they will not be retried unless -// they define a Temporary() method that returns true. -func DefaultRetriable(info *resource.Info, err error) error { - switch { - case err == nil: - return nil - case errors.IsNotFound(err): - // tolerate the deletion of resources during migration - // report unchanged since we did not actually migrate this object - return ErrUnchanged - case errors.IsMethodNotSupported(err): - return ErrNotRetriable{err} - case errors.IsConflict(err): - if refreshErr := info.Get(); refreshErr != nil { - // tolerate the deletion of resources during migration - // report unchanged since we did not actually migrate this object - if errors.IsNotFound(refreshErr) { - return ErrUnchanged - } - return ErrNotRetriable{err} - } - return ErrRetriable{err} - case errors.IsServerTimeout(err): - return ErrRetriable{err} - default: - return err - } -} - -// FindAllCanonicalResources returns all resources that: -// 1. map directly to their kind (Kind -> Resource -> Kind) -// 2. are not subresources -// 3. can be listed and updated -// Note that this may return some virtual resources (like imagestreamtags) that can be otherwise represented. -// TODO: add a field to APIResources for "virtual" (or that points to the canonical resource). 
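canRetry above never inspects concrete error types: anything that satisfies TemporaryError gets to vote on its own retriability, which is why DefaultRetriable can wrap API errors in ErrRetriable or ErrNotRetriable and the worker stays generic. A small sketch of that classification in isolation:

package main

import (
	"errors"
	"fmt"
)

// temporaryError restates the TemporaryError contract from above.
type temporaryError interface {
	error
	Temporary() bool
}

type retriable struct{ error }

func (retriable) Temporary() bool { return true }

type notRetriable struct{ error }

func (notRetriable) Temporary() bool { return false }

// shouldRetry mirrors canRetry: errors opt in via Temporary().
func shouldRetry(err error) bool {
	if temp, ok := err.(temporaryError); ok {
		return temp.Temporary()
	}
	return false
}

func main() {
	fmt.Println(shouldRetry(retriable{errors.New("server timeout")}))   // true
	fmt.Println(shouldRetry(notRetriable{errors.New("not supported")})) // false
	fmt.Println(shouldRetry(errors.New("plain error")))                 // false
}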
-func FindAllCanonicalResources(d discovery.ServerResourcesInterface, m meta.RESTMapper) ([]schema.GroupResource, error) { - set := make(map[schema.GroupResource]struct{}) - - // this call doesn't fail on aggregated apiserver failures - all, err := d.ServerResources() - if err != nil { - return nil, err - } - - for _, serverResource := range all { - gv, err := schema.ParseGroupVersion(serverResource.GroupVersion) - if err != nil { - continue - } - for _, r := range serverResource.APIResources { - // ignore subresources - if strings.Contains(r.Name, "/") { - continue - } - // ignore resources that cannot be listed and updated - if !sets.NewString(r.Verbs...).HasAll("list", "update") { - continue - } - // because discovery info doesn't tell us whether the object is virtual or not, perform a lookup - // by the kind for resource (which should be the canonical resource) and then verify that the reverse - // lookup (KindsFor) does not error. - if mapping, err := m.RESTMapping(schema.GroupKind{Group: gv.Group, Kind: r.Kind}, gv.Version); err == nil { - if _, err := m.KindsFor(mapping.Resource); err == nil { - set[mapping.Resource.GroupResource()] = struct{}{} - } - } - } - } - - var groupResources []schema.GroupResource - for k := range set { - groupResources = append(groupResources, k) - } - sort.Sort(groupResourcesByName(groupResources)) - return groupResources, nil -} - -type groupResourcesByName []schema.GroupResource - -func (g groupResourcesByName) Len() int { return len(g) } -func (g groupResourcesByName) Less(i, j int) bool { - if g[i].Resource < g[j].Resource { - return true - } - if g[i].Resource > g[j].Resource { - return false - } - return g[i].Group < g[j].Group -} -func (g groupResourcesByName) Swap(i, j int) { g[i], g[j] = g[j], g[i] } diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/migrator_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/migrator_test.go deleted file mode 100644 index 0509d1dab113..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/migrator_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package migrate - -import ( - "fmt" - "runtime" - "sync" - "testing" - - "k8s.io/klog" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/cli-runtime/pkg/resource" -) - -// TestResourceVisitor_Visit is used to check for race conditions -func TestResourceVisitor_Visit(t *testing.T) { - var level klog.Level - // save its original value - origVerbosity := level.Get() - // set log level high enough so we write to ResourceOptions.Out on each success - level.Set("1") - // restore the original flag value when we return - defer func() { - level.Set(fmt.Sprintf("%d", origVerbosity)) - }() - - type fields struct { - Out mapWriter - Builder testBuilder - SaveFn *countSaveFn - PrintFn MigrateActionFunc - FilterFn MigrateFilterFunc - DryRun bool - Workers int - } - type args struct { - fn MigrateVisitFunc - } - tests := []struct { - name string - fields fields - args args - wantErr bool - }{ - { - name: "migrate storage race detection", - fields: fields{ - Out: make(mapWriter), // detect writes via multiple goroutines - Builder: testBuilder(5000), // send a specific amount of infos to SaveFn - SaveFn: new(countSaveFn), // make sure we process all resources - PrintFn: nil, // must be nil to use SaveFn - FilterFn: nil, // we want no filtering - DryRun: false, // must be false to use SaveFn - Workers: 32 * runtime.NumCPU(), // same as migrate storage - }, - args: args{ - fn: AlwaysRequiresMigration, // same as migrate storage - }, - wantErr: false, // 
should never error - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - o := &ResourceVisitor{ - Out: tt.fields.Out, - Builder: tt.fields.Builder, - SaveFn: tt.fields.SaveFn.save, - PrintFn: tt.fields.PrintFn, - FilterFn: tt.fields.FilterFn, - DryRun: tt.fields.DryRun, - Workers: tt.fields.Workers, - } - // how many infos are we expected to process - expectedInfos := int(tt.fields.Builder) - // countSaveFn will spawn one goroutine per info it sees - tt.fields.SaveFn.w.Add(expectedInfos) - // process the infos - if err := o.Visit(tt.args.fn); (err != nil) != tt.wantErr { - t.Errorf("ResourceVisitor.Visit() error = %v, wantErr %v", err, tt.wantErr) - } - // wait for all countSaveFn goroutines to finish - tt.fields.SaveFn.w.Wait() - // check that we saw the correct amount of infos throughout - writes := len(tt.fields.Out) - 1 // minus one for the summary output - saves := tt.fields.SaveFn.n - if expectedInfos != writes || expectedInfos != saves { - t.Errorf("ResourceVisitor.Visit() incorrect counts seen, expectedInfos=%d writes=%d saves=%d out=%v", - expectedInfos, writes, saves, tt.fields.Out) - } - }) - } -} - -// mapWriter is an io.Writer that is guaranteed to panic if accessed via multiple goroutines at the same time -type mapWriter map[int]string - -func (m mapWriter) Write(p []byte) (n int, err error) { - l := len(m) // makes it easy to track how many times Write is called - m[l] = string(p) // string for debugging - return len(p), nil -} - -// countSaveFn is used to build a MigrateActionFunc (SaveFn) that records how many times it was called -// goroutine safe -type countSaveFn struct { - w sync.WaitGroup - m sync.Mutex - n int -} - -func (c *countSaveFn) save(_ *resource.Info, _ Reporter) error { - // do not block workers on the mutex, we do not want to accidentally serialize our code in a way that masks race conditions - go func() { - c.m.Lock() - c.n++ - c.m.Unlock() - c.w.Done() - }() - return nil -} - -// testBuilder emits a resource.Visitor that calls resource.VisitorFunc n times -type testBuilder int - -func (t testBuilder) Visitor(_ ...resource.ErrMatchFunc) (resource.Visitor, error) { - infos := make(resource.InfoListVisitor, t) // the resource.VisitorFunc will be called t times - for i := range infos { - infos[i] = &resource.Info{Mapping: &meta.RESTMapping{}} // just enough to prevent NPEs - } - return infos, nil -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/storage/storage.go b/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/storage/storage.go deleted file mode 100644 index 818d32c69b63..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/storage/storage.go +++ /dev/null @@ -1,395 +0,0 @@ -package storage - -import ( - "fmt" - "runtime" - "time" - - "github.com/spf13/cobra" - "golang.org/x/time/rate" - "k8s.io/klog" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/cli-runtime/pkg/resource" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/rest" - "k8s.io/client-go/util/flowcontrol" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/util/templates" - - "github.com/openshift/oc/pkg/cli/admin/migrate" -) - -var ( - internalMigrateStorageLong = templates.LongDesc(` - Migrate internal object storage via update - - This command invokes an update 
operation on every API object reachable by the caller. This forces - the server to write to the underlying storage if the object representation has changed. Use this - command to ensure that the most recent storage changes have been applied to all objects (storage - version, storage encoding, any newer object defaults). - - To operate on a subset of resources, use the --include flag. If you encounter errors during a run - the command will output a list of resources that received errors, which you can then re-run the - command on. You may also specify --from-key and --to-key to restrict the set of resource names - to operate on (key is NAMESPACE/NAME for resources in namespaces or NAME for cluster scoped - resources). --from-key is inclusive if specified, while --to-key is exclusive. - - By default, events are not migrated since they expire within a very short period of time. If you - have significantly increased the expiration time of events, run a migration with --include=events - - WARNING: This is a slow command and will put significant load on an API server. It may also - result in significant intra-cluster traffic.`) - - internalMigrateStorageExample = templates.Examples(` - # Perform an update of all objects - %[1]s - - # Only migrate pods - %[1]s --include=pods - - # Only pods that are in namespaces starting with "bar" - %[1]s --include=pods --from-key=bar/ --to-key=bar/\xFF`) -) - -const ( - // longThrottleLatency defines threshold for logging requests. All requests being - // throttle for more than longThrottleLatency will be logged. - longThrottleLatency = 50 * time.Millisecond - - // 1 MB == 1000 KB - mbToKB = 1000 - // 1 KB == 1000 bytes - kbToBytes = 1000 - // 1 byte == 8 bits - // we use a float to avoid truncating on division - byteToBits = 8.0 - - // consider any network IO limit less than 30 Mbps to be "slow" - // we use this as a heuristic to prevent ResourceExpired errors caused by paging - slowBandwidth = 30 -) - -type MigrateAPIStorageOptions struct { - migrate.ResourceOptions - - // Total network IO in megabits per second across all workers. - // Zero means "no rate limit." - bandwidth int - // used to enforce bandwidth value - limiter *tokenLimiter - - // unstructured client used to make no-op PUTs - client dynamic.Interface -} - -func NewMigrateAPIStorageOptions(streams genericclioptions.IOStreams) *MigrateAPIStorageOptions { - return &MigrateAPIStorageOptions{ - - bandwidth: 10, - - ResourceOptions: *migrate.NewResourceOptions(streams). - WithIncludes([]string{"*"}). - WithUnstructured(). 
- WithExcludes([]schema.GroupResource{ - // openshift resources: - {Resource: "appliedclusterresourcequotas"}, - {Resource: "imagestreamimages"}, {Resource: "imagestreamtags"}, {Resource: "imagestreammappings"}, {Resource: "imagestreamimports"}, - {Resource: "projectrequests"}, {Resource: "projects"}, - {Resource: "clusterrolebindings"}, {Resource: "rolebindings"}, - {Resource: "clusterroles"}, {Resource: "roles"}, - {Resource: "resourceaccessreviews"}, {Resource: "localresourceaccessreviews"}, {Resource: "subjectaccessreviews"}, - {Resource: "selfsubjectrulesreviews"}, {Resource: "localsubjectaccessreviews"}, - {Resource: "useridentitymappings"}, - {Resource: "podsecuritypolicyreviews"}, {Resource: "podsecuritypolicyselfsubjectreviews"}, {Resource: "podsecuritypolicysubjectreviews"}, - - // kubernetes resources: - {Resource: "bindings"}, - {Resource: "deploymentconfigrollbacks"}, - {Resource: "events"}, - {Resource: "componentstatuses"}, - {Resource: "replicationcontrollerdummies.extensions"}, - {Resource: "podtemplates"}, - {Resource: "selfsubjectaccessreviews", Group: "authorization.k8s.io"}, {Resource: "localsubjectaccessreviews", Group: "authorization.k8s.io"}, - }). - WithOverlappingResources([]sets.String{ - // openshift resources: - sets.NewString("deploymentconfigs.apps.openshift.io", "deploymentconfigs"), - - sets.NewString("clusterpolicies.authorization.openshift.io", "clusterpolicies"), - sets.NewString("clusterpolicybindings.authorization.openshift.io", "clusterpolicybindings"), - sets.NewString("clusterrolebindings.authorization.openshift.io", "clusterrolebindings"), - sets.NewString("clusterroles.authorization.openshift.io", "clusterroles"), - sets.NewString("localresourceaccessreviews.authorization.openshift.io", "localresourceaccessreviews"), - sets.NewString("localsubjectaccessreviews.authorization.openshift.io", "localsubjectaccessreviews"), - sets.NewString("policies.authorization.openshift.io", "policies"), - sets.NewString("policybindings.authorization.openshift.io", "policybindings"), - sets.NewString("resourceaccessreviews.authorization.openshift.io", "resourceaccessreviews"), - sets.NewString("rolebindingrestrictions.authorization.openshift.io", "rolebindingrestrictions"), - sets.NewString("rolebindings.authorization.openshift.io", "rolebindings"), - sets.NewString("roles.authorization.openshift.io", "roles"), - sets.NewString("selfsubjectrulesreviews.authorization.openshift.io", "selfsubjectrulesreviews"), - sets.NewString("subjectaccessreviews.authorization.openshift.io", "subjectaccessreviews"), - sets.NewString("subjectrulesreviews.authorization.openshift.io", "subjectrulesreviews"), - - sets.NewString("builds.build.openshift.io", "builds"), - sets.NewString("buildconfigs.build.openshift.io", "buildconfigs"), - - sets.NewString("images.image.openshift.io", "images"), - sets.NewString("imagesignatures.image.openshift.io", "imagesignatures"), - sets.NewString("imagestreamimages.image.openshift.io", "imagestreamimages"), - sets.NewString("imagestreamimports.image.openshift.io", "imagestreamimports"), - sets.NewString("imagestreammappings.image.openshift.io", "imagestreammappings"), - sets.NewString("imagestreams.image.openshift.io", "imagestreams"), - sets.NewString("imagestreamtags.image.openshift.io", "imagestreamtags"), - - sets.NewString("clusternetworks.network.openshift.io", "clusternetworks"), - sets.NewString("egressnetworkpolicies.network.openshift.io", "egressnetworkpolicies"), - sets.NewString("hostsubnets.network.openshift.io", "hostsubnets"), - 
sets.NewString("netnamespaces.network.openshift.io", "netnamespaces"), - - sets.NewString("oauthaccesstokens.oauth.openshift.io", "oauthaccesstokens"), - sets.NewString("oauthauthorizetokens.oauth.openshift.io", "oauthauthorizetokens"), - sets.NewString("oauthclientauthorizations.oauth.openshift.io", "oauthclientauthorizations"), - sets.NewString("oauthclients.oauth.openshift.io", "oauthclients"), - - sets.NewString("projectrequests.project.openshift.io", "projectrequests"), - sets.NewString("projects.project.openshift.io", "projects"), - - sets.NewString("appliedclusterresourcequotas.quota.openshift.io", "appliedclusterresourcequotas"), - sets.NewString("clusterresourcequotas.quota.openshift.io", "clusterresourcequotas"), - - sets.NewString("routes.route.openshift.io", "routes"), - - sets.NewString("podsecuritypolicyreviews.security.openshift.io", "podsecuritypolicyreviews"), - sets.NewString("podsecuritypolicyselfsubjectreviews.security.openshift.io", "podsecuritypolicyselfsubjectreviews"), - sets.NewString("podsecuritypolicysubjectreviews.security.openshift.io", "podsecuritypolicysubjectreviews"), - - sets.NewString("processedtemplates.template.openshift.io", "processedtemplates"), - sets.NewString("templates.template.openshift.io", "templates"), - - sets.NewString("groups.user.openshift.io", "groups"), - sets.NewString("identities.user.openshift.io", "identities"), - sets.NewString("useridentitymappings.user.openshift.io", "useridentitymappings"), - sets.NewString("users.user.openshift.io", "users"), - - // kubernetes resources: - sets.NewString("horizontalpodautoscalers.autoscaling", "horizontalpodautoscalers.extensions"), - sets.NewString("jobs.batch", "jobs.extensions"), - }), - } -} - -// NewCmdMigrateAPIStorage implements a MigrateStorage command -func NewCmdMigrateAPIStorage(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - o := NewMigrateAPIStorageOptions(streams) - cmd := &cobra.Command{ - Use: name, // TODO do something useful here - Short: "Update the stored version of API objects", - Long: internalMigrateStorageLong, - Example: fmt.Sprintf(internalMigrateStorageExample, fullName), - Run: func(cmd *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Complete(f, cmd, args)) - kcmdutil.CheckErr(o.Validate()) - kcmdutil.CheckErr(o.Run()) - }, - } - o.ResourceOptions.Bind(cmd) - - // opt-in to allow parallel execution since we know this command is goroutine safe - // storage migration is IO bound so we make sure that we have enough workers to saturate the rate limiter - o.Workers = 32 * runtime.NumCPU() - // expose a flag to allow rate limiting the workers based on network bandwidth - cmd.Flags().IntVar(&o.bandwidth, "bandwidth", o.bandwidth, - "Average network bandwidth measured in megabits per second (Mbps) to use during storage migration. Zero means no limit. 
This flag is alpha and may change in the future.") - - // remove flags that do not make sense - cmd.Flags().MarkDeprecated("confirm", "storage migration does not support dry run, this flag is ignored") - cmd.Flags().MarkHidden("confirm") - cmd.Flags().MarkDeprecated("output", "storage migration does not support dry run, this flag is ignored") - cmd.Flags().MarkHidden("output") - - return cmd -} - -func (o *MigrateAPIStorageOptions) Complete(f kcmdutil.Factory, c *cobra.Command, args []string) error { - // force unset output, it does not make sense for this command - if err := c.Flags().Set("output", ""); err != nil { - return err - } - // force confirm, dry run does not make sense for this command - o.Confirm = true - - o.ResourceOptions.SaveFn = o.save - if err := o.ResourceOptions.Complete(f, c); err != nil { - return err - } - - // do not limit the builder as we handle throttling via our own limiter - // thus the list calls that the builder makes are never rate limited - // we estimate their IO usage in our call to o.limiter.take - always := flowcontrol.NewFakeAlwaysRateLimiter() - o.Builder.TransformRequests( - func(req *rest.Request) { - req.Throttle(always) - }, - ) - - // bandwidth < 0 means error - // bandwidth == 0 means "no limit", we use a nil check to minimize overhead - // bandwidth > 0 means limit accordingly - if o.bandwidth > 0 { - o.limiter = newTokenLimiter(o.bandwidth, o.Workers) - - // disable paging when using a low rate limit to prevent ResourceExpired errors - if o.bandwidth < slowBandwidth { - o.Builder.RequestChunksOf(0) - } - } - - clientConfig, err := f.ToRESTConfig() - if err != nil { - return err - } - - // We do not have a way to access the REST client that dynamic.NewForConfig uses - // Thus we cannot use resource.NewClientWithOptions with our flowcontrol.NewFakeAlwaysRateLimiter - // To avoid any possibility of rate limiting, use an absurdly high burst and QPS - // We handle throttling via our own limiter - clientConfigCopy := rest.CopyConfig(clientConfig) - clientConfigCopy.Burst = 99999 - clientConfigCopy.QPS = 99999 - - o.client, err = dynamic.NewForConfig(clientConfigCopy) - if err != nil { - return err - } - - return nil -} - -func (o MigrateAPIStorageOptions) Validate() error { - if o.bandwidth < 0 { - return fmt.Errorf("invalid value %d for --bandwidth, must be at least 0", o.bandwidth) - } - return o.ResourceOptions.Validate() -} - -func (o MigrateAPIStorageOptions) Run() error { - return o.ResourceOptions.Visitor().Visit(migrate.AlwaysRequiresMigration) -} - -// save invokes the API to alter an object. The reporter passed to this method is the same returned by -// the migration visitor method (for this type, transformAPIStorage). It should return an error -// if the input type cannot be saved. It returns migrate.ErrRecalculate if migration should be re-run -// on the provided object. -func (o *MigrateAPIStorageOptions) save(info *resource.Info, reporter migrate.Reporter) error { - switch oldObject := info.Object.(type) { - case *unstructured.Unstructured: - // a nil limiter means "no limit" - if o.limiter != nil { - // we rate limit after performing all operations to make us less sensitive to conflicts - // use a defer to make sure we always rate limit even if the PUT fails - defer o.rateLimit(oldObject) - } - - // we are relying on unstructured types being lossless and unchanging - // across a decode and encode round trip (otherwise this command will mutate data) - newObject, err := o.client. - Resource(info.Mapping.Resource). 
- Namespace(info.Namespace). - Update(oldObject, metav1.UpdateOptions{}) - // storage migration is special in that all it needs to do is a no-op update to cause - // the api server to migrate the object to the preferred version. thus if we encounter - // a conflict, we know that something updated the object and we no longer need to do - // anything - if the object needed migration, the api server has already migrated it. - if errors.IsConflict(err) { - return migrate.ErrUnchanged - } - if err != nil { - return migrate.DefaultRetriable(info, err) - } - if newObject.GetResourceVersion() == oldObject.GetResourceVersion() { - return migrate.ErrUnchanged - } - default: - return fmt.Errorf("invalid type %T passed to storage migration: %v", oldObject, oldObject) - } - return nil -} - -func (o *MigrateAPIStorageOptions) rateLimit(oldObject *unstructured.Unstructured) { - // we need to approximate how many bytes this object was on the wire - // the simplest way to do that is to encode it back into bytes - // this is wasteful but we are trying to artificially slow down the worker anyway - var dataLen int - if data, err := oldObject.MarshalJSON(); err != nil { - // this should never happen - klog.Errorf("failed to marshall %#v: %v", oldObject, err) - // but in case it somehow does happen, assume the object was - // larger than most objects so we still rate limit "enough" - dataLen = 8192 - } else { - dataLen = len(data) - } - - // we need to account for the initial list operation which is roughly another PUT per object - // thus we amortize the cost of the list by: - // (1 LIST) / (N items) + 1 PUT == 2 PUTs == 2 * size of data - - // this is a slight overestimate since every retry attempt will still try to account for - // the initial list operation. this should not be an issue since retries are not that common - // and the rate limiting is best effort anyway. going slightly slower is acceptable. 
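// Rough arithmetic for the default of --bandwidth=10 (these numbers are
// illustrative, not from the original source): the limiter refills at
// 10 * 1000 * 1000 / 8 = 1,250,000 tokens (bytes) per second. An 8 KiB
// object charges 2 * 8192 = 16,384 tokens under the 2x rule above, so
// all workers combined sustain roughly 1,250,000 / 16,384, about 76
// such updates per second.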
- latency := o.limiter.take(2 * dataLen) - // mimic rest.Request.tryThrottle logging logic - if latency > longThrottleLatency { - klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, "PUT", oldObject.GetSelfLink()) - } -} - -type tokenLimiter struct { - burst int - rateLimiter *rate.Limiter - nowFunc func() time.Time // for unit testing -} - -// take n bytes from the rateLimiter, and sleep if needed -// return the length of the sleep -// is goroutine safe -func (t *tokenLimiter) take(n int) time.Duration { - if n <= 0 { - return 0 - } - // if n > burst, we need to split the reservation otherwise ReserveN will fail - var extra time.Duration - for ; n > t.burst; n -= t.burst { - extra += t.getDuration(t.burst) - } - // calculate the remaining sleep time - total := t.getDuration(n) + extra - time.Sleep(total) - return total -} - -func (t *tokenLimiter) getDuration(n int) time.Duration { - now := t.nowFunc() - reservation := t.rateLimiter.ReserveN(now, n) - if !reservation.OK() { - // this should never happen but we do not want to hang a worker forever - klog.Errorf("unable to get rate limited reservation, burst=%d n=%d", t.burst, n) - return time.Minute - } - return reservation.DelayFrom(now) -} - -// rate limit based on bandwidth after conversion to bytes -// we use a burst value that scales linearly with the number of workers -func newTokenLimiter(bandwidth, workers int) *tokenLimiter { - burst := 100 * kbToBytes * workers // 100 KB of burst per worker - return &tokenLimiter{burst: burst, rateLimiter: rate.NewLimiter(rate.Limit(bandwidth*mbToKB*kbToBytes)/byteToBits, burst), nowFunc: time.Now} -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/storage/storage_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/storage/storage_test.go deleted file mode 100644 index ed9e9115d0c9..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/storage/storage_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package storage - -import ( - "reflect" - "testing" - "time" - - "golang.org/x/time/rate" -) - -// Test_tokenLimiter_take makes sure that the -1/0/+1 boundary cases work with burst -// The exact values in "want" are generally irrelevant because they are controlled by rate.Limiter -// They must be kept under one minute to make sure we can detect a reservation failure -func Test_tokenLimiter_take(t *testing.T) { - nowFunc := func() time.Time { - return time.Time{} // current time is always 0 - } - - type args struct { - n int - } - tests := []struct { - name string - burst, limit int - args args - want time.Duration - }{ - { - name: "take 0", - burst: 1, - limit: 1, - args: args{ - n: 0, - }, - want: 0, - }, - { - name: "take -1", - burst: 1, - limit: 1, - args: args{ - n: -1, - }, - want: 0, - }, - { - name: "take -1 based on burst", - burst: 4, - limit: 1, - args: args{ - n: 3, - }, - want: 3 * time.Second, - }, - { - name: "take exact based on burst", - burst: 4, - limit: 1, - args: args{ - n: 4, - }, - want: 4 * time.Second, - }, - { - name: "take +1 based on burst", - burst: 4, - limit: 1, - args: args{ - n: 5, - }, - want: 9 * time.Second, - }, - { - name: "take +many based on burst", - burst: 6, - limit: 100, - args: args{ - n: 21, - }, - want: 570 * time.Millisecond, - }, - } - for _, tt := range tests { - tt := tt // capture range variable - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - limiter := &tokenLimiter{burst: tt.burst, rateLimiter: rate.NewLimiter(rate.Limit(tt.limit), tt.burst), nowFunc: nowFunc} - if got := 
limiter.take(tt.args.n); !reflect.DeepEqual(got, tt.want) { - t.Errorf("tokenLimiter.take() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/templateinstances/templateinstances.go b/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/templateinstances/templateinstances.go deleted file mode 100644 index 82f532c2f706..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/templateinstances/templateinstances.go +++ /dev/null @@ -1,173 +0,0 @@ -package templateinstances - -import ( - "fmt" - "sort" - "strings" - - "github.com/spf13/cobra" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/cli-runtime/pkg/resource" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/util/templates" - - templatev1 "github.com/openshift/api/template/v1" - templatev1typedclient "github.com/openshift/client-go/template/clientset/versioned/typed/template/v1" - "github.com/openshift/oc/pkg/cli/admin/migrate" -) - -type apiType struct { - Kind string - APIVersion string -} - -var ( - transforms = map[apiType]apiType{ - // legacy oapi group - {"DeploymentConfig", "v1"}: {"DeploymentConfig", "apps.openshift.io/v1"}, - {"BuildConfig", "v1"}: {"BuildConfig", "build.openshift.io/v1"}, - {"Build", "v1"}: {"Build", "build.openshift.io/v1"}, - {"Route", "v1"}: {"Route", "route.openshift.io/v1"}, - // legacy oapi group, for the lazy - {"DeploymentConfig", ""}: {"DeploymentConfig", "apps.openshift.io/v1"}, - {"BuildConfig", ""}: {"BuildConfig", "build.openshift.io/v1"}, - {"Build", ""}: {"Build", "build.openshift.io/v1"}, - {"Route", ""}: {"Route", "route.openshift.io/v1"}, - } - - internalMigrateTemplateInstancesLong = templates.LongDesc(fmt.Sprintf(` - Migrate Template Instances to refer to new API groups - - This command locates and updates every Template Instance which refers to a particular - group-version-kind to refer to some other, equivalent group-version-kind. 
- - The following transformations will occur: - -%s`, prettyPrintMigrations(transforms))) - - internalMigrateTemplateInstancesExample = templates.Examples(` - # Perform a dry-run of updating all objects - %[1]s - - # To actually perform the update, the confirm flag must be appended - %[1]s --confirm`) -) - -func prettyPrintMigrations(versionKinds map[apiType]apiType) string { - lines := make([]string, 0, len(versionKinds)) - for initial, final := range versionKinds { - line := fmt.Sprintf(" - %s.%s --> %s.%s", initial.APIVersion, initial.Kind, final.APIVersion, final.Kind) - lines = append(lines, line) - } - sort.Strings(lines) - - return strings.Join(lines, "\n") -} - -type MigrateTemplateInstancesOptions struct { - templateClient templatev1typedclient.TemplateV1Interface - - migrate.ResourceOptions - - transforms map[apiType]apiType -} - -func NewMigrateTemplateInstancesOptions(streams genericclioptions.IOStreams) *MigrateTemplateInstancesOptions { - return &MigrateTemplateInstancesOptions{ - ResourceOptions: *migrate.NewResourceOptions(streams).WithIncludes([]string{"templateinstance"}).WithAllNamespaces(), - transforms: transforms, - } -} - -// NewCmdMigrateTemplateInstancesAPI implements a MigrateTemplateInstances command -func NewCmdMigrateTemplateInstances(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - o := NewMigrateTemplateInstancesOptions(streams) - cmd := &cobra.Command{ - Use: name, - Short: "Update TemplateInstances to point to the latest group-version-kinds", - Long: internalMigrateTemplateInstancesLong, - Example: fmt.Sprintf(internalMigrateTemplateInstancesExample, fullName), - Run: func(cmd *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Complete(name, f, cmd, args)) - kcmdutil.CheckErr(o.Validate()) - kcmdutil.CheckErr(o.Run()) - }, - } - o.ResourceOptions.Bind(cmd) - - return cmd -} - -func (o *MigrateTemplateInstancesOptions) Complete(name string, f kcmdutil.Factory, c *cobra.Command, args []string) error { - if len(args) != 0 { - return fmt.Errorf("%s takes no positional arguments", name) - } - - o.ResourceOptions.SaveFn = o.save - if err := o.ResourceOptions.Complete(f, c); err != nil { - return err - } - - clientConfig, err := f.ToRESTConfig() - if err != nil { - return err - } - o.templateClient, err = templatev1typedclient.NewForConfig(clientConfig) - if err != nil { - return err - } - - return nil -} - -func (o MigrateTemplateInstancesOptions) Validate() error { - return o.ResourceOptions.Validate() -} - -func (o MigrateTemplateInstancesOptions) Run() error { - return o.ResourceOptions.Visitor().Visit(func(info *resource.Info) (migrate.Reporter, error) { - return o.checkAndTransform(info.Object) - }) -} - -func (o *MigrateTemplateInstancesOptions) checkAndTransform(templateInstanceRaw runtime.Object) (migrate.Reporter, error) { - templateInstance, wasTI := templateInstanceRaw.(*templatev1.TemplateInstance) - if !wasTI { - return nil, fmt.Errorf("unrecognized object %#v", templateInstanceRaw) - } - - updated := false - for i, obj := range templateInstance.Status.Objects { - if newType, changed := o.transform(obj.Ref); changed { - templateInstance.Status.Objects[i].Ref.Kind = newType.Kind - templateInstance.Status.Objects[i].Ref.APIVersion = newType.APIVersion - updated = true - } - } - - return migrate.ReporterBool(updated), nil -} - -func (o *MigrateTemplateInstancesOptions) transform(ref corev1.ObjectReference) (apiType, bool) { - oldType := apiType{ref.Kind, ref.APIVersion} - if newType, ok := 
o.transforms[oldType]; ok { - return newType, true - } - return oldType, false -} - -// save invokes the API to alter an object. The reporter passed to this method is the same returned by -// the migration visitor method. It should return an error if the input type cannot be saved -// It returns migrate.ErrRecalculate if migration should be re-run on the provided object. -func (o *MigrateTemplateInstancesOptions) save(info *resource.Info, reporter migrate.Reporter) error { - templateInstance, wasTI := info.Object.(*templatev1.TemplateInstance) - if !wasTI { - return fmt.Errorf("unrecognized object %#v", info.Object) - } - - _, err := o.templateClient.TemplateInstances(templateInstance.Namespace).UpdateStatus(templateInstance) - return migrate.DefaultRetriable(info, err) -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/templateinstances/templateinstances_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/templateinstances/templateinstances_test.go deleted file mode 100644 index cad81bcbfc20..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/migrate/templateinstances/templateinstances_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package templateinstances - -import ( - "testing" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - templatev1 "github.com/openshift/api/template/v1" -) - -func TestDefaultMigrations(t *testing.T) { - testCases := []struct { - name string - input metav1.TypeMeta - output metav1.TypeMeta - }{ - { - name: "legacy-dc", - input: metav1.TypeMeta{Kind: "DeploymentConfig", APIVersion: "v1"}, - output: metav1.TypeMeta{Kind: "DeploymentConfig", APIVersion: "apps.openshift.io/v1"}, - }, - { - name: "lazy-dc", - input: metav1.TypeMeta{Kind: "DeploymentConfig"}, - output: metav1.TypeMeta{Kind: "DeploymentConfig", APIVersion: "apps.openshift.io/v1"}, - }, - { - name: "ok-dc", - input: metav1.TypeMeta{Kind: "DeploymentConfig", APIVersion: "apps.openshift.io/v1"}, - output: metav1.TypeMeta{Kind: "DeploymentConfig", APIVersion: "apps.openshift.io/v1"}, - }, - { - name: "legacy-bc", - input: metav1.TypeMeta{Kind: "BuildConfig", APIVersion: "v1"}, - output: metav1.TypeMeta{Kind: "BuildConfig", APIVersion: "build.openshift.io/v1"}, - }, - { - name: "lazy-bc", - input: metav1.TypeMeta{Kind: "BuildConfig"}, - output: metav1.TypeMeta{Kind: "BuildConfig", APIVersion: "build.openshift.io/v1"}, - }, - { - name: "ok-bc", - input: metav1.TypeMeta{Kind: "BuildConfig", APIVersion: "build.openshift.io/v1"}, - output: metav1.TypeMeta{Kind: "BuildConfig", APIVersion: "build.openshift.io/v1"}, - }, - { - name: "legacy-build", - input: metav1.TypeMeta{Kind: "Build", APIVersion: "v1"}, - output: metav1.TypeMeta{Kind: "Build", APIVersion: "build.openshift.io/v1"}, - }, - { - name: "lazy-build", - input: metav1.TypeMeta{Kind: "Build"}, - output: metav1.TypeMeta{Kind: "Build", APIVersion: "build.openshift.io/v1"}, - }, - { - name: "ok-build", - input: metav1.TypeMeta{Kind: "Build", APIVersion: "build.openshift.io/v1"}, - output: metav1.TypeMeta{Kind: "Build", APIVersion: "build.openshift.io/v1"}, - }, - { - name: "legacy-route", - input: metav1.TypeMeta{Kind: "Route", APIVersion: "v1"}, - output: metav1.TypeMeta{Kind: "Route", APIVersion: "route.openshift.io/v1"}, - }, - { - name: "lazy-route", - input: metav1.TypeMeta{Kind: "Route"}, - output: metav1.TypeMeta{Kind: "Route", APIVersion: "route.openshift.io/v1"}, - }, - { - name: "ok-route", - input: metav1.TypeMeta{Kind: "Route", APIVersion: "route.openshift.io/v1"}, - output: 
metav1.TypeMeta{Kind: "Route", APIVersion: "route.openshift.io/v1"}, - }, - { - name: "legacy-other", - input: metav1.TypeMeta{Kind: "Cheddar", APIVersion: "v1"}, - output: metav1.TypeMeta{Kind: "Cheddar", APIVersion: "v1"}, - }, - { - name: "ok-other", - input: metav1.TypeMeta{Kind: "Cheddar", APIVersion: "cheese/v1alpha1"}, - output: metav1.TypeMeta{Kind: "Cheddar", APIVersion: "cheese/v1alpha1"}, - }, - } - - opts := MigrateTemplateInstancesOptions{ - transforms: transforms, - } - - for _, tc := range testCases { - tc := tc // copy the iteration variable to a non-iteration memory location - t.Run(tc.name, func(t *testing.T) { - oldTI := &templatev1.TemplateInstance{ - Status: templatev1.TemplateInstanceStatus{ - Objects: []templatev1.TemplateInstanceObject{ - { - Ref: corev1.ObjectReference{ - APIVersion: tc.input.APIVersion, - Kind: tc.input.Kind, - Name: tc.name, - }, - }, - }, - }, - } - - reporter, err := opts.checkAndTransform(oldTI) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - expectedChanged := tc.input != tc.output - if reporter.Changed() != expectedChanged { - t.Errorf("expected changed to be: %v, but changed=%v", expectedChanged, reporter.Changed()) - } - newVersionKind := metav1.TypeMeta{ - APIVersion: oldTI.Status.Objects[0].Ref.APIVersion, - Kind: oldTI.Status.Objects[0].Ref.Kind, - } - if newVersionKind != tc.output { - t.Errorf("expected the template instance to be updated to %v, yet it ended up as %v", tc.output, newVersionKind) - } - }) - - } -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/mustgather/mustgather.go b/vendor/github.com/openshift/oc/pkg/cli/admin/mustgather/mustgather.go deleted file mode 100644 index 4868d29a93dd..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/mustgather/mustgather.go +++ /dev/null @@ -1,389 +0,0 @@ -package mustgather - -import ( - "fmt" - "math/rand" - "path" - "time" - - "github.com/spf13/cobra" - - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/cli-runtime/pkg/printers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/kubectl/cmd/logs" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" - "k8s.io/kubernetes/pkg/kubectl/scheme" - "k8s.io/kubernetes/pkg/kubectl/util/templates" - - v1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" - "github.com/openshift/library-go/pkg/image/imageutil" - "github.com/openshift/library-go/pkg/operator/resource/retry" - "github.com/openshift/oc/pkg/cli/rsync" -) - -var ( - mustGatherLong = templates.LongDesc(` - Launch a pod to gather debugging information - - This command will launch a pod in a temporary namespace on your - cluster that gathers debugging information, using a copy of the active - client config context, and then downloads the gathered information. - - Experimental: This command is under active development and may change without notice. - `) - - mustGatherExample = templates.Examples(` - # gather default information using the default image and command, writing into ./must-gather.local. 
- oc adm must-gather - - # gather default information with a specific local folder to copy to - oc adm must-gather --dest-dir=/local/directory - - # gather default information using a specific image, command, and pod-dir - oc adm must-gather --image=my/image:tag --source-dir=/pod/directory -- myspecial-command.sh - `) -) - -func NewMustGatherCommand(f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - o := NewMustGatherOptions(streams) - rsyncCommand := rsync.NewCmdRsync(rsync.RsyncRecommendedName, "", f, streams) - cmd := &cobra.Command{ - Use: "must-gather", - Short: "Launch a new instance of a pod for gathering debug information", - Long: mustGatherLong, - Example: mustGatherExample, - Run: func(cmd *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Complete(f, cmd, args)) - kcmdutil.CheckErr(o.Run(rsyncCommand)) - }, - } - - cmd.Flags().StringVar(&o.NodeName, "node-name", o.NodeName, "Set a specific node to use - by default a random master will be used") - cmd.Flags().StringVar(&o.Image, "image", o.Image, "Set a specific image to use, by default the OpenShift's must-gather image will be used.") - cmd.Flags().StringVar(&o.DestDir, "dest-dir", o.DestDir, "Set a specific directory on the local machine to write gathered data to.") - cmd.Flags().StringVar(&o.SourceDir, "source-dir", o.SourceDir, "Set the specific directory on the pod copy the gathered data from.") - cmd.Flags().BoolVar(&o.Keep, "keep", o.Keep, "Do not delete temporary resources when command completes.") - cmd.Flags().MarkHidden("keep") - - return cmd -} - -func NewMustGatherOptions(streams genericclioptions.IOStreams) *MustGatherOptions { - return &MustGatherOptions{ - SourceDir: "/must-gather/", - IOStreams: streams, - } -} - -func (o *MustGatherOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error { - if i := cmd.ArgsLenAtDash(); i != -1 && i < len(args) { - o.Command = args[i:] - } else { - o.Command = args - } - o.RESTClientGetter = f - var err error - if o.Config, err = f.ToRESTConfig(); err != nil { - return err - } - if o.Client, err = kubernetes.NewForConfig(o.Config); err != nil { - return err - } - if len(o.DestDir) == 0 { - o.DestDir = fmt.Sprintf("must-gather.local.%06d", rand.Int63()) - } - if len(o.Image) == 0 { - if o.Image, err = o.resolveMustGatherImage(); err != nil { - o.Image = "quay.io/openshift/origin-must-gather:latest" - fmt.Fprintf(o.Out, "%v\n", err) - } - } - fmt.Fprintf(o.Out, "Using image: %s\n", o.Image) - o.PrinterCreated, err = printers.NewTypeSetter(scheme.Scheme).WrapToPrinter(&printers.NamePrinter{Operation: "created"}, nil) - if err != nil { - return err - } - o.PrinterDeleted, err = printers.NewTypeSetter(scheme.Scheme).WrapToPrinter(&printers.NamePrinter{Operation: "deleted"}, nil) - if err != nil { - return err - } - o.RsyncRshCmd = rsync.DefaultRsyncRemoteShellToUse(cmd.Parent()) - return nil -} - -func (o *MustGatherOptions) resolveMustGatherImage() (string, error) { - imageClient, err := v1.NewForConfig(o.Config) - if err != nil { - return "", err - } - imageStream, err := imageClient.ImageStreams("openshift").Get("must-gather", metav1.GetOptions{}) - if err != nil { - return "", err - } - var image string - var ok bool - if image, ok = imageutil.ResolveLatestTaggedImage(imageStream, "latest"); !ok { - return "", fmt.Errorf("unable to resolve the openshift imagestream tag must-gather:latest") - } - return image, nil -} - -type MustGatherOptions struct { - genericclioptions.IOStreams - - Config *rest.Config - Client 
kubernetes.Interface - RESTClientGetter genericclioptions.RESTClientGetter - - NodeName string - DestDir string - SourceDir string - Image string - Command []string - Keep bool - - RsyncRshCmd string - - PrinterCreated printers.ResourcePrinter - PrinterDeleted printers.ResourcePrinter -} - -// Run creates and runs a must-gather pod.d -func (o *MustGatherOptions) Run(rsyncCmd *cobra.Command) error { - if len(o.Image) == 0 { - return fmt.Errorf("missing an image") - } - - var err error - - // create namespace - ns, err := o.Client.CoreV1().Namespaces().Create(&corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "openshift-must-gather-", - Labels: map[string]string{ - "openshift.io/run-level": "0", - }, - Annotations: map[string]string{ - "oc.openshift.io/command": "oc adm must-gather", - }, - }, - }) - if err != nil { - return err - } - o.PrinterCreated.PrintObj(ns, o.Out) - if !o.Keep { - defer func() { - if err := o.Client.CoreV1().Namespaces().Delete(ns.Name, nil); err != nil { - fmt.Printf("%v", err) - return - } - o.PrinterDeleted.PrintObj(ns, o.Out) - }() - } - - clusterRoleBinding, err := o.Client.RbacV1().ClusterRoleBindings().Create(o.newClusterRoleBinding(ns.Name)) - if err != nil { - return err - } - o.PrinterCreated.PrintObj(clusterRoleBinding, o.Out) - if !o.Keep { - defer func() { - if err := o.Client.RbacV1().ClusterRoleBindings().Delete(clusterRoleBinding.Name, &metav1.DeleteOptions{}); err != nil { - fmt.Printf("%v", err) - return - } - o.PrinterDeleted.PrintObj(clusterRoleBinding, o.Out) - }() - } - - // create pod - pod, err := o.Client.CoreV1().Pods(ns.Name).Create(o.newPod(o.NodeName)) - if err != nil { - return err - } - - // wait for gather container to be running (gather is running) - if err := o.waitForGatherContainerRunning(pod); err != nil { - return err - } - - // stream gather container logs - if err := o.getInitContainerLogs(pod); err != nil { - fmt.Fprintf(o.Out, "container logs unavailable: %v\n", err) - } - - // wait for pod to be running (gather has completed) - if err := o.waitForPodRunning(pod); err != nil { - return err - } - - // copy the gathered files into the local destination dir - err = o.copyFilesFromPod(pod) - return err -} - -func (o *MustGatherOptions) copyFilesFromPod(pod *corev1.Pod) error { - rsyncOptions := &rsync.RsyncOptions{ - Namespace: pod.Namespace, - Source: &rsync.PathSpec{PodName: pod.Name, Path: path.Clean(o.SourceDir) + "/"}, - ContainerName: "copy", - Destination: &rsync.PathSpec{PodName: "", Path: o.DestDir}, - Client: o.Client, - Config: o.Config, - RshCmd: fmt.Sprintf("%s --namespace=%s", o.RsyncRshCmd, pod.Namespace), - IOStreams: o.IOStreams, - } - rsyncOptions.Strategy = rsync.NewDefaultCopyStrategy(rsyncOptions) - return rsyncOptions.RunRsync() - -} - -func (o *MustGatherOptions) getInitContainerLogs(pod *corev1.Pod) error { - return (&logs.LogsOptions{ - Namespace: pod.Namespace, - ResourceArg: pod.Name, - Options: &corev1.PodLogOptions{ - Follow: true, - Container: pod.Spec.InitContainers[0].Name, - }, - RESTClientGetter: o.RESTClientGetter, - Object: pod, - ConsumeRequestFn: logs.DefaultConsumeRequest, - LogsForObject: polymorphichelpers.LogsForObjectFn, - IOStreams: genericclioptions.IOStreams{Out: o.Out}, - }).RunLogs() -} - -func (o *MustGatherOptions) waitForPodRunning(pod *corev1.Pod) error { - phase := pod.Status.Phase - err := wait.PollImmediate(time.Second, 10*time.Minute, func() (bool, error) { - var err error - if pod, err = o.Client.CoreV1().Pods(pod.Namespace).Get(pod.Name, 
metav1.GetOptions{}); err != nil { - return false, nil - } - phase = pod.Status.Phase - return phase != corev1.PodPending, nil - }) - if err != nil { - return err - } - if phase != corev1.PodRunning { - return fmt.Errorf("pod is not running: %v\n", phase) - } - return nil -} - -func (o *MustGatherOptions) waitForGatherContainerRunning(pod *corev1.Pod) error { - return wait.PollImmediate(time.Second, 10*time.Minute, func() (bool, error) { - var err error - if pod, err = o.Client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err == nil { - if len(pod.Status.InitContainerStatuses) == 0 { - return false, nil - } - state := pod.Status.InitContainerStatuses[0].State - running := state.Running != nil - terminated := state.Terminated != nil - return running || terminated, nil - } - if retry.IsHTTPClientError(err) { - return false, nil - } - return false, err - }) -} - -func (o *MustGatherOptions) newClusterRoleBinding(ns string) *rbacv1.ClusterRoleBinding { - return &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "must-gather-", - Annotations: map[string]string{ - "oc.openshift.io/command": "oc adm must-gather", - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: "cluster-admin", - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Name: "default", - Namespace: ns, - }, - }, - } -} - -// newPod creates a pod with 2 containers with a shared volume mount: -// - gather: init container that runs gather command -// - copy: no-op container we can exec into -func (o *MustGatherOptions) newPod(node string) *corev1.Pod { - zero := int64(0) - ret := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "must-gather-", - Labels: map[string]string{ - "app": "must-gather", - }, - }, - Spec: corev1.PodSpec{ - NodeName: node, - RestartPolicy: corev1.RestartPolicyNever, - Volumes: []corev1.Volume{ - { - Name: "must-gather-output", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }, - }, - InitContainers: []corev1.Container{ - { - Name: "gather", - Image: o.Image, - Command: []string{"/usr/bin/gather"}, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "must-gather-output", - MountPath: path.Clean(o.SourceDir), - ReadOnly: false, - }, - }, - }, - }, - Containers: []corev1.Container{ - { - Name: "copy", - Image: o.Image, - Command: []string{"/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"}, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "must-gather-output", - MountPath: path.Clean(o.SourceDir), - ReadOnly: false, - }, - }, - }, - }, - TerminationGracePeriodSeconds: &zero, - Tolerations: []corev1.Toleration{ - { - Operator: "Exists", - }, - }, - }, - } - if len(o.Command) > 0 { - ret.Spec.InitContainers[0].Command = o.Command - } - return ret -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/network/isolate_projects.go b/vendor/github.com/openshift/oc/pkg/cli/admin/network/isolate_projects.go deleted file mode 100644 index 61e932c59ebe..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/network/isolate_projects.go +++ /dev/null @@ -1,93 +0,0 @@ -package network - -import ( - "fmt" - - "github.com/spf13/cobra" - - kerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/cli-runtime/pkg/genericclioptions" - kapi "k8s.io/kubernetes/pkg/apis/core" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/util/templates" - - "github.com/openshift/library-go/pkg/network/networkapihelpers" - 
"github.com/openshift/library-go/pkg/network/networkutils" -) - -const IsolateProjectsNetworkCommandName = "isolate-projects" - -var ( - isolateProjectsNetworkLong = templates.LongDesc(` - Isolate project network - - Allows projects to isolate their network from other projects when using the %[1]s network plugin.`) - - isolateProjectsNetworkExample = templates.Examples(` - # Provide isolation for project p1 - %[1]s - - # Allow all projects with label name=top-secret to have their own isolated project network - %[1]s --selector='name=top-secret'`) -) - -type IsolateOptions struct { - Options *ProjectOptions -} - -func NewIsolateOptions(streams genericclioptions.IOStreams) *IsolateOptions { - return &IsolateOptions{ - Options: NewProjectOptions(streams), - } -} - -func NewCmdIsolateProjectsNetwork(commandName, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - o := NewIsolateOptions(streams) - cmd := &cobra.Command{ - Use: commandName, - Short: "Isolate project network", - Long: fmt.Sprintf(isolateProjectsNetworkLong, networkutils.MultiTenantPluginName), - Example: fmt.Sprintf(isolateProjectsNetworkExample, fullName), - Run: func(c *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Complete(f, c, args)) - kcmdutil.CheckErr(o.Validate()) - kcmdutil.CheckErr(o.Run()) - }, - } - - // Common optional params - cmd.Flags().StringVar(&o.Options.Selector, "selector", o.Options.Selector, "Label selector to filter projects. Either pass one/more projects as arguments or use this project selector") - - return cmd -} - -func (o *IsolateOptions) Complete(f kcmdutil.Factory, c *cobra.Command, args []string) error { - if err := o.Options.Complete(f, c, args); err != nil { - return err - } - o.Options.CheckSelector = c.Flag("selector").Changed - return nil -} - -func (o *IsolateOptions) Validate() error { - return o.Options.Validate() -} - -func (o *IsolateOptions) Run() error { - projects, err := o.Options.GetProjects() - if err != nil { - return err - } - - errList := []error{} - for _, project := range projects { - if project.Name == kapi.NamespaceDefault { - errList = append(errList, fmt.Errorf("network isolation for project %q is forbidden", project.Name)) - continue - } - if err = o.Options.UpdatePodNetwork(project.Name, networkapihelpers.IsolatePodNetwork, ""); err != nil { - errList = append(errList, fmt.Errorf("network isolation for project %q failed, error: %v", project.Name, err)) - } - } - return kerrors.NewAggregate(errList) -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/network/join_projects.go b/vendor/github.com/openshift/oc/pkg/cli/admin/network/join_projects.go deleted file mode 100644 index 97d8a738aaa8..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/network/join_projects.go +++ /dev/null @@ -1,111 +0,0 @@ -package network - -import ( - "errors" - "fmt" - - "github.com/spf13/cobra" - - kerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/cli-runtime/pkg/genericclioptions" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/util/templates" - - "github.com/openshift/library-go/pkg/network/networkapihelpers" - "github.com/openshift/library-go/pkg/network/networkutils" -) - -const JoinProjectsNetworkCommandName = "join-projects" - -var ( - joinProjectsNetworkLong = templates.LongDesc(` - Join project network - - Allows projects to join existing project network when using the %[1]s network plugin.`) - - joinProjectsNetworkExample = templates.Examples(` - # Allow project p2 to use 
project p1 network - %[1]s --to= - - # Allow all projects with label name=top-secret to use project p1 network - %[1]s --to= --selector='name=top-secret'`) -) - -type JoinOptions struct { - Options *ProjectOptions - JoinProject *ProjectOptions - - joinProjectName string -} - -func NewJoinOptions(streams genericclioptions.IOStreams) *JoinOptions { - return &JoinOptions{ - Options: NewProjectOptions(streams), - JoinProject: NewProjectOptions(streams), - } -} - -func NewCmdJoinProjectsNetwork(commandName, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - o := NewJoinOptions(streams) - cmd := &cobra.Command{ - Use: commandName, - Short: "Join project network", - Long: fmt.Sprintf(joinProjectsNetworkLong, networkutils.MultiTenantPluginName), - Example: fmt.Sprintf(joinProjectsNetworkExample, fullName), - Run: func(c *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Complete(f, c, args)) - kcmdutil.CheckErr(o.Validate()) - kcmdutil.CheckErr(o.Run()) - }, - } - // Supported operations - cmd.Flags().StringVar(&o.joinProjectName, "to", o.joinProjectName, "Join network of the given project name") - - // Common optional params - cmd.Flags().StringVar(&o.Options.Selector, "selector", o.Options.Selector, "Label selector to filter projects. Either pass one/more projects as arguments or use this project selector") - - return cmd -} - -func (o *JoinOptions) Complete(f kcmdutil.Factory, c *cobra.Command, args []string) error { - if err := o.Options.Complete(f, c, args); err != nil { - return err - } - if err := o.JoinProject.Complete(f, c, []string{o.joinProjectName}); err != nil { - return err - } - o.Options.CheckSelector = c.Flag("selector").Changed - return nil -} - -func (o *JoinOptions) Validate() error { - errList := []error{} - if err := o.Options.Validate(); err != nil { - errList = append(errList, err) - } - if len(o.joinProjectName) == 0 { - errList = append(errList, errors.New("must provide --to=")) - } - return kerrors.NewAggregate(errList) -} - -func (o *JoinOptions) Run() error { - projects, err := o.Options.GetProjects() - if err != nil { - return err - } - _, err = o.JoinProject.GetProjects() - if err != nil { - return err - } - - errList := []error{} - for _, project := range projects { - if project.Name != o.joinProjectName { - if err = o.Options.UpdatePodNetwork(project.Name, networkapihelpers.JoinPodNetwork, o.joinProjectName); err != nil { - errList = append(errList, fmt.Errorf("project %q failed to join %q, error: %v", project.Name, o.joinProjectName, err)) - } - } - } - return kerrors.NewAggregate(errList) -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/network/make_projects_global.go b/vendor/github.com/openshift/oc/pkg/cli/admin/network/make_projects_global.go deleted file mode 100644 index ecd5818e3c27..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/network/make_projects_global.go +++ /dev/null @@ -1,87 +0,0 @@ -package network - -import ( - "fmt" - - "github.com/spf13/cobra" - - kerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/cli-runtime/pkg/genericclioptions" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/util/templates" - - "github.com/openshift/library-go/pkg/network/networkapihelpers" - "github.com/openshift/library-go/pkg/network/networkutils" -) - -const MakeGlobalProjectsNetworkCommandName = "make-projects-global" - -var ( - makeGlobalProjectsNetworkLong = templates.LongDesc(` - Make project network global - - Allows projects to access all pods in 
the cluster and vice versa when using the %[1]s network plugin.`) - - makeGlobalProjectsNetworkExample = templates.Examples(` - # Allow project p1 to access all pods in the cluster and vice versa - %[1]s - - # Allow all projects with label name=share to access all pods in the cluster and vice versa - %[1]s --selector='name=share'`) -) - -type MakeGlobalOptions struct { - Options *ProjectOptions -} - -func NewMakeGlobalOptions(streams genericclioptions.IOStreams) *MakeGlobalOptions { - return &MakeGlobalOptions{ - Options: NewProjectOptions(streams), - } -} - -func NewCmdMakeGlobalProjectsNetwork(commandName, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - o := NewMakeGlobalOptions(streams) - cmd := &cobra.Command{ - Use: commandName, - Short: "Make project network global", - Long: fmt.Sprintf(makeGlobalProjectsNetworkLong, networkutils.MultiTenantPluginName), - Example: fmt.Sprintf(makeGlobalProjectsNetworkExample, fullName), - Run: func(c *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Complete(f, c, args)) - kcmdutil.CheckErr(o.Validate()) - kcmdutil.CheckErr(o.Run()) - }, - } - - // Common optional params - cmd.Flags().StringVar(&o.Options.Selector, "selector", o.Options.Selector, "Label selector to filter projects. Either pass one/more projects as arguments or use this project selector") - - return cmd -} -func (o *MakeGlobalOptions) Complete(f kcmdutil.Factory, c *cobra.Command, args []string) error { - if err := o.Options.Complete(f, c, args); err != nil { - return err - } - o.Options.CheckSelector = c.Flag("selector").Changed - return nil -} - -func (o *MakeGlobalOptions) Validate() error { - return o.Options.Validate() -} - -func (o *MakeGlobalOptions) Run() error { - projects, err := o.Options.GetProjects() - if err != nil { - return err - } - - errList := []error{} - for _, project := range projects { - if err = o.Options.UpdatePodNetwork(project.Name, networkapihelpers.GlobalPodNetwork, ""); err != nil { - errList = append(errList, fmt.Errorf("removing network isolation for project %q failed, error: %v", project.Name, err)) - } - } - return kerrors.NewAggregate(errList) -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/network/pod_network.go b/vendor/github.com/openshift/oc/pkg/cli/admin/network/pod_network.go deleted file mode 100644 index 2b4434677702..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/network/pod_network.go +++ /dev/null @@ -1,33 +0,0 @@ -package network - -import ( - "github.com/spf13/cobra" - - "k8s.io/cli-runtime/pkg/genericclioptions" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/util/templates" -) - -const PodNetworkCommandName = "pod-network" - -var ( - podNetworkLong = templates.LongDesc(` - Manage pod network in the cluster - - This command provides common pod network operations for administrators.`) -) - -func NewCmdPodNetwork(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - // Parent command to which all subcommands are added. 
- cmds := &cobra.Command{ - Use: name, - Short: "Manage pod network", - Long: podNetworkLong, - Run: kcmdutil.DefaultSubCommandRun(streams.ErrOut), - } - - cmds.AddCommand(NewCmdJoinProjectsNetwork(JoinProjectsNetworkCommandName, fullName+" "+JoinProjectsNetworkCommandName, f, streams)) - cmds.AddCommand(NewCmdMakeGlobalProjectsNetwork(MakeGlobalProjectsNetworkCommandName, fullName+" "+MakeGlobalProjectsNetworkCommandName, f, streams)) - cmds.AddCommand(NewCmdIsolateProjectsNetwork(IsolateProjectsNetworkCommandName, fullName+" "+IsolateProjectsNetworkCommandName, f, streams)) - return cmds -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/network/project_options.go b/vendor/github.com/openshift/oc/pkg/cli/admin/network/project_options.go deleted file mode 100644 index b793f2c0c294..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/network/project_options.go +++ /dev/null @@ -1,205 +0,0 @@ -package network - -import ( - "errors" - "fmt" - "reflect" - "strings" - "time" - - "github.com/spf13/cobra" - - kapierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - kerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/cli-runtime/pkg/resource" - "k8s.io/client-go/kubernetes" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/scheme" - - networkv1 "github.com/openshift/api/network/v1" - projectv1 "github.com/openshift/api/project/v1" - networkv1typedclient "github.com/openshift/client-go/network/clientset/versioned/typed/network/v1" - "github.com/openshift/library-go/pkg/network/networkapihelpers" - "github.com/openshift/library-go/pkg/network/networkutils" -) - -type ProjectOptions struct { - DefaultNamespace string - NetClient networkv1typedclient.NetworkV1Interface - KubeClient kubernetes.Interface - - Builder *resource.Builder - - ProjectNames []string - - // Common optional params - Selector string - CheckSelector bool - - genericclioptions.IOStreams -} - -func NewProjectOptions(streams genericclioptions.IOStreams) *ProjectOptions { - return &ProjectOptions{ - IOStreams: streams, - } -} - -func (p *ProjectOptions) Complete(f kcmdutil.Factory, c *cobra.Command, args []string) error { - var err error - p.DefaultNamespace, _, err = f.ToRawKubeConfigLoader().Namespace() - if err != nil { - return err - } - clientConfig, err := f.ToRESTConfig() - if err != nil { - return err - } - p.KubeClient, err = kubernetes.NewForConfig(clientConfig) - if err != nil { - return err - } - p.NetClient, err = networkv1typedclient.NewForConfig(clientConfig) - if err != nil { - return err - } - - p.Builder = f.NewBuilder() - p.ProjectNames = []string{} - if len(args) != 0 { - p.ProjectNames = append(p.ProjectNames, args...) 
- } - return nil -} - -// Common validations -func (p *ProjectOptions) Validate() error { - errList := []error{} - if p.CheckSelector { - if len(p.Selector) > 0 { - if _, err := labels.Parse(p.Selector); err != nil { - errList = append(errList, errors.New("--selector= must be a valid label selector")) - } - } - if len(p.ProjectNames) != 0 { - errList = append(errList, errors.New("either specify --selector= or projects but not both")) - } - } else if len(p.ProjectNames) == 0 { - errList = append(errList, errors.New("must provide --selector= or projects")) - } - - clusterNetwork, err := p.NetClient.ClusterNetworks().Get(networkv1.ClusterNetworkDefault, metav1.GetOptions{}) - if err != nil { - if kapierrors.IsNotFound(err) { - errList = append(errList, errors.New("managing pod network is only supported for openshift multitenant network plugin")) - } else { - errList = append(errList, errors.New("failed to fetch current network plugin info")) - } - } else if !isOpenShiftMultitenantNetworkPlugin(clusterNetwork.PluginName) { - errList = append(errList, fmt.Errorf("using plugin: %q, managing pod network is only supported for openshift multitenant network plugin", clusterNetwork.PluginName)) - } - - return kerrors.NewAggregate(errList) -} - -func isOpenShiftMultitenantNetworkPlugin(pluginName string) bool { - if strings.ToLower(pluginName) == networkutils.MultiTenantPluginName { - return true - } - return false -} - -func (p *ProjectOptions) GetProjects() ([]*projectv1.Project, error) { - nameArgs := []string{"projects"} - if len(p.ProjectNames) != 0 { - nameArgs = append(nameArgs, p.ProjectNames...) - } - - r := p.Builder. - WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). - ContinueOnError(). - NamespaceParam(p.DefaultNamespace). - LabelSelectorParam(p.Selector). - ResourceTypeOrNameArgs(true, nameArgs...). - Flatten(). - Do() - if r.Err() != nil { - return nil, r.Err() - } - - errList := []error{} - projectList := []*projectv1.Project{} - _ = r.Visit(func(info *resource.Info, err error) error { - if err != nil { - return err - } - project, ok := info.Object.(*projectv1.Project) - if !ok { - err := fmt.Errorf("cannot convert input to Project: %v", reflect.TypeOf(info.Object)) - errList = append(errList, err) - // Don't bail out if one project fails - return nil - } - projectList = append(projectList, project) - return nil - }) - if len(errList) != 0 { - return projectList, kerrors.NewAggregate(errList) - } - - if len(projectList) == 0 { - return projectList, fmt.Errorf("no projects found") - } else { - givenProjectNames := sets.NewString(p.ProjectNames...) 
- foundProjectNames := sets.String{} - for _, project := range projectList { - foundProjectNames.Insert(project.ObjectMeta.Name) - } - skippedProjectNames := givenProjectNames.Difference(foundProjectNames) - if skippedProjectNames.Len() > 0 { - return projectList, fmt.Errorf("projects %v not found", strings.Join(skippedProjectNames.List(), ", ")) - } - } - return projectList, nil -} - -func (p *ProjectOptions) UpdatePodNetwork(nsName string, action networkapihelpers.PodNetworkAction, args string) error { - // Get corresponding NetNamespace for given namespace - netns, err := p.NetClient.NetNamespaces().Get(nsName, metav1.GetOptions{}) - if err != nil { - return err - } - - // Apply pod network change intent - networkapihelpers.SetChangePodNetworkAnnotation(netns, action, args) - - // Update NetNamespace object - _, err = p.NetClient.NetNamespaces().Update(netns) - if err != nil { - return err - } - - // Validate SDN controller applied or rejected the intent - backoff := wait.Backoff{ - Steps: 15, - Duration: 500 * time.Millisecond, - Factor: 1.1, - } - return wait.ExponentialBackoff(backoff, func() (bool, error) { - updatedNetNs, err := p.NetClient.NetNamespaces().Get(netns.NetName, metav1.GetOptions{}) - if err != nil { - return false, err - } - - if _, _, err = networkapihelpers.GetChangePodNetworkAnnotation(updatedNetNs); err == networkapihelpers.ErrorPodNetworkAnnotationNotFound { - return true, nil - } - // Pod network change not applied yet - return false, nil - }) -} diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/node/logs.go b/vendor/github.com/openshift/oc/pkg/cli/admin/node/logs.go deleted file mode 100644 index 648721d42032..000000000000 --- a/vendor/github.com/openshift/oc/pkg/cli/admin/node/logs.go +++ /dev/null @@ -1,538 +0,0 @@ -package node - -import ( - "bufio" - "bytes" - "compress/gzip" - "fmt" - "io" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/spf13/cobra" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/cli-runtime/pkg/resource" - "k8s.io/client-go/rest" - kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/scheme" - "k8s.io/kubernetes/pkg/kubectl/util/templates" -) - -var ( - logsLong = templates.LongDesc(` - Display and filter node logs - - This command retrieves logs for the node. The default mode is to query the - systemd journal on supported operating systems, which allows searching, time - based filtering, and unit based filtering. You may also use the --path argument - to see a list of log files available under /var/logs/ and view those contents - directly. - - Node logs may contain sensitive output and so are limited to privileged node - administrators. The system:node-admins role grants this permission by default. - You check who has that permission via: - - $ oc adm policy who-can --all-namespaces get nodes/log - `) - - logsExample = templates.Examples(` - # Show kubelet logs from all masters - %[1]s node-logs --role master -u kubelet - - # See what logs are available in masters in /var/logs - %[1]s node-logs --role master --path=/ - - # Display cron log file from all masters - %[1]s node-logs --role master --path=cron - `) -) - -// LogsOptions holds all the necessary options for running oc adm node-logs. 
-type LogsOptions struct { - Resources []string - Selector string - Role string - - // the log path to fetch - Path string - - // --path=journal specific arguments - Grep string - GrepCaseSensitive bool - Units []string - SinceTime string - UntilTime string - Tail int - Output string - - // output format arguments - Raw bool - Unify bool - - RESTClientGetter func(mapping *meta.RESTMapping) (resource.RESTClient, error) - Builder *resource.Builder - - genericclioptions.IOStreams -} - -func NewLogsOptions(streams genericclioptions.IOStreams) *LogsOptions { - return &LogsOptions{ - Path: "journal", - IOStreams: streams, - GrepCaseSensitive: true, - } -} - -// NewCmdLogs creates a new logs command that supports OpenShift resources. -func NewCmdLogs(baseName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - o := NewLogsOptions(streams) - cmd := &cobra.Command{ - Use: "node-logs [-l LABELS] [NODE...]", - DisableFlagsInUseLine: true, - Short: "Display and filter node logs", - Long: logsLong, - Example: fmt.Sprintf(logsExample, baseName), - Run: func(cmd *cobra.Command, args []string) { - kcmdutil.CheckErr(o.Complete(f, cmd, args)) - kcmdutil.CheckErr(o.Validate()) - kcmdutil.CheckErr(o.RunLogs()) - }, - } - - cmd.Flags().StringVar(&o.Path, "path", o.Path, "Retrieve the specified path within the node's /var/logs/ folder. The 'journal' value will allow querying the journal on supported operating systems.") - - cmd.Flags().StringSliceVarP(&o.Units, "unit", "u", o.Units, "Return log entries from the specified unit(s). Only applies to node journal logs.") - cmd.Flags().StringVarP(&o.Grep, "grep", "g", o.Grep, "Filter log entries by the provided regex pattern. Only applies to node journal logs.") - cmd.Flags().BoolVar(&o.GrepCaseSensitive, "case-sensitive", o.GrepCaseSensitive, "Filters are case sensitive by default. Pass --case-sensitive=false to do a case insensitive filter.") - cmd.Flags().StringVar(&o.SinceTime, "since", o.SinceTime, "Return logs after a specific ISO timestamp or relative date. Only applies to node journal logs.") - cmd.Flags().StringVar(&o.UntilTime, "until", o.UntilTime, "Return logs before a specific ISO timestamp or relative date. Only applies to node journal logs.") - cmd.Flags().StringVarP(&o.Output, "output", "o", o.Output, "Display journal logs in an alternate format (short, cat, json, short-unix). Only applies to node journal logs.") - cmd.Flags().IntVar(&o.Tail, "tail", o.Tail, "Return up to this many lines from the end of the log. Only applies to node journal logs.") - - cmd.Flags().StringVar(&o.Role, "role", o.Role, "Set a label selector by node role.") - cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on.") - cmd.Flags().BoolVar(&o.Raw, "raw", o.Raw, "Perform no transformation of the returned data.") - cmd.Flags().BoolVar(&o.Unify, "unify", o.Unify, "Interleave logs by sorting the output. Defaults on when viewing node journal logs") - - return cmd -} - -func (o *LogsOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error { - if !cmd.Flags().Lookup("unify").Changed { - o.Unify = o.Path == "journal" - } - - o.Resources = args - - o.RESTClientGetter = f.UnstructuredClientForMapping - - builder := f.NewBuilder(). - WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). - SingleResourceType() - - if len(o.Resources) > 0 { - builder.ResourceNames("nodes", o.Resources...) 
- } - if len(o.Role) > 0 { - req, err := labels.NewRequirement(fmt.Sprintf("node-role.kubernetes.io/%s", o.Role), selection.Exists, nil) - if err != nil { - return fmt.Errorf("invalid --role: %v", err) - } - o.Selector = req.String() - } - if len(o.Selector) > 0 { - builder.ResourceTypes("nodes").LabelSelectorParam(o.Selector) - } - o.Builder = builder - - return nil -} - -func (o LogsOptions) Validate() error { - if len(o.Resources) == 0 && len(o.Selector) == 0 { - return fmt.Errorf("at least one node name or a selector (-l) must be specified") - } - if len(o.Resources) > 0 && len(o.Selector) > 0 { - return fmt.Errorf("node names and selector may not both be specified") - } - return nil -} - -// logRequest abstracts retrieving the content of the node logs endpoint which is normally -// either directory content or a file. It supports raw retrieval for use with the journal -// endpoint, and formats the HTML returned by a directory listing into a more user friendly -// output. -type logRequest struct { - node string - req *rest.Request - err error - - // raw is set to true when we are viewing the journal and wish to skip prefixing - raw bool - // skipPrefix bypasses prefixing if the user knows that a unique identifier is already - // in the file - skipPrefix bool -} - -// WriteTo prefixes the error message with the current node if necessary -func (req *logRequest) WriteRequest(out io.Writer) error { - if req.err != nil { - return req.err - } - err := req.writeTo(out) - if err != nil { - err = fmt.Errorf("%s %v", req.node, err) - req.err = err - } - return err -} - -func (req *logRequest) writeTo(out io.Writer) error { - in, err := req.req.Stream() - if err != nil { - return err - } - defer in.Close() - - // raw output implies we may be getting binary content directly - // from the remote and so we want to perform no translation - if req.raw { - // TODO: optionallyDecompress should be implemented by checking - // the content-encoding of the response, but we perform optional - // decompression here in case the content of the logs on the server - // is also gzipped. - return optionallyDecompress(out, in) - } - - var prefix []byte - if !req.skipPrefix { - prefix = []byte(fmt.Sprintf("%s ", req.node)) - } - - return outputDirectoryEntriesOrContent(out, in, prefix) -} - -// RunLogs retrieves node logs -func (o LogsOptions) RunLogs() error { - builder := o.Builder - - var requests []*logRequest - - var errs []error - result := builder.ContinueOnError().Flatten().Do() - err := result.Visit(func(info *resource.Info, err error) error { - if err != nil { - requests = append(requests, &logRequest{node: info.Name, err: err}) - return nil - } - mapping := info.ResourceMapping() - client, err := o.RESTClientGetter(mapping) - if err != nil { - requests = append(requests, &logRequest{node: info.Name, err: err}) - return nil - } - path := client.Get(). - Namespace(info.Namespace).Name(info.Name). - Resource(mapping.Resource.Resource).SubResource("proxy", "logs").Suffix(o.Path).URL().Path - if strings.HasSuffix(o.Path, "/") { - path += "/" - } - - req := client.Get().RequestURI(path). - SetHeader("Accept", "text/plain, */*"). 
- SetHeader("Accept-Encoding", "gzip") - if o.Path == "journal" { - if len(o.UntilTime) > 0 { - req.Param("until", o.UntilTime) - } - if len(o.SinceTime) > 0 { - req.Param("since", o.SinceTime) - } - if len(o.Output) > 0 { - req.Param("output", o.Output) - } - if len(o.Units) > 0 { - for _, unit := range o.Units { - req.Param("unit", unit) - } - } - if len(o.Grep) > 0 { - req.Param("grep", o.Grep) - req.Param("case-sensitive", fmt.Sprintf("%t", o.GrepCaseSensitive)) - } - if o.Tail > 0 { - req.Param("tail", strconv.Itoa(o.Tail)) - } - } - - requests = append(requests, &logRequest{ - node: info.Name, - req: req, - raw: o.Raw || o.Path == "journal", - }) - return nil - }) - if err != nil { - if agg, ok := err.(errors.Aggregate); ok { - errs = append(errs, agg.Errors()...) - } else { - errs = append(errs, err) - } - } - - found := len(errs) + len(requests) - // only hide prefix if the user specified a single item - skipPrefix := found == 1 && result.TargetsSingleItems() - - // buffer output for slightly better streaming performance - out := bufio.NewWriterSize(o.Out, 1024*16) - defer out.Flush() - - if o.Unify { - // unified output is each source, interleaved in lexographic order (assumes - // the source input is sorted by time) - var readers []Reader - for i := range requests { - req := requests[i] - req.skipPrefix = true - pr, pw := io.Pipe() - readers = append(readers, Reader{ - R: pr, - }) - go func() { - err := req.WriteRequest(pw) - pw.CloseWithError(err) - }() - } - _, err := NewMergeReader(readers...).WriteTo(out) - if agg := errors.Flatten(errors.NewAggregate([]error{err})); agg != nil { - errs = append(errs, agg.Errors()...) - } - - } else { - // display files sequentially - for _, req := range requests { - req.skipPrefix = skipPrefix - if err := req.WriteRequest(out); err != nil { - errs = append(errs, err) - } - } - } - - if len(errs) > 0 { - for _, err := range errs { - fmt.Fprintf(o.ErrOut, "error: %v\n", err) - } - return kcmdutil.ErrExit - } - - return nil -} - -func optionallyDecompress(out io.Writer, in io.Reader) error { - bufferSize := 4096 - buf := bufio.NewReaderSize(in, bufferSize) - head, err := buf.Peek(1024) - if err != nil && err != io.EOF { - return err - } - if _, err := gzip.NewReader(bytes.NewBuffer(head)); err != nil { - // not a gzipped stream - _, err = io.Copy(out, buf) - return err - } - r, err := gzip.NewReader(buf) - if err != nil { - return err - } - _, err = io.Copy(out, r) - return err -} - -func outputDirectoryEntriesOrContent(out io.Writer, in io.Reader, prefix []byte) error { - bufferSize := 4096 - buf := bufio.NewReaderSize(in, bufferSize) - - // turn href links into lines of output - content, _ := buf.Peek(bufferSize) - if bytes.HasPrefix(content, []byte("
")) {
-		reLink := regexp.MustCompile(`href="([^"]+)"`)
-		s := bufio.NewScanner(buf)
-		s.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
-			matches := reLink.FindSubmatchIndex(data)
-			if matches == nil {
-				advance = bytes.LastIndex(data, []byte("\n"))
-				if advance == -1 {
-					advance = 0
-				}
-				return advance, nil, nil
-			}
-			advance = matches[1]
-			token = data[matches[2]:matches[3]]
-			return advance, token, nil
-		})
-		for s.Scan() {
-			if _, err := out.Write(prefix); err != nil {
-				return err
-			}
-			if _, err := fmt.Fprintln(out, s.Text()); err != nil {
-				return err
-			}
-		}
-		return s.Err()
-	}
-
-	// without a prefix we can copy directly
-	if len(prefix) == 0 {
-		_, err := io.Copy(out, buf)
-		return err
-	}
-
-	r := NewMergeReader(Reader{R: buf, Prefix: prefix})
-	_, err := r.WriteTo(out)
-	return err
-}
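
The kubelet's /logs endpoint serves an HTML directory listing, and the custom SplitFunc above tokenizes that listing by href target rather than by anchor text or newline. Below is a standalone sketch of the same extraction using a one-shot regexp match instead of the streaming scanner; the listing content and node prefix are invented for illustration:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A hypothetical kubelet /logs directory listing.
	listing := `<pre>
<a href="cron">cron</a>
<a href="journal/">journal/</a>
</pre>`

	// Same pattern as reLink above: the extracted token is the href
	// target, not the anchor's display text.
	reLink := regexp.MustCompile(`href="([^"]+)"`)
	for _, m := range reLink.FindAllStringSubmatch(listing, -1) {
		fmt.Println("node-1.example.com", m[1]) // prefix mirrors logRequest.node
	}
	// Output:
	// node-1.example.com cron
	// node-1.example.com journal/
}
```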
-
-// Reader wraps an io.Reader and inserts the provided prefix at the
-// beginning of the output and before each newline character found
-// in the stream.
-type Reader struct {
-	R      io.Reader
-	Prefix []byte
-}
-
-type mergeReader []Reader
-
-// NewMergeReader attempts to display the provided readers as line
-// oriented output in lexicographic order by always reading the next
-// available line from the reader with the "smallest" line.
-//
-// For example, given the readers with the following lines:
-//   1: A
-//      B
-//      D
-//   2: C
-//      D
-//      E
-//
-//  the reader would contain:
-//      A
-//      B
-//      C
-//      D
-//      D
-//      E
-//
-// The merge reader uses bufio.NewReader() for each input and the
-// ReadLine() method to find the next shortest input. If a given
-// line is longer than the buffer size of 4096, and all readers
-// have the same initial 4096 characters, the order is undefined.
-func NewMergeReader(r ...Reader) io.WriterTo {
-	return mergeReader(r)
-}
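
As a usage note, the sketch below is a godoc-style example (not present in the original file) that exercises the interleaving behavior documented above, assuming it sits alongside the package's Reader and NewMergeReader definitions. The inputs deliberately contain no duplicate lines, since equal lines from different sources may interleave in either order (sort.Sort is not stable):

```go
package node

import (
	"bytes"
	"os"
)

func ExampleNewMergeReader() {
	a := Reader{R: bytes.NewBufferString("A\nB\nE\n"), Prefix: []byte("1: ")}
	b := Reader{R: bytes.NewBufferString("C\nD\nF\n"), Prefix: []byte("2: ")}
	// Lines are compared without their prefixes, so the merged stream
	// stays in lexicographic order while each line is tagged by source.
	if _, err := NewMergeReader(a, b).WriteTo(os.Stdout); err != nil {
		panic(err)
	}
	// Output:
	// 1: A
	// 1: B
	// 2: C
	// 2: D
	// 1: E
	// 2: F
}
```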
-
-// WriteTo copies the provided readers into the provided output.
-func (r mergeReader) WriteTo(out io.Writer) (int64, error) {
-	// shortcut common cases
-	switch len(r) {
-	case 0:
-		return 0, nil
-	case 1:
-		if len(r[0].Prefix) == 0 {
-			return io.Copy(out, r[0].R)
-		}
-	}
-
-	// initialize the buffered readers
-	bufSize := 4096
-	var buffers sortedBuffers
-	var errs []error
-	for _, in := range r {
-		buf := &buffer{
-			r:      bufio.NewReaderSize(in.R, bufSize),
-			prefix: in.Prefix,
-		}
-		if err := buf.next(); err != nil {
-			errs = append(errs, err)
-			continue
-		}
-		buffers = append(buffers, buf)
-	}
-
-	var n int64
-	for len(buffers) > 0 {
-		// find the lowest buffer
-		sort.Sort(buffers)
-
-		// write out the line from the smallest buffer
-		buf := buffers[0]
-
-		if len(buf.prefix) > 0 {
-			b, err := out.Write(buf.prefix)
-			n += int64(b)
-			if err != nil {
-				return n, err
-			}
-		}
-
-		for {
-			done := !buf.linePrefix
-			b, err := out.Write(buf.line)
-			n += int64(b)
-			if err != nil {
-				return n, err
-			}
-
-			// try to fill the buffer, and if we get an error reading drop this source
-			if err := buf.next(); err != nil {
-				errs = append(errs, err)
-				buffers = buffers[1:]
-				break
-			}
-
-			// we reached the end of our line
-			if done {
-				break
-			}
-		}
-		b, err := fmt.Fprintln(out)
-		n += int64(b)
-		if err != nil {
-			return n, err
-		}
-	}
-
-	return n, errors.FilterOut(errors.NewAggregate(errs), func(err error) bool { return err == io.EOF })
-}
-
-type buffer struct {
-	r          *bufio.Reader
-	prefix     []byte
-	line       []byte
-	linePrefix bool
-}
-
-func (b *buffer) next() error {
-	var err error
-	b.line, b.linePrefix, err = b.r.ReadLine()
-	return err
-}
-
-type sortedBuffers []*buffer
-
-func (buffers sortedBuffers) Less(i, j int) bool {
-	return bytes.Compare(buffers[i].line, buffers[j].line) < 0
-}
-func (buffers sortedBuffers) Swap(i, j int) {
-	buffers[i], buffers[j] = buffers[j], buffers[i]
-}
-func (buffers sortedBuffers) Len() int {
-	return len(buffers)
-}
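
For reference, RunLogs reaches node logs through the kubelet proxy subresource. The runnable sketch below shows the URL shape a journal query ends up with; the node name and parameter values are hypothetical, while the path segments and query keys mirror the SubResource and Param calls in RunLogs:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u := url.URL{
		// nodes/{name}/proxy/logs/{path}, as built via
		// SubResource("proxy", "logs").Suffix(o.Path).
		Path: "/api/v1/nodes/node-1.example.com/proxy/logs/journal",
		RawQuery: url.Values{
			"unit":           {"kubelet"},
			"since":          {"-5m"},
			"tail":           {"100"},
			"grep":           {"error"},
			"case-sensitive": {"true"},
		}.Encode(),
	}
	fmt.Println(u.String())
	// Output:
	// /api/v1/nodes/node-1.example.com/proxy/logs/journal?case-sensitive=true&grep=error&since=-5m&tail=100&unit=kubelet
}
```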
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/node/logs_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/node/logs_test.go
deleted file mode 100644
index bc5d78dd0752..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/node/logs_test.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package node
-
-import (
-	"bytes"
-	"compress/gzip"
-	"io"
-	"strings"
-	"testing"
-)
-
-func Test_optionallyDecompress(t *testing.T) {
-	longString := strings.Repeat(`some test content`, 1000)
-	tests := []struct {
-		name    string
-		in      io.Reader
-		wantOut string
-		wantErr bool
-	}{
-		{in: gzipped(`some test content`), wantOut: `some test content`},
-		{in: bytes.NewBufferString(`some test content`), wantOut: `some test content`},
-		{in: gzipped(longString), wantOut: longString},
-		{in: bytes.NewBufferString(longString), wantOut: longString},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			out := &bytes.Buffer{}
-			if err := optionallyDecompress(out, tt.in); (err != nil) != tt.wantErr {
-				t.Errorf("optionallyDecompress() error = %v, wantErr %v", err, tt.wantErr)
-				return
-			}
-			if gotOut := out.String(); gotOut != tt.wantOut {
-				t.Errorf("optionallyDecompress() = %v, want %v", gotOut, tt.wantOut)
-			}
-		})
-	}
-}
-
-func gzipped(s string) io.Reader {
-	out := &bytes.Buffer{}
-	gw := gzip.NewWriter(out)
-	gw.Write([]byte(s))
-	gw.Close()
-	return out
-}
-
-func Test_outputDirectoryEntriesOrContent(t *testing.T) {
-	tests := []struct {
-		name    string
-		in      io.Reader
-		prefix  []byte
-		wantOut string
-		wantErr bool
-	}{
-		{in: bytes.NewBufferString(`<a href="line">line</a>`), wantOut: "line\n"},
-		{in: bytes.NewBufferString(`<a href="line">line</a>\n`), wantOut: "line\n"},
-		{prefix: []byte("test: "), in: bytes.NewBufferString(`<a href="line">line</a>\n`), wantOut: "test: line\n"},
-		{in: bytes.NewBufferString(`<a href=""></a>\n`), wantOut: ""},
-		{in: bytes.NewBufferString(`<html>`), wantOut: `<html>`},
-
-		{in: bytes.NewBufferString("<a href=\"link\">link</a>" + strings.Repeat("stuff\n", 1000)), wantOut: strings.Repeat("link\n", 1000)},
-		{prefix: []byte("test: "), in: bytes.NewBufferString("<a href=\"link\">link</a>" + strings.Repeat("stuff\n", 1000)), wantOut: strings.Repeat("test: link\n", 1000)},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			out := &bytes.Buffer{}
-			if err := outputDirectoryEntriesOrContent(out, tt.in, tt.prefix); (err != nil) != tt.wantErr {
-				t.Errorf("outputDirectoryEntriesOrContent() error = %v, wantErr %v", err, tt.wantErr)
-				return
-			}
-			if gotOut := out.String(); gotOut != tt.wantOut {
-				t.Errorf("outputDirectoryEntriesOrContent() = %v, want %v", gotOut, tt.wantOut)
-			}
-		})
-	}
-}
-
-func Test_mergeReader_WriteTo(t *testing.T) {
-	tests := []struct {
-		name    string
-		in      []Reader
-		wantOut string
-		wantErr bool
-	}{
-		{in: nil, wantOut: ""},
-		{in: readers("1\n2\n3\n"), wantOut: "1\n2\n3\n"},
-		{in: readers("1\n2", "1\n3\n"), wantOut: "1\n1\n2\n3\n"},
-		{in: readers("1a\n2a\n3a\n", "2b\n3b\n4b\n", "1c\n3c\n4c\n"), wantOut: "1a\n1c\n2a\n2b\n3a\n3b\n3c\n4b\n4c\n"},
-
-		{in: readers("a|1\n2\n3\n"), wantOut: "a1\na2\na3\n"},
-		{in: readers("a|1\n2", "b|1\n3\n"), wantOut: "a1\nb1\na2\nb3\n"},
-		{in: readers("a: |1\n2", "b: |1\n3\n"), wantOut: "a: 1\nb: 1\na: 2\nb: 3\n"},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			r := mergeReader(tt.in)
-			out := &bytes.Buffer{}
-			n, err := r.WriteTo(out)
-			if (err != nil) != tt.wantErr {
-				t.Errorf("mergeReader.WriteTo() error = %v, wantErr %v", err, tt.wantErr)
-				return
-			}
-			if n != int64(out.Len()) {
-				t.Errorf("mergeReader.WriteTo() = %v, want %v", n, out.Len())
-			}
-			if gotOut := out.String(); gotOut != tt.wantOut {
-				t.Errorf("mergeReader.WriteTo() = %v, want %v", gotOut, tt.wantOut)
-			}
-		})
-	}
-}
-
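-// readers builds test Readers from strings; a "prefix|content" value also sets the Reader's Prefix.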
-func readers(all ...string) []Reader {
-	var out []Reader
-	for _, s := range all {
-		if strings.Contains(s, "|") {
-			parts := strings.SplitN(s, "|", 2)
-			out = append(out, Reader{Prefix: []byte(parts[0]), R: bytes.NewBufferString(parts[1])})
-		} else {
-			out = append(out, Reader{R: bytes.NewBufferString(s)})
-		}
-	}
-	return out
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/oadm_bashcomp_func.go b/vendor/github.com/openshift/oc/pkg/cli/admin/oadm_bashcomp_func.go
deleted file mode 100644
index 8dd290863229..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/oadm_bashcomp_func.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package admin
-
-const (
-	BashCompletionFunc = `
-__custom_func() {
-    case ${last_command} in
-        oadm_validate_master-config | oadm_validate_node-config)
-            _filedir
-            return
-            ;;
-        *)
-            ;;
-    esac
-}
-`
-)
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/authz_helpers.go b/vendor/github.com/openshift/oc/pkg/cli/admin/policy/authz_helpers.go
deleted file mode 100644
index 94c47a570988..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/authz_helpers.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package policy
-
-import (
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apiserver/pkg/authentication/serviceaccount"
-)
-
-func buildSubjects(users, groups []string) []corev1.ObjectReference {
-	subjects := []corev1.ObjectReference{}
-
-	for _, user := range users {
-		saNamespace, saName, err := serviceaccount.SplitUsername(user)
-		if err == nil {
-			subjects = append(subjects, corev1.ObjectReference{Kind: "ServiceAccount", Namespace: saNamespace, Name: saName})
-			continue
-		}
-
-		subjects = append(subjects, corev1.ObjectReference{Kind: "User", Name: user})
-	}
-
-	for _, group := range groups {
-		subjects = append(subjects, corev1.ObjectReference{Kind: "Group", Name: group})
-	}
-
-	return subjects
-}
-
-// stringSubjectsFor returns users and groups for comparison against user.Info. currentNamespace is used
-// to create usernames for service accounts where namespace=="".
-func stringSubjectsFor(currentNamespace string, subjects []corev1.ObjectReference) ([]string, []string) {
-	// these MUST be nil to indicate empty
-	var users, groups []string
-
-	for _, subject := range subjects {
-		switch subject.Kind {
-		case "ServiceAccount":
-			namespace := currentNamespace
-			if len(subject.Namespace) > 0 {
-				namespace = subject.Namespace
-			}
-			if len(namespace) > 0 {
-				users = append(users, serviceaccount.MakeUsername(namespace, subject.Name))
-			}
-
-		case "User":
-			users = append(users, subject.Name)
-
-		case "Group":
-			groups = append(groups, subject.Name)
-		}
-	}
-
-	return users, groups
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/modify_roles.go b/vendor/github.com/openshift/oc/pkg/cli/admin/policy/modify_roles.go
deleted file mode 100644
index b6b2862d4994..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/modify_roles.go
+++ /dev/null
@@ -1,770 +0,0 @@
-package policy
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-
-	"github.com/spf13/cobra"
-
-	rbacv1 "k8s.io/api/rbac/v1"
-	kapierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/api/validation"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/printers"
-	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-	rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	userv1client "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1"
-	"github.com/openshift/library-go/pkg/authorization/authorizationutil"
-)
-
-const (
-	AddRoleToGroupRecommendedName      = "add-role-to-group"
-	AddRoleToUserRecommendedName       = "add-role-to-user"
-	RemoveRoleFromGroupRecommendedName = "remove-role-from-group"
-	RemoveRoleFromUserRecommendedName  = "remove-role-from-user"
-
-	AddClusterRoleToGroupRecommendedName      = "add-cluster-role-to-group"
-	AddClusterRoleToUserRecommendedName       = "add-cluster-role-to-user"
-	RemoveClusterRoleFromGroupRecommendedName = "remove-cluster-role-from-group"
-	RemoveClusterRoleFromUserRecommendedName  = "remove-cluster-role-from-user"
-)
-
-var (
-	addRoleToUserExample = templates.Examples(`
-	  # Add the 'view' role to user1 for the current project
-	  %[1]s view user1
-
-	  # Add the 'edit' role to serviceaccount1 for the current project
-	  %[1]s edit -z serviceaccount1`)
-
-	addRoleToUserLongDesc = templates.LongDesc(`
-	  Add a role to users or service accounts for the current project
-
-	  This command allows you to grant a user access to specific resources and actions within the current project, by assigning them to a role. It creates or modifies a RoleBinding referencing the specified role, adding the user(s) or serviceaccount(s) to the list of subjects. The command does not require that the matching role or user/serviceaccount resources exist and will create the binding successfully even when they do not exist or when the user does not have access to view them.
-
-	  If the --rolebinding-name argument is supplied, it will look for an existing rolebinding with that name. The role on the matching rolebinding MUST match the role name supplied to the command. If no rolebinding name is given, a default name will be used. When the --role-namespace argument is specified as a non-empty value, it MUST match the current namespace, and the rolebinding will reference a namespaced Role. Otherwise, the rolebinding will reference a ClusterRole resource.
-
-	  To learn more, see information about RBAC and policy, or use the 'get' and 'describe' commands on the following resources: 'clusterroles', 'clusterrolebindings', 'roles', 'rolebindings', 'users', 'groups', and 'serviceaccounts'.`)
-
-	addRoleToGroupLongDesc = templates.LongDesc(`
-	  Add a role to groups for the current project
-
-	  This command allows you to grant a group access to specific resources and actions within the current project, by assigning them to a role. It creates or modifies a RoleBinding referencing the specified role, adding the group(s) to the list of subjects. The command does not require that the matching role or group resources exist and will create the binding successfully even when they do not exist or when the user does not have access to view them.
-
-	  If the --rolebinding-name argument is supplied, it will look for an existing rolebinding with that name. The role on the matching rolebinding MUST match the role name supplied to the command. If no rolebinding name is given, a default name will be used. When the --role-namespace argument is specified as a non-empty value, it MUST match the current namespace, and the rolebinding will reference a namespaced Role. Otherwise, the rolebinding will reference a ClusterRole resource.
-
-	  To learn more, see information about RBAC and policy, or use the 'get' and 'describe' commands on the following resources: 'clusterroles', 'clusterrolebindings', 'roles', 'rolebindings', 'users', 'groups', and 'serviceaccounts'.`)
-
-	addClusterRoleToUserLongDesc = templates.LongDesc(`
-	  Add a role to users or service accounts across all projects
-
-	  This command allows you to grant a user access to specific resources and actions within the cluster, by assigning them to a role. It creates or modifies a ClusterRoleBinding referencing the specified ClusterRole, adding the user(s) or serviceaccount(s) to the list of subjects. This command does not require that the matching cluster role or user/serviceaccount resources exist and will create the binding successfully even when they do not exist or when the user does not have access to view them.
-
-	  If the --rolebinding-name argument is supplied, it will look for an existing clusterrolebinding with that name. The role on the matching clusterrolebinding MUST match the role name supplied to the command. If no rolebinding name is given, a default name will be used.
-
-	  To learn more, see information about RBAC and policy, or use the 'get' and 'describe' commands on the following resources: 'clusterroles', 'clusterrolebindings', 'roles', 'rolebindings', 'users', 'groups', and 'serviceaccounts'.`)
-
-	addClusterRoleToGroupLongDesc = templates.LongDesc(`
-	  Add a role to groups for all projects in the cluster
-
-	  This command creates or modifies a ClusterRoleBinding with the named cluster role by adding the named group(s) to the list of subjects. The command does not require that the matching role or group resources exist and will create the binding successfully even when they do not exist or when the user does not have access to view them.
-
-	  If the --rolebinding-name argument is supplied, it will look for an existing clusterrolebinding with that name. The role on the matching clusterrolebinding MUST match the role name supplied to the command. If no rolebinding name is given, a default name will be used.`)
-)
-
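-// RoleModificationOptions holds the flags, clients, and resolved subjects shared by the
-// add- and remove-role commands.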
-type RoleModificationOptions struct {
-	PrintFlags *genericclioptions.PrintFlags
-
-	ToPrinter func(string) (printers.ResourcePrinter, error)
-
-	RoleName             string
-	RoleNamespace        string
-	RoleKind             string
-	RoleBindingName      string
-	RoleBindingNamespace string
-	RbacClient           rbacv1client.RbacV1Interface
-	SANames              []string
-
-	UserClient           userv1client.UserV1Interface
-	ServiceAccountClient corev1client.ServiceAccountsGetter
-
-	Targets  []string
-	Users    []string
-	Groups   []string
-	Subjects []rbacv1.Subject
-
-	DryRun bool
-
-	PrintErrf func(format string, args ...interface{})
-
-	genericclioptions.IOStreams
-}
-
-func NewRoleModificationOptions(streams genericclioptions.IOStreams) *RoleModificationOptions {
-	return &RoleModificationOptions{
-		PrintFlags: genericclioptions.NewPrintFlags("added").WithTypeSetter(scheme.Scheme),
-		IOStreams:  streams,
-	}
-}
-
-// NewCmdAddRoleToGroup implements the OpenShift cli add-role-to-group command
-func NewCmdAddRoleToGroup(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewRoleModificationOptions(streams)
-	cmd := &cobra.Command{
-		Use:   name + " ROLE GROUP [GROUP ...]",
-		Short: "Add a role to groups for the current project",
-		Long:  addRoleToGroupLongDesc,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args, &o.Groups, "group"))
-			kcmdutil.CheckErr(o.checkRoleBindingNamespace(f))
-			kcmdutil.CheckErr(o.AddRole())
-		},
-	}
-
-	cmd.Flags().StringVar(&o.RoleBindingName, "rolebinding-name", o.RoleBindingName, "Name of the rolebinding to modify or create. If left empty creates a new rolebinding with a default name")
-	cmd.Flags().StringVar(&o.RoleNamespace, "role-namespace", o.RoleNamespace, "namespace where the role is located: empty means a role defined in cluster policy")
-
-	kcmdutil.AddDryRunFlag(cmd)
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-// NewCmdAddRoleToUser implements the OpenShift cli add-role-to-user command
-func NewCmdAddRoleToUser(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewRoleModificationOptions(streams)
-	o.SANames = []string{}
-	cmd := &cobra.Command{
-		Use:     name + " ROLE (USER | -z SERVICEACCOUNT) [USER ...]",
-		Short:   "Add a role to users or serviceaccounts for the current project",
-		Long:    addRoleToUserLongDesc,
-		Example: fmt.Sprintf(addRoleToUserExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.CompleteUserWithSA(f, cmd, args))
-			kcmdutil.CheckErr(o.checkRoleBindingNamespace(f))
-			kcmdutil.CheckErr(o.AddRole())
-		},
-	}
-
-	cmd.Flags().StringVar(&o.RoleBindingName, "rolebinding-name", o.RoleBindingName, "Name of the rolebinding to modify or create. If left empty creates a new rolebinding with a default name")
-	cmd.Flags().StringVar(&o.RoleNamespace, "role-namespace", o.RoleNamespace, "namespace where the role is located: empty means a role defined in cluster policy")
-	cmd.Flags().StringSliceVarP(&o.SANames, "serviceaccount", "z", o.SANames, "service account in the current namespace to use as a user")
-
-	kcmdutil.AddDryRunFlag(cmd)
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-// NewCmdRemoveRoleFromGroup implements the OpenShift cli remove-role-from-group command
-func NewCmdRemoveRoleFromGroup(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewRoleModificationOptions(streams)
-	cmd := &cobra.Command{
-		Use:   name + " ROLE GROUP [GROUP ...]",
-		Short: "Remove a role from groups for the current project",
-		Long:  `Remove a role from groups for the current project`,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args, &o.Groups, "group"))
-			kcmdutil.CheckErr(o.checkRoleBindingNamespace(f))
-			kcmdutil.CheckErr(o.RemoveRole())
-		},
-	}
-
-	cmd.Flags().StringVar(&o.RoleBindingName, "rolebinding-name", o.RoleBindingName, "Name of the rolebinding to modify. If left empty it will operate on all rolebindings")
-	cmd.Flags().StringVar(&o.RoleNamespace, "role-namespace", o.RoleNamespace, "namespace where the role is located: empty means a role defined in cluster policy")
-
-	kcmdutil.AddDryRunFlag(cmd)
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-// NewCmdRemoveRoleFromUser implements the OpenShift cli remove-role-from-user command
-func NewCmdRemoveRoleFromUser(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewRoleModificationOptions(streams)
-	o.SANames = []string{}
-	cmd := &cobra.Command{
-		Use:   name + " ROLE USER [USER ...]",
-		Short: "Remove a role from users for the current project",
-		Long:  `Remove a role from users for the current project`,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.CompleteUserWithSA(f, cmd, args))
-			kcmdutil.CheckErr(o.checkRoleBindingNamespace(f))
-			kcmdutil.CheckErr(o.RemoveRole())
-		},
-	}
-
-	cmd.Flags().StringVar(&o.RoleBindingName, "rolebinding-name", o.RoleBindingName, "Name of the rolebinding to modify. If left empty it will operate on all rolebindings")
-	cmd.Flags().StringVar(&o.RoleNamespace, "role-namespace", o.RoleNamespace, "namespace where the role is located: empty means a role defined in cluster policy")
-	cmd.Flags().StringSliceVarP(&o.SANames, "serviceaccount", "z", o.SANames, "service account in the current namespace to use as a user")
-
-	kcmdutil.AddDryRunFlag(cmd)
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-// NewCmdAddClusterRoleToGroup implements the OpenShift cli add-cluster-role-to-group command
-func NewCmdAddClusterRoleToGroup(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewRoleModificationOptions(streams)
-	o.RoleKind = "ClusterRole"
-	cmd := &cobra.Command{
-		Use:   name + " <role> <group> [group]...",
-		Short: "Add a role to groups for all projects in the cluster",
-		Long:  addClusterRoleToGroupLongDesc,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args, &o.Groups, "group"))
-			kcmdutil.CheckErr(o.AddRole())
-		},
-	}
-
-	cmd.Flags().StringVar(&o.RoleBindingName, "rolebinding-name", o.RoleBindingName, "Name of the rolebinding to modify or create. If left empty creates a new rolebinding with a default name")
-
-	kcmdutil.AddDryRunFlag(cmd)
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-// NewCmdAddClusterRoleToUser implements the OpenShift cli add-cluster-role-to-user command
-func NewCmdAddClusterRoleToUser(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewRoleModificationOptions(streams)
-	o.RoleKind = "ClusterRole"
-	o.SANames = []string{}
-	cmd := &cobra.Command{
-		Use:   name + " <role> <user | -z serviceaccount> [user]...",
-		Short: "Add a role to users for all projects in the cluster",
-		Long:  addClusterRoleToUserLongDesc,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.CompleteUserWithSA(f, cmd, args))
-			kcmdutil.CheckErr(o.AddRole())
-		},
-	}
-
-	cmd.Flags().StringVar(&o.RoleBindingName, "rolebinding-name", o.RoleBindingName, "Name of the rolebinding to modify or create. If left empty creates a new rolebinding with a default name")
-	cmd.Flags().StringSliceVarP(&o.SANames, "serviceaccount", "z", o.SANames, "service account in the current namespace to use as a user")
-
-	kcmdutil.AddDryRunFlag(cmd)
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-// NewCmdRemoveClusterRoleFromGroup implements the OpenShift cli remove-cluster-role-from-group command
-func NewCmdRemoveClusterRoleFromGroup(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewRoleModificationOptions(streams)
-	o.RoleKind = "ClusterRole"
-	cmd := &cobra.Command{
-		Use:   name + " <role> <group> [group]...",
-		Short: "Remove a role from groups for all projects in the cluster",
-		Long:  `Remove a role from groups for all projects in the cluster`,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args, &o.Groups, "group"))
-			kcmdutil.CheckErr(o.RemoveRole())
-		},
-	}
-
-	cmd.Flags().StringVar(&o.RoleBindingName, "rolebinding-name", o.RoleBindingName, "Name of the rolebinding to modify. If left empty it will operate on all rolebindings")
-
-	kcmdutil.AddDryRunFlag(cmd)
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-// NewCmdRemoveClusterRoleFromUser implements the OpenShift cli remove-cluster-role-from-user command
-func NewCmdRemoveClusterRoleFromUser(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewRoleModificationOptions(streams)
-	o.RoleKind = "ClusterRole"
-	o.SANames = []string{}
-	cmd := &cobra.Command{
-		Use:   name + " <role> <user | -z serviceaccount> [user]...",
-		Short: "Remove a role from users for all projects in the cluster",
-		Long:  `Remove a role from users for all projects in the cluster`,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.CompleteUserWithSA(f, cmd, args))
-			kcmdutil.CheckErr(o.RemoveRole())
-		},
-	}
-
-	cmd.Flags().StringVar(&o.RoleBindingName, "rolebinding-name", o.RoleBindingName, "Name of the rolebinding to modify. If left empty it will operate on all rolebindings")
-	cmd.Flags().StringSliceVarP(&o.SANames, "serviceaccount", "z", o.SANames, "service account in the current namespace to use as a user")
-
-	kcmdutil.AddDryRunFlag(cmd)
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
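-// checkRoleBindingNamespace resolves the namespace the binding will live in and derives the
-// role kind: a non-empty --role-namespace must match that namespace and selects a namespaced
-// Role, otherwise the binding references a ClusterRole.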
-func (o *RoleModificationOptions) checkRoleBindingNamespace(f kcmdutil.Factory) error {
-	var err error
-	o.RoleBindingNamespace, _, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-	if len(o.RoleNamespace) > 0 {
-		if o.RoleBindingNamespace != o.RoleNamespace {
-			return fmt.Errorf("role binding in namespace %q can't reference role in different namespace %q",
-				o.RoleBindingNamespace, o.RoleNamespace)
-		}
-		o.RoleKind = "Role"
-	} else {
-		o.RoleKind = "ClusterRole"
-	}
-	return nil
-}
-
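-// innerComplete wires up the RBAC, user, and service account clients shared by all of the
-// modify-role commands and captures the dry-run flag.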
-func (o *RoleModificationOptions) innerComplete(f kcmdutil.Factory, cmd *cobra.Command) error {
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.RbacClient, err = rbacv1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	o.UserClient, err = userv1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	o.ServiceAccountClient, err = corev1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	o.DryRun = kcmdutil.GetFlagBool(cmd, "dry-run")
-	o.PrintErrf = func(format string, args ...interface{}) {
-		fmt.Fprintf(o.ErrOut, format, args...)
-	}
-
-	return nil
-}
-
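-// CompleteUserWithSA parses the role and user arguments, validates that any -z service
-// account names are in short form, and resolves them against the current namespace.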
-func (o *RoleModificationOptions) CompleteUserWithSA(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	if len(args) < 1 {
-		return errors.New("you must specify a role")
-	}
-
-	o.RoleName = args[0]
-	if len(args) > 1 {
-		o.Users = append(o.Users, args[1:]...)
-	}
-
-	o.Targets = o.Users
-
-	if (len(o.Users) == 0) && (len(o.SANames) == 0) {
-		return errors.New("you must specify at least one user or service account")
-	}
-
-	// return an error if a fully-qualified service-account name is used
-	for _, sa := range o.SANames {
-		if strings.HasPrefix(sa, "system:serviceaccount") {
-			return errors.New("--serviceaccount (-z) should only be used with short-form serviceaccount names (e.g. `default`)")
-		}
-
-		if errCauses := validation.ValidateServiceAccountName(sa, false); len(errCauses) > 0 {
-			message := fmt.Sprintf("%q is not a valid serviceaccount name:\n  ", sa)
-			message += strings.Join(errCauses, "\n  ")
-			return errors.New(message)
-		}
-	}
-
-	err := o.innerComplete(f, cmd)
-	if err != nil {
-		return err
-	}
-
-	defaultNamespace, _, err := f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	for _, sa := range o.SANames {
-		o.Targets = append(o.Targets, sa)
-		o.Subjects = append(o.Subjects, rbacv1.Subject{Namespace: defaultNamespace, Name: sa, Kind: rbacv1.ServiceAccountKind})
-	}
-
-	o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) {
-		o.PrintFlags.NamePrintFlags.Operation = getSuccessMessage(o.DryRun, operation, o.Targets)
-		return o.PrintFlags.ToPrinter()
-	}
-
-	return nil
-}
-
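-// Complete parses the role name from the first argument, appends the remaining arguments to
-// the given target subject list, and prepares the clients and printer.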
-func (o *RoleModificationOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string, target *[]string, targetName string) error {
-	if len(args) < 2 {
-		return fmt.Errorf("you must specify at least two arguments: <role> <%s> [%s]...", targetName, targetName)
-	}
-
-	o.RoleName = args[0]
-	*target = append(*target, args[1:]...)
-
-	o.Targets = *target
-
-	if err := o.innerComplete(f, cmd); err != nil {
-		return err
-	}
-
-	o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) {
-		o.PrintFlags.NamePrintFlags.Operation = getSuccessMessage(o.DryRun, operation, o.Targets)
-		return o.PrintFlags.ToPrinter()
-	}
-
-	return nil
-}
-
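-// getRoleBinding looks up the binding named by o.RoleBindingName and verifies it references
-// the expected role name and kind; a missing binding is reported as (nil, false, nil).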
-func (o *RoleModificationOptions) getRoleBinding() (*roleBindingAbstraction, bool /* isUpdate */, error) {
-	roleBinding, err := getRoleBindingAbstraction(o.RbacClient, o.RoleBindingName, o.RoleBindingNamespace)
-	if err != nil {
-		if kapierrors.IsNotFound(err) {
-			return nil, false, nil
-		}
-		return nil, false, err
-	}
-
-	// Check that we update the rolebinding for the intended role.
-	if roleBinding.RoleName() != o.RoleName {
-		return nil, false, fmt.Errorf("rolebinding %s found for role %s, not %s",
-			o.RoleBindingName, roleBinding.RoleName(), o.RoleName)
-	}
-	if roleBinding.RoleKind() != o.RoleKind {
-		return nil, false, fmt.Errorf("rolebinding %s found for %q, not %q",
-			o.RoleBindingName, roleBinding.RoleKind(), o.RoleKind)
-	}
-
-	return roleBinding, true, nil
-}
-
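-// newRoleBinding constructs a fresh rolebinding abstraction, generating a unique default
-// name when o.RoleBindingName is unset.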
-func (o *RoleModificationOptions) newRoleBinding() (*roleBindingAbstraction, error) {
-	var roleBindingName string
-
-	// Create a new rolebinding with the desired name.
-	if len(o.RoleBindingName) > 0 {
-		roleBindingName = o.RoleBindingName
-	} else {
-		// If unspecified, generate a unique default name
-		var err error
-		roleBindingName, err = getUniqueName(o.RbacClient, o.RoleName, o.RoleBindingNamespace)
-		if err != nil {
-			return nil, err
-		}
-	}
-	roleBinding, err := newRoleBindingAbstraction(o.RbacClient, roleBindingName, o.RoleBindingNamespace, o.RoleName, o.RoleKind)
-	if err != nil {
-		return nil, err
-	}
-	return roleBinding, nil
-}
-
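-// AddRole adds the targeted subjects to a matching rolebinding, creating a new binding when
-// no existing one already covers the role and subjects.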
-func (o *RoleModificationOptions) AddRole() error {
-	var (
-		roleBinding *roleBindingAbstraction
-		isUpdate    bool
-		err         error
-	)
-
-	p, err := o.ToPrinter("added")
-	if err != nil {
-		return err
-	}
-
-	roleToPrint := o.roleObjectToPrint()
-
-	// Look for an existing rolebinding by name.
-	if len(o.RoleBindingName) > 0 {
-		roleBinding, isUpdate, err = o.getRoleBinding()
-		if err != nil {
-			return err
-		}
-	} else {
-		// Check if we already have a role binding that matches
-		checkBindings, err := getRoleBindingAbstractionsForRole(o.RbacClient, o.RoleName, o.RoleKind, o.RoleBindingNamespace)
-		if err != nil {
-			return err
-		}
-		if len(checkBindings) > 0 {
-			for _, checkBinding := range checkBindings {
-				newSubjects := addSubjects(o.Users, o.Groups, o.Subjects, checkBinding.Subjects())
-				if len(newSubjects) == len(checkBinding.Subjects()) {
-					// we already have a rolebinding that matches
-					if o.PrintFlags.OutputFormat != nil && len(*o.PrintFlags.OutputFormat) > 0 {
-						return p.PrintObj(checkBinding.Object(), o.Out)
-					}
-					return p.PrintObj(roleToPrint, o.Out)
-				}
-			}
-		}
-	}
-
-	if roleBinding == nil {
-		roleBinding, err = o.newRoleBinding()
-		if err != nil {
-			return err
-		}
-	}
-
-	// warn if binding to non-existent role
-	if o.PrintErrf != nil {
-		var err error
-		if roleBinding.RoleKind() == "Role" {
-			_, err = o.RbacClient.Roles(o.RoleBindingNamespace).Get(roleBinding.RoleName(), metav1.GetOptions{})
-		} else {
-			_, err = o.RbacClient.ClusterRoles().Get(roleBinding.RoleName(), metav1.GetOptions{})
-		}
-		if err != nil && kapierrors.IsNotFound(err) {
-			o.PrintErrf("Warning: role '%s' not found\n", roleBinding.RoleName())
-		}
-	}
-	existingSubjects := roleBinding.Subjects()
-	newSubjects := addSubjects(o.Users, o.Groups, o.Subjects, existingSubjects)
-	// warn if any new subject does not exist, skipping existing subjects on the binding
-	if o.PrintErrf != nil {
-		// `addSubjects` appends new subjects onto the list of existing ones, skip over the existing ones
-		for _, newSubject := range newSubjects[len(existingSubjects):] {
-			var err error
-			switch newSubject.Kind {
-			case rbacv1.ServiceAccountKind:
-				if o.ServiceAccountClient != nil {
-					_, err = o.ServiceAccountClient.ServiceAccounts(newSubject.Namespace).Get(newSubject.Name, metav1.GetOptions{})
-				}
-			case rbacv1.UserKind:
-				if o.UserClient != nil {
-					_, err = o.UserClient.Users().Get(newSubject.Name, metav1.GetOptions{})
-				}
-			case rbacv1.GroupKind:
-				if o.UserClient != nil {
-					_, err = o.UserClient.Groups().Get(newSubject.Name, metav1.GetOptions{})
-				}
-			}
-			if err != nil && kapierrors.IsNotFound(err) {
-				o.PrintErrf("Warning: %s '%s' not found\n", newSubject.Kind, newSubject.Name)
-			}
-		}
-	}
-	roleBinding.SetSubjects(newSubjects)
-
-	if o.DryRun || (o.PrintFlags.OutputFormat != nil && len(*o.PrintFlags.OutputFormat) > 0) {
-		return p.PrintObj(roleBinding.Object(), o.Out)
-	}
-
-	if isUpdate {
-		err = roleBinding.Update()
-	} else {
-		err = roleBinding.Create()
-		// If the rolebinding was created in the meantime, rerun
-		if kapierrors.IsAlreadyExists(err) {
-			return o.AddRole()
-		}
-	}
-	if err != nil {
-		return err
-	}
-
-	return p.PrintObj(roleToPrint, o.Out)
-}
-
-// addSubjects appends new subjects to the list of existing ones, skipping any that are already present.
-// !!! The returned list MUST start with `existingSubjects` and only append new subjects *after*;
-//     consumers of this function expect new subjects to start at `len(existingSubjects)`.
-func addSubjects(users []string, groups []string, subjects []rbacv1.Subject, existingSubjects []rbacv1.Subject) []rbacv1.Subject {
-	subjectsToAdd := authorizationutil.BuildRBACSubjects(users, groups)
-	subjectsToAdd = append(subjectsToAdd, subjects...)
-	newSubjects := make([]rbacv1.Subject, len(existingSubjects))
-	copy(newSubjects, existingSubjects)
-
-subjectCheck:
-	for _, subjectToAdd := range subjectsToAdd {
-		for _, newSubject := range newSubjects {
-			if newSubject.Kind == subjectToAdd.Kind &&
-				newSubject.Name == subjectToAdd.Name &&
-				newSubject.Namespace == subjectToAdd.Namespace {
-				continue subjectCheck
-			}
-		}
-
-		newSubjects = append(newSubjects, subjectToAdd)
-	}
-
-	return newSubjects
-}
-
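-// checkRolebindingAutoupdate warns when the modified binding is annotated for automatic
-// reconciliation, since a master restart could undo the change.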
-func (o *RoleModificationOptions) checkRolebindingAutoupdate(roleBinding *roleBindingAbstraction) {
-	if roleBinding.Annotation(rbacv1.AutoUpdateAnnotationKey) == "true" {
-		if o.PrintErrf != nil {
-			o.PrintErrf("Warning: Your changes may get lost whenever a master"+
-				" is restarted, unless you prevent reconciliation of this"+
-				" rolebinding using the following command: oc annotate"+
-				" %s.rbac %s '%s=false' --overwrite", roleBinding.Type(),
-				roleBinding.Name(), rbacv1.AutoUpdateAnnotationKey)
-		}
-	}
-}
-
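-// roleObjectToPrint returns a skeleton Role or ClusterRole carrying only the role name,
-// used solely as the object handed to the success printer.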
-func (o *RoleModificationOptions) roleObjectToPrint() runtime.Object {
-	var roleToPrint runtime.Object
-	if len(o.RoleBindingNamespace) == 0 {
-		roleToPrint = &rbacv1.ClusterRole{
-			// this is ok because we know exactly how we want to be serialized
-			TypeMeta:   metav1.TypeMeta{APIVersion: rbacv1.SchemeGroupVersion.String(), Kind: o.RoleKind},
-			ObjectMeta: metav1.ObjectMeta{Name: o.RoleName},
-		}
-	} else {
-		roleToPrint = &rbacv1.Role{
-			// this is ok because we know exactly how we want to be serialized
-			TypeMeta:   metav1.TypeMeta{APIVersion: rbacv1.SchemeGroupVersion.String(), Kind: o.RoleKind},
-			ObjectMeta: metav1.ObjectMeta{Name: o.RoleName},
-		}
-	}
-	return roleToPrint
-}
-
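-// RemoveRole removes the targeted subjects from the named rolebinding, or from every binding
-// referencing the role when no name is given, deleting any binding left without subjects
-// (unless its autoupdate annotation is "false").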
-func (o *RoleModificationOptions) RemoveRole() error {
-	var roleBindings []*roleBindingAbstraction
-	var err error
-	if len(o.RoleBindingName) > 0 {
-		existingRoleBinding, err := getRoleBindingAbstraction(o.RbacClient, o.RoleBindingName, o.RoleBindingNamespace)
-		if err != nil {
-			return err
-		}
-		// Check that we update the rolebinding for the intended role.
-		if existingRoleBinding.RoleName() != o.RoleName {
-			return fmt.Errorf("rolebinding %s contains role %s, instead of role %s",
-				o.RoleBindingName, existingRoleBinding.RoleName(), o.RoleName)
-		}
-		if existingRoleBinding.RoleKind() != o.RoleKind {
-			return fmt.Errorf("rolebinding %s contains role %s of kind %q, not %q",
-				o.RoleBindingName, o.RoleName, existingRoleBinding.RoleKind(), o.RoleKind)
-		}
-
-		roleBindings = make([]*roleBindingAbstraction, 1)
-		roleBindings[0] = existingRoleBinding
-	} else {
-		roleBindings, err = getRoleBindingAbstractionsForRole(o.RbacClient, o.RoleName, o.RoleKind, o.RoleBindingNamespace)
-		if err != nil {
-			return err
-		}
-		if len(roleBindings) == 0 {
-			bindingType := "ClusterRoleBinding"
-			if len(o.RoleBindingNamespace) > 0 {
-				bindingType = "RoleBinding"
-			}
-			return fmt.Errorf("unable to locate any %s for %s %q", bindingType, o.RoleKind, o.RoleName)
-		}
-	}
-
-	subjectsToRemove := authorizationutil.BuildRBACSubjects(o.Users, o.Groups)
-	subjectsToRemove = append(subjectsToRemove, o.Subjects...)
-
-	found := 0
-	cnt := 0
-	for _, roleBinding := range roleBindings {
-		var resultingSubjects []rbacv1.Subject
-		resultingSubjects, cnt = removeSubjects(roleBinding.Subjects(), subjectsToRemove)
-		roleBinding.SetSubjects(resultingSubjects)
-		found += cnt
-	}
-
-	p, err := o.ToPrinter("removed")
-	if err != nil {
-		return err
-	}
-
-	if o.PrintFlags.OutputFormat != nil && len(*o.PrintFlags.OutputFormat) > 0 {
-		if found == 0 {
-			return fmt.Errorf("unable to find target %v", o.Targets)
-		}
-
-		updated := &unstructured.UnstructuredList{
-			Object: map[string]interface{}{
-				"kind":       "List",
-				"apiVersion": "v1",
-				"metadata":   map[string]interface{}{},
-			},
-		}
-		for _, binding := range roleBindings {
-			obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(binding.Object())
-			if err != nil {
-				return err
-			}
-			updated.Items = append(updated.Items, unstructured.Unstructured{Object: obj})
-		}
-
-		return p.PrintObj(updated, o.Out)
-	}
-
-	roleToPrint := o.roleObjectToPrint()
-	if o.DryRun {
-		return p.PrintObj(roleToPrint, o.Out)
-	}
-
-	for _, roleBinding := range roleBindings {
-		if len(roleBinding.Subjects()) > 0 || roleBinding.Annotation(rbacv1.AutoUpdateAnnotationKey) == "false" {
-			err = roleBinding.Update()
-		} else {
-			err = roleBinding.Delete()
-		}
-		if err != nil {
-			return err
-		}
-		o.checkRolebindingAutoupdate(roleBinding)
-	}
-	if found == 0 {
-		return fmt.Errorf("unable to find target %v", o.Targets)
-	}
-
-	return p.PrintObj(roleToPrint, o.Out)
-}
-
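-// removeSubjects returns haystack with every subject matching one of needles removed, along
-// with the number of matches found.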
-func removeSubjects(haystack, needles []rbacv1.Subject) ([]rbacv1.Subject, int) {
-	newSubjects := []rbacv1.Subject{}
-	found := 0
-
-existingLoop:
-	for _, existingSubject := range haystack {
-		for _, toRemove := range needles {
-			if existingSubject.Kind == toRemove.Kind &&
-				existingSubject.Name == toRemove.Name &&
-				existingSubject.Namespace == toRemove.Namespace {
-				found++
-				continue existingLoop
-			}
-		}
-
-		newSubjects = append(newSubjects, existingSubject)
-	}
-
-	return newSubjects, found
-}
-
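-// getSuccessMessage renders the printer's operation message, quoting the target list (or
-// single target) and appending a dry-run marker when needed.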
-func getSuccessMessage(dryRun bool, operation string, targets []string) string {
-	allTargets := fmt.Sprintf("%q", targets)
-	if len(targets) == 1 {
-		allTargets = fmt.Sprintf("%q", targets[0])
-	}
-	if dryRun {
-		return fmt.Sprintf("%s: %s (dry run)", operation, allTargets)
-	}
-	return fmt.Sprintf("%s: %s", operation, allTargets)
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/modify_roles_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/policy/modify_roles_test.go
deleted file mode 100644
index 9a180cb102cc..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/modify_roles_test.go
+++ /dev/null
@@ -1,1360 +0,0 @@
-package policy
-
-import (
-	"fmt"
-	"reflect"
-	"testing"
-
-	corev1 "k8s.io/api/core/v1"
-	rbacv1 "k8s.io/api/rbac/v1"
-	"k8s.io/apimachinery/pkg/api/equality"
-	kapierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	diffutil "k8s.io/apimachinery/pkg/util/diff"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/printers"
-	fakeclient "k8s.io/client-go/kubernetes/fake"
-	rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
-
-	userv1 "github.com/openshift/api/user/v1"
-	fakeuserclient "github.com/openshift/client-go/user/clientset/versioned/fake"
-)
-
-func TestModifyNamedClusterRoleBinding(t *testing.T) {
-	tests := map[string]struct {
-		action                      string
-		inputRole                   string
-		inputRoleBindingName        string
-		inputSubjects               []string
-		expectedRoleBindingName     string
-		expectedSubjects            []rbacv1.Subject
-		existingClusterRoleBindings *rbacv1.ClusterRoleBindingList
-		expectedRoleBindingList     []string
-	}{
-		// no name provided - create "edit" for role "edit"
-		"create-clusterrolebinding": {
-			action:    "add",
-			inputRole: "edit",
-			inputSubjects: []string{
-				"foo",
-			},
-			expectedRoleBindingName: "edit",
-			expectedSubjects: []rbacv1.Subject{{
-				APIGroup: rbacv1.GroupName,
-				Name:     "foo",
-				Kind:     rbacv1.UserKind,
-			}},
-			existingClusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-				Items: []rbacv1.ClusterRoleBinding{},
-			},
-			expectedRoleBindingList: []string{"edit"},
-		},
-		// name provided - create "custom" for role "edit"
-		"create-named-clusterrolebinding": {
-			action:               "add",
-			inputRole:            "edit",
-			inputRoleBindingName: "custom",
-			inputSubjects: []string{
-				"foo",
-			},
-			expectedRoleBindingName: "custom",
-			expectedSubjects: []rbacv1.Subject{{
-				APIGroup: rbacv1.GroupName,
-				Name:     "foo",
-				Kind:     rbacv1.UserKind,
-			}},
-			existingClusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-				Items: []rbacv1.ClusterRoleBinding{},
-			},
-			expectedRoleBindingList: []string{"custom"},
-		},
-		// name provided - modify "custom"
-		"update-named-clusterrolebinding": {
-			action:               "add",
-			inputRole:            "edit",
-			inputRoleBindingName: "custom",
-			inputSubjects: []string{
-				"baz",
-			},
-			expectedRoleBindingName: "custom",
-			expectedSubjects: []rbacv1.Subject{{
-				APIGroup: rbacv1.GroupName,
-				Name:     "bar",
-				Kind:     rbacv1.UserKind,
-			}, {
-				APIGroup: rbacv1.GroupName,
-				Name:     "baz",
-				Kind:     rbacv1.UserKind,
-			}},
-			existingClusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-				Items: []rbacv1.ClusterRoleBinding{{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "edit",
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "foo",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "ClusterRole",
-					}}, {
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "custom",
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "bar",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "ClusterRole",
-					}},
-				},
-			},
-			expectedRoleBindingList: []string{"custom", "edit"},
-		},
-		// name provided - remove from "custom"
-		"remove-named-clusterrolebinding": {
-			action:               "remove",
-			inputRole:            "edit",
-			inputRoleBindingName: "custom",
-			inputSubjects: []string{
-				"baz",
-			},
-			expectedRoleBindingName: "custom",
-			expectedSubjects: []rbacv1.Subject{{
-				APIGroup: rbacv1.GroupName,
-				Name:     "bar",
-				Kind:     rbacv1.UserKind,
-			}},
-			existingClusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-				Items: []rbacv1.ClusterRoleBinding{{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "edit",
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "foo",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "ClusterRole",
-					}}, {
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "custom",
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "bar",
-						Kind:     rbacv1.UserKind,
-					}, {
-						APIGroup: rbacv1.GroupName,
-						Name:     "baz",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "ClusterRole",
-					}},
-				},
-			},
-			expectedRoleBindingList: []string{"custom", "edit"},
-		},
-		// no name provided - creates "edit-0"
-		"update-default-clusterrolebinding": {
-			action:    "add",
-			inputRole: "edit",
-			inputSubjects: []string{
-				"baz",
-			},
-			expectedRoleBindingName: "edit-0",
-			expectedSubjects: []rbacv1.Subject{{
-				APIGroup: rbacv1.GroupName,
-				Name:     "baz",
-				Kind:     rbacv1.UserKind,
-			}},
-			existingClusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-				Items: []rbacv1.ClusterRoleBinding{{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "edit",
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "foo",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "ClusterRole",
-					}}, {
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "custom",
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "bar",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "ClusterRole",
-					}},
-				},
-			},
-			expectedRoleBindingList: []string{"custom", "edit", "edit-0"},
-		},
-		// no name provided - removes "baz"
-		"remove-default-clusterrolebinding": {
-			action:    "remove",
-			inputRole: "edit",
-			inputSubjects: []string{
-				"baz",
-			},
-			expectedRoleBindingName: "edit",
-			expectedSubjects: []rbacv1.Subject{{
-				APIGroup: rbacv1.GroupName,
-				Name:     "foo",
-				Kind:     rbacv1.UserKind,
-			}},
-			existingClusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-				Items: []rbacv1.ClusterRoleBinding{{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "edit",
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "foo",
-						Kind:     rbacv1.UserKind,
-					}, {
-						APIGroup: rbacv1.GroupName,
-						Name:     "baz",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "ClusterRole",
-					}}, {
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "custom",
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "bar",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "ClusterRole",
-					}},
-				},
-			},
-			expectedRoleBindingList: []string{"custom", "edit"},
-		},
-		// name provided - remove from autoupdate protected
-		"remove-from-protected-clusterrolebinding": {
-			action:               "remove",
-			inputRole:            "edit",
-			inputRoleBindingName: "custom",
-			inputSubjects: []string{
-				"bar",
-			},
-			expectedRoleBindingName: "custom",
-			expectedSubjects:        []rbacv1.Subject{},
-			existingClusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-				Items: []rbacv1.ClusterRoleBinding{{
-					ObjectMeta: metav1.ObjectMeta{
-						Annotations: map[string]string{rbacv1.AutoUpdateAnnotationKey: "false"},
-						Name:        "custom",
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "bar",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "ClusterRole",
-					}},
-				},
-			},
-			expectedRoleBindingList: []string{"custom"},
-		},
-		// name not provided - do not add duplicate
-		"do-not-add-duplicate-clusterrolebinding": {
-			action:                  "add",
-			inputRole:               "edit",
-			inputSubjects:           []string{"foo"},
-			expectedRoleBindingName: "edit",
-			expectedSubjects: []rbacv1.Subject{{
-				APIGroup: rbacv1.GroupName,
-				Name:     "foo",
-				Kind:     rbacv1.UserKind,
-			}},
-			existingClusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-				Items: []rbacv1.ClusterRoleBinding{{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "edit",
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "foo",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "ClusterRole",
-					}},
-				},
-			},
-			expectedRoleBindingList: []string{"edit"},
-		},
-	}
-	for tcName, tc := range tests {
-		// Set up modifier options and run the requested add/remove action
-		o := &RoleModificationOptions{
-			RoleName:        tc.inputRole,
-			RoleKind:        "ClusterRole",
-			RoleBindingName: tc.inputRoleBindingName,
-			Users:           tc.inputSubjects,
-			RbacClient:      fakeclient.NewSimpleClientset(tc.existingClusterRoleBindings).RbacV1(),
-			PrintFlags:      genericclioptions.NewPrintFlags(""),
-			ToPrinter:       func(string) (printers.ResourcePrinter, error) { return printers.NewDiscardingPrinter(), nil },
-		}
-
-		modifyRoleAndCheck(t, o, tcName, tc.action, tc.expectedRoleBindingName, tc.expectedSubjects, tc.expectedRoleBindingList)
-	}
-}
-
-func TestModifyNamedLocalRoleBinding(t *testing.T) {
-	tests := map[string]struct {
-		action                  string
-		inputRole               string
-		inputRoleBindingName    string
-		inputSubjects           []string
-		expectedRoleBindingName string
-		expectedSubjects        []rbacv1.Subject
-		existingRoleBindings    *rbacv1.RoleBindingList
-		expectedRoleBindingList []string
-	}{
-		// no name provided - create "edit" for role "edit"
-		"create-rolebinding": {
-			action:    "add",
-			inputRole: "edit",
-			inputSubjects: []string{
-				"foo",
-			},
-			expectedRoleBindingName: "edit",
-			expectedSubjects: []rbacv1.Subject{{
-				APIGroup: rbacv1.GroupName,
-				Name:     "foo",
-				Kind:     rbacv1.UserKind,
-			}},
-			existingRoleBindings: &rbacv1.RoleBindingList{
-				Items: []rbacv1.RoleBinding{},
-			},
-			expectedRoleBindingList: []string{"edit"},
-		},
-		// name provided - create "custom" for role "edit"
-		"create-named-binding": {
-			action:               "add",
-			inputRole:            "edit",
-			inputRoleBindingName: "custom",
-			inputSubjects: []string{
-				"foo",
-			},
-			expectedRoleBindingName: "custom",
-			expectedSubjects: []rbacv1.Subject{{
-				APIGroup: rbacv1.GroupName,
-				Name:     "foo",
-				Kind:     rbacv1.UserKind,
-			}},
-			existingRoleBindings: &rbacv1.RoleBindingList{
-				Items: []rbacv1.RoleBinding{},
-			},
-			expectedRoleBindingList: []string{"custom"},
-		},
-		// no name provided - creates "edit-0"
-		"update-default-binding": {
-			action:    "add",
-			inputRole: "edit",
-			inputSubjects: []string{
-				"baz",
-			},
-			expectedRoleBindingName: "edit-0",
-			expectedSubjects: []rbacv1.Subject{{
-				APIGroup: rbacv1.GroupName,
-				Name:     "baz",
-				Kind:     rbacv1.UserKind,
-			}},
-			existingRoleBindings: &rbacv1.RoleBindingList{
-				Items: []rbacv1.RoleBinding{{
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "edit",
-						Namespace: metav1.NamespaceDefault,
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "foo",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "Role",
-					}}, {
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "custom",
-						Namespace: metav1.NamespaceDefault,
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "bar",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "Role",
-					}},
-				},
-			},
-			expectedRoleBindingList: []string{"custom", "edit", "edit-0"},
-		},
-		// no name provided - remove "foo"
-		"remove-default-binding": {
-			action:    "remove",
-			inputRole: "edit",
-			inputSubjects: []string{
-				"foo",
-			},
-			expectedRoleBindingName: "edit",
-			expectedSubjects: []rbacv1.Subject{{
-				APIGroup: rbacv1.GroupName,
-				Name:     "baz",
-				Kind:     rbacv1.UserKind,
-			}},
-			existingRoleBindings: &rbacv1.RoleBindingList{
-				Items: []rbacv1.RoleBinding{{
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "edit",
-						Namespace: metav1.NamespaceDefault,
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "foo",
-						Kind:     rbacv1.UserKind,
-					}, {
-						APIGroup: rbacv1.GroupName,
-						Name:     "baz",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "Role",
-					}}, {
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "custom",
-						Namespace: metav1.NamespaceDefault,
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "bar",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "Role",
-					}},
-				},
-			},
-			expectedRoleBindingList: []string{"custom", "edit"},
-		},
-		// name provided - modify "custom"
-		"update-named-binding": {
-			action:               "add",
-			inputRole:            "edit",
-			inputRoleBindingName: "custom",
-			inputSubjects: []string{
-				"baz",
-			},
-			expectedRoleBindingName: "custom",
-			expectedSubjects: []rbacv1.Subject{{
-				APIGroup: rbacv1.GroupName,
-				Name:     "bar",
-				Kind:     rbacv1.UserKind,
-			}, {
-				APIGroup: rbacv1.GroupName,
-				Name:     "baz",
-				Kind:     rbacv1.UserKind,
-			}},
-			existingRoleBindings: &rbacv1.RoleBindingList{
-				Items: []rbacv1.RoleBinding{{
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "edit",
-						Namespace: metav1.NamespaceDefault,
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "foo",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "Role",
-					}}, {
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "custom",
-						Namespace: metav1.NamespaceDefault,
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "bar",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "Role",
-					}},
-				},
-			},
-			expectedRoleBindingList: []string{"custom", "edit"},
-		},
-		// name provided - remove from "custom"
-		"remove-named-binding": {
-			action:               "remove",
-			inputRole:            "edit",
-			inputRoleBindingName: "custom",
-			inputSubjects: []string{
-				"baz",
-			},
-			expectedRoleBindingName: "custom",
-			expectedSubjects: []rbacv1.Subject{{
-				APIGroup: rbacv1.GroupName,
-				Name:     "bar",
-				Kind:     rbacv1.UserKind,
-			}},
-			existingRoleBindings: &rbacv1.RoleBindingList{
-				Items: []rbacv1.RoleBinding{{
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "edit",
-						Namespace: metav1.NamespaceDefault,
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "foo",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "Role",
-					}}, {
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "custom",
-						Namespace: metav1.NamespaceDefault,
-					},
-					Subjects: []rbacv1.Subject{{
-						APIGroup: rbacv1.GroupName,
-						Name:     "bar",
-						Kind:     rbacv1.UserKind,
-					}, {
-						APIGroup: rbacv1.GroupName,
-						Name:     "baz",
-						Kind:     rbacv1.UserKind,
-					}},
-					RoleRef: rbacv1.RoleRef{
-						Name: "edit",
-						Kind: "Role",
-					}},
-				},
-			},
-			expectedRoleBindingList: []string{"custom", "edit"},
-		},
-	}
-	for tcName, tc := range tests {
-		// Set up modifier options and run the requested add/remove action
-		o := &RoleModificationOptions{
-			RoleBindingNamespace: metav1.NamespaceDefault,
-			RoleBindingName:      tc.inputRoleBindingName,
-			RoleKind:             "Role",
-			RoleName:             tc.inputRole,
-			RbacClient:           fakeclient.NewSimpleClientset(tc.existingRoleBindings).RbacV1(),
-			Users:                tc.inputSubjects,
-			PrintFlags:           genericclioptions.NewPrintFlags(""),
-			ToPrinter:            func(string) (printers.ResourcePrinter, error) { return printers.NewDiscardingPrinter(), nil },
-		}
-
-		modifyRoleAndCheck(t, o, tcName, tc.action, tc.expectedRoleBindingName, tc.expectedSubjects, tc.expectedRoleBindingList)
-	}
-}
-
-func TestModifyRoleBindingWarnings(t *testing.T) {
-	type clusterState struct {
-		roles               *rbacv1.RoleList
-		clusterRoles        *rbacv1.ClusterRoleList
-		roleBindings        *rbacv1.RoleBindingList
-		clusterRoleBindings *rbacv1.ClusterRoleBindingList
-
-		users           *userv1.UserList
-		groups          *userv1.GroupList
-		serviceAccounts *corev1.ServiceAccountList
-	}
-	type cmdInputs struct {
-		roleName        string
-		roleKind        string
-		roleBindingName string
-		roleNamespace   string
-		userNames       []string
-		groupNames      []string
-		serviceAccounts []rbacv1.Subject
-	}
-	type cmdOutputs struct {
-		warnings []string
-	}
-	const (
-		currentNamespace = "ns-0"
-
-		existingRoleName                  = "existing-role-0"
-		existingRoleBindingName           = "existing-rolebinding-0"
-		existingNamespacedRoleBindingName = "existing-namespaced-rolebinding-0"
-		existingClusterRoleBindingName    = "existing-clusterrolebinding-0"
-		existingClusterRoleName           = "existing-clusterrole-0"
-		existingUserName                  = "existing-user-0"
-		existingGroupName                 = "existing-group-0"
-		existingServiceAccountName        = "existing-serviceaccount-0"
-
-		boundRoleName           = "bound-role-0"
-		boundUserName           = "bound-user-0"
-		boundGroupName          = "bound-group-0"
-		boundServiceAccountName = "bound-serviceaccount-0"
-
-		newRoleName               = "tbd-role-0"
-		newClusterRoleName        = "tbd-clusterrole-0"
-		newRoleBindingName        = "tbd-rolebinding-0"
-		newClusterRoleBindingName = "tbd-clusterrolebinding-0"
-		newUserName               = "tbd-user-0"
-		newGroupName              = "tbd-group-0"
-		newServiceAccountName     = "tbd-serviceaccount-0"
-
-		roleNotFoundWarning           = "Warning: role 'tbd-role-0' not found\n"
-		clusterRoleNotFoundWarning    = "Warning: role 'tbd-clusterrole-0' not found\n"
-		userNotFoundWarning           = "Warning: User 'tbd-user-0' not found\n"
-		groupNotFoundWarning          = "Warning: Group 'tbd-group-0' not found\n"
-		serviceAccountNotFoundWarning = "Warning: ServiceAccount 'tbd-serviceaccount-0' not found\n"
-	)
-	var (
-		boundSubjects = []rbacv1.Subject{
-			{APIGroup: rbacv1.GroupName, Kind: rbacv1.UserKind, Name: boundUserName},
-			{APIGroup: rbacv1.GroupName, Kind: rbacv1.GroupKind, Name: boundGroupName},
-			{APIGroup: rbacv1.GroupName, Kind: rbacv1.ServiceAccountKind, Name: boundServiceAccountName},
-		}
-		existingUser = userv1.User{
-			ObjectMeta: metav1.ObjectMeta{Name: existingUserName},
-		}
-		existingGroup = userv1.Group{
-			ObjectMeta: metav1.ObjectMeta{Name: existingGroupName},
-		}
-		existingServiceAccount = corev1.ServiceAccount{
-			ObjectMeta: metav1.ObjectMeta{Name: existingServiceAccountName, Namespace: currentNamespace},
-		}
-		existingRole = rbacv1.Role{
-			ObjectMeta: metav1.ObjectMeta{Name: existingRoleName, Namespace: currentNamespace},
-		}
-		boundRole = rbacv1.Role{
-			ObjectMeta: metav1.ObjectMeta{Name: boundRoleName, Namespace: currentNamespace},
-		}
-		existingClusterRole = rbacv1.ClusterRole{
-			ObjectMeta: metav1.ObjectMeta{Name: existingClusterRoleName},
-		}
-		existingRoleBinding = rbacv1.RoleBinding{
-			ObjectMeta: metav1.ObjectMeta{Name: existingRoleBindingName, Namespace: currentNamespace},
-			Subjects:   boundSubjects,
-			RoleRef:    rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Name: existingClusterRoleName, Kind: "ClusterRole"},
-		}
-		existingNamespacedRoleBinding = rbacv1.RoleBinding{
-			ObjectMeta: metav1.ObjectMeta{Name: existingNamespacedRoleBindingName, Namespace: currentNamespace},
-			Subjects:   boundSubjects,
-			RoleRef:    rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Name: boundRoleName, Kind: "Role"},
-		}
-		existingClusterRoleBinding = rbacv1.ClusterRoleBinding{
-			ObjectMeta: metav1.ObjectMeta{Name: existingClusterRoleBindingName},
-			Subjects:   boundSubjects,
-			RoleRef:    rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Name: existingClusterRoleName, Kind: "ClusterRole"},
-		}
-		defaultInitialState = clusterState{
-			roles: &rbacv1.RoleList{
-				Items: []rbacv1.Role{existingRole},
-			},
-			clusterRoles: &rbacv1.ClusterRoleList{
-				Items: []rbacv1.ClusterRole{existingClusterRole},
-			},
-			roleBindings: &rbacv1.RoleBindingList{
-				Items: []rbacv1.RoleBinding{existingRoleBinding, existingNamespacedRoleBinding},
-			},
-			clusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-				Items: []rbacv1.ClusterRoleBinding{existingClusterRoleBinding},
-			},
-			users: &userv1.UserList{
-				Items: []userv1.User{existingUser},
-			},
-			groups: &userv1.GroupList{
-				Items: []userv1.Group{existingGroup},
-			},
-			serviceAccounts: &corev1.ServiceAccountList{
-				Items: []corev1.ServiceAccount{existingServiceAccount},
-			},
-		}
-	)
-	tests := []struct {
-		name    string
-		subtest string
-
-		initialState clusterState
-		inputs       cmdInputs
-
-		expectedOutputs cmdOutputs
-		expectedState   clusterState
-	}{
-		{
-			name:         "add-role-to-user",
-			subtest:      "no-warnings-needed",
-			initialState: defaultInitialState,
-			inputs: cmdInputs{
-				roleKind:        "Role",
-				roleName:        existingRoleName,
-				roleBindingName: newRoleBindingName,
-				roleNamespace:   currentNamespace,
-				userNames:       []string{existingUserName},
-				serviceAccounts: []rbacv1.Subject{},
-				groupNames:      []string{},
-			},
-			expectedOutputs: cmdOutputs{
-				warnings: []string{},
-			},
-			expectedState: clusterState{
-				roleBindings: &rbacv1.RoleBindingList{
-					Items: []rbacv1.RoleBinding{
-						existingRoleBinding,
-						existingNamespacedRoleBinding,
-						{
-							ObjectMeta: metav1.ObjectMeta{
-								Name: newRoleBindingName, Namespace: currentNamespace},
-							Subjects: []rbacv1.Subject{
-								{APIGroup: rbacv1.GroupName, Kind: rbacv1.UserKind, Name: existingUserName}},
-							RoleRef: rbacv1.RoleRef{
-								APIGroup: rbacv1.GroupName, Kind: "Role", Name: existingRoleName},
-						},
-					},
-				},
-				clusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-					Items: []rbacv1.ClusterRoleBinding{existingClusterRoleBinding},
-				},
-			},
-		},
-		{
-			name:         "add-role-to-user",
-			subtest:      "role-not-found-warning",
-			initialState: defaultInitialState,
-			inputs: cmdInputs{
-				roleKind:        "Role",
-				roleName:        newRoleName,
-				roleBindingName: newRoleBindingName,
-				roleNamespace:   currentNamespace,
-				userNames:       []string{existingUserName},
-				serviceAccounts: []rbacv1.Subject{},
-				groupNames:      []string{},
-			},
-			expectedOutputs: cmdOutputs{
-				warnings: []string{roleNotFoundWarning},
-			},
-			expectedState: clusterState{
-				roleBindings: &rbacv1.RoleBindingList{
-					Items: []rbacv1.RoleBinding{
-						existingRoleBinding,
-						existingNamespacedRoleBinding,
-						{
-							ObjectMeta: metav1.ObjectMeta{
-								Name: newRoleBindingName, Namespace: currentNamespace},
-							Subjects: []rbacv1.Subject{
-								{APIGroup: rbacv1.GroupName, Kind: rbacv1.UserKind, Name: existingUserName}},
-							RoleRef: rbacv1.RoleRef{
-								APIGroup: rbacv1.GroupName, Kind: "Role", Name: newRoleName},
-						},
-					},
-				},
-				clusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-					Items: []rbacv1.ClusterRoleBinding{existingClusterRoleBinding},
-				},
-			},
-		},
-		{
-			name:    "add-role-to-user",
-			subtest: "user-not-found-warning",
-			initialState: clusterState{
-				roles: &rbacv1.RoleList{
-					Items: []rbacv1.Role{existingRole, boundRole},
-				},
-				clusterRoles: &rbacv1.ClusterRoleList{
-					Items: []rbacv1.ClusterRole{existingClusterRole},
-				},
-				roleBindings: &rbacv1.RoleBindingList{
-					Items: []rbacv1.RoleBinding{existingRoleBinding, existingNamespacedRoleBinding},
-				},
-				clusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-					Items: []rbacv1.ClusterRoleBinding{existingClusterRoleBinding},
-				},
-				users: &userv1.UserList{
-					Items: []userv1.User{existingUser},
-				},
-				groups: &userv1.GroupList{
-					Items: []userv1.Group{existingGroup},
-				},
-				serviceAccounts: &corev1.ServiceAccountList{
-					Items: []corev1.ServiceAccount{existingServiceAccount},
-				},
-			},
-			inputs: cmdInputs{
-				roleKind:        "Role",
-				roleName:        boundRoleName,
-				roleBindingName: existingNamespacedRoleBindingName,
-				roleNamespace:   currentNamespace,
-				userNames:       []string{boundUserName, newUserName},
-				serviceAccounts: []rbacv1.Subject{},
-				groupNames:      []string{},
-			},
-			expectedOutputs: cmdOutputs{
-				warnings: []string{userNotFoundWarning},
-			},
-			expectedState: clusterState{
-				roleBindings: &rbacv1.RoleBindingList{
-					Items: []rbacv1.RoleBinding{
-						existingRoleBinding,
-						{
-							ObjectMeta: existingNamespacedRoleBinding.ObjectMeta,
-							Subjects: append(existingNamespacedRoleBinding.Subjects,
-								rbacv1.Subject{APIGroup: rbacv1.GroupName, Kind: rbacv1.UserKind, Name: newUserName}),
-							RoleRef: existingNamespacedRoleBinding.RoleRef,
-						},
-					},
-				},
-				clusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-					Items: []rbacv1.ClusterRoleBinding{existingClusterRoleBinding},
-				},
-			},
-		},
-		{
-			name:    "add-role-to-user",
-			subtest: "serviceaccount-not-found-warning",
-			initialState: clusterState{
-				roles: &rbacv1.RoleList{
-					Items: []rbacv1.Role{existingRole, boundRole},
-				},
-				clusterRoles: &rbacv1.ClusterRoleList{
-					Items: []rbacv1.ClusterRole{existingClusterRole},
-				},
-				roleBindings: &rbacv1.RoleBindingList{
-					Items: []rbacv1.RoleBinding{existingRoleBinding, existingNamespacedRoleBinding},
-				},
-				clusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-					Items: []rbacv1.ClusterRoleBinding{existingClusterRoleBinding},
-				},
-				users: &userv1.UserList{
-					Items: []userv1.User{existingUser},
-				},
-				groups: &userv1.GroupList{
-					Items: []userv1.Group{existingGroup},
-				},
-				serviceAccounts: &corev1.ServiceAccountList{
-					Items: []corev1.ServiceAccount{existingServiceAccount},
-				},
-			},
-			inputs: cmdInputs{
-				roleKind:        "Role",
-				roleName:        boundRoleName,
-				roleBindingName: existingNamespacedRoleBindingName,
-				roleNamespace:   currentNamespace,
-				userNames:       []string{boundUserName},
-				serviceAccounts: []rbacv1.Subject{
-					{APIGroup: rbacv1.GroupName, Kind: rbacv1.ServiceAccountKind, Name: newServiceAccountName},
-				},
-				groupNames: []string{},
-			},
-			expectedOutputs: cmdOutputs{
-				warnings: []string{serviceAccountNotFoundWarning},
-			},
-			expectedState: clusterState{
-				roleBindings: &rbacv1.RoleBindingList{
-					Items: []rbacv1.RoleBinding{
-						existingRoleBinding,
-						{
-							ObjectMeta: existingNamespacedRoleBinding.ObjectMeta,
-							Subjects: append(existingNamespacedRoleBinding.Subjects,
-								rbacv1.Subject{APIGroup: rbacv1.GroupName, Kind: rbacv1.ServiceAccountKind, Name: newServiceAccountName}),
-							RoleRef: existingNamespacedRoleBinding.RoleRef,
-						},
-					},
-				},
-				clusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-					Items: []rbacv1.ClusterRoleBinding{existingClusterRoleBinding},
-				},
-			},
-		},
-		{
-			name:         "add-role-to-group",
-			subtest:      "no-warning-needed",
-			initialState: defaultInitialState,
-			inputs: cmdInputs{
-				roleKind:        "Role",
-				roleName:        existingRoleName,
-				roleBindingName: newRoleBindingName,
-				roleNamespace:   currentNamespace,
-				userNames:       []string{},
-				serviceAccounts: []rbacv1.Subject{},
-				groupNames:      []string{existingGroupName},
-			},
-			expectedOutputs: cmdOutputs{
-				warnings: []string{},
-			},
-			expectedState: clusterState{
-				roleBindings: &rbacv1.RoleBindingList{
-					Items: []rbacv1.RoleBinding{
-						existingRoleBinding,
-						existingNamespacedRoleBinding,
-						{
-							ObjectMeta: metav1.ObjectMeta{
-								Name: newRoleBindingName, Namespace: currentNamespace},
-							Subjects: []rbacv1.Subject{
-								{APIGroup: rbacv1.GroupName, Kind: rbacv1.GroupKind, Name: existingGroupName}},
-							RoleRef: rbacv1.RoleRef{
-								APIGroup: rbacv1.GroupName, Kind: "Role", Name: existingRoleName},
-						},
-					},
-				},
-				clusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-					Items: []rbacv1.ClusterRoleBinding{existingClusterRoleBinding},
-				},
-			},
-		},
-		{
-			name:    "add-role-to-group",
-			subtest: "group-not-found-warning",
-			initialState: clusterState{
-				roles: &rbacv1.RoleList{
-					Items: []rbacv1.Role{existingRole, boundRole},
-				},
-				clusterRoles: &rbacv1.ClusterRoleList{
-					Items: []rbacv1.ClusterRole{existingClusterRole},
-				},
-				roleBindings: &rbacv1.RoleBindingList{
-					Items: []rbacv1.RoleBinding{existingRoleBinding, existingNamespacedRoleBinding},
-				},
-				clusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-					Items: []rbacv1.ClusterRoleBinding{existingClusterRoleBinding},
-				},
-				users: &userv1.UserList{
-					Items: []userv1.User{existingUser},
-				},
-				groups: &userv1.GroupList{
-					Items: []userv1.Group{existingGroup},
-				},
-				serviceAccounts: &corev1.ServiceAccountList{
-					Items: []corev1.ServiceAccount{existingServiceAccount},
-				},
-			},
-			inputs: cmdInputs{
-				roleKind:        "Role",
-				roleName:        boundRoleName,
-				roleBindingName: existingNamespacedRoleBindingName,
-				roleNamespace:   currentNamespace,
-				userNames:       []string{boundUserName},
-				serviceAccounts: []rbacv1.Subject{},
-				groupNames:      []string{boundGroupName, newGroupName},
-			},
-			expectedOutputs: cmdOutputs{
-				warnings: []string{groupNotFoundWarning},
-			},
-			expectedState: clusterState{
-				roleBindings: &rbacv1.RoleBindingList{
-					Items: []rbacv1.RoleBinding{
-						existingRoleBinding,
-						{
-							ObjectMeta: existingNamespacedRoleBinding.ObjectMeta,
-							Subjects: append(existingNamespacedRoleBinding.Subjects,
-								rbacv1.Subject{APIGroup: rbacv1.GroupName, Kind: rbacv1.GroupKind, Name: newGroupName}),
-							RoleRef: existingNamespacedRoleBinding.RoleRef,
-						},
-					},
-				},
-				clusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-					Items: []rbacv1.ClusterRoleBinding{existingClusterRoleBinding},
-				},
-			},
-		},
-		{
-			name:         "add-cluster-role-to-user",
-			subtest:      "no-warnings-needed",
-			initialState: defaultInitialState,
-			inputs: cmdInputs{
-				roleKind:        "ClusterRole",
-				roleName:        existingClusterRoleName,
-				roleBindingName: newClusterRoleBindingName,
-				roleNamespace:   "",
-				userNames:       []string{existingUserName},
-				serviceAccounts: []rbacv1.Subject{},
-				groupNames:      []string{},
-			},
-			expectedOutputs: cmdOutputs{
-				warnings: []string{},
-			},
-			expectedState: clusterState{
-				roleBindings: &rbacv1.RoleBindingList{
-					Items: []rbacv1.RoleBinding{existingRoleBinding, existingNamespacedRoleBinding},
-				},
-				clusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-					Items: []rbacv1.ClusterRoleBinding{
-						existingClusterRoleBinding,
-						{
-							ObjectMeta: metav1.ObjectMeta{
-								Name: newClusterRoleBindingName},
-							Subjects: []rbacv1.Subject{
-								{APIGroup: rbacv1.GroupName, Kind: rbacv1.UserKind, Name: existingUserName}},
-							RoleRef: rbacv1.RoleRef{
-								APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: existingClusterRoleName},
-						},
-					},
-				},
-			},
-		},
-		{
-			name:         "add-cluster-role-to-user",
-			subtest:      "role-not-found-warning",
-			initialState: defaultInitialState,
-			inputs: cmdInputs{
-				roleKind:        "ClusterRole",
-				roleName:        newClusterRoleName,
-				roleBindingName: newClusterRoleBindingName,
-				userNames:       []string{existingUserName},
-				serviceAccounts: []rbacv1.Subject{},
-				groupNames:      []string{},
-			},
-			expectedOutputs: cmdOutputs{
-				warnings: []string{clusterRoleNotFoundWarning},
-			},
-			expectedState: clusterState{
-				roleBindings: &rbacv1.RoleBindingList{
-					Items: []rbacv1.RoleBinding{existingRoleBinding, existingNamespacedRoleBinding},
-				},
-				clusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-					Items: []rbacv1.ClusterRoleBinding{
-						existingClusterRoleBinding,
-						{
-							ObjectMeta: metav1.ObjectMeta{
-								Name: newClusterRoleBindingName},
-							Subjects: []rbacv1.Subject{
-								{APIGroup: rbacv1.GroupName, Kind: rbacv1.UserKind, Name: existingUserName}},
-							RoleRef: rbacv1.RoleRef{
-								APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: newClusterRoleName},
-						},
-					},
-				},
-			},
-		},
-		{
-			name:         "add-cluster-role-to-user",
-			subtest:      "user-not-found-warning",
-			initialState: defaultInitialState,
-			inputs: cmdInputs{
-				roleKind:        "ClusterRole",
-				roleName:        existingClusterRoleName,
-				roleBindingName: existingClusterRoleBindingName,
-				userNames:       []string{boundUserName, newUserName},
-				serviceAccounts: []rbacv1.Subject{},
-				groupNames:      []string{},
-			},
-			expectedOutputs: cmdOutputs{
-				warnings: []string{userNotFoundWarning},
-			},
-			expectedState: clusterState{
-				roleBindings: &rbacv1.RoleBindingList{
-					Items: []rbacv1.RoleBinding{existingRoleBinding, existingNamespacedRoleBinding},
-				},
-				clusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-					Items: []rbacv1.ClusterRoleBinding{
-						existingClusterRoleBinding,
-						{
-							ObjectMeta: metav1.ObjectMeta{
-								Name: existingClusterRoleBindingName},
-							Subjects: append(existingClusterRoleBinding.Subjects,
-								rbacv1.Subject{APIGroup: rbacv1.GroupName, Kind: rbacv1.UserKind, Name: newUserName}),
-							RoleRef: rbacv1.RoleRef{
-								APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: existingClusterRoleName},
-						},
-					},
-				},
-			},
-		},
-		{
-			name:         "add-cluster-role-to-user",
-			subtest:      "serviceaccount-not-found-warning",
-			initialState: defaultInitialState,
-			inputs: cmdInputs{
-				roleKind:        "ClusterRole",
-				roleName:        existingClusterRoleName,
-				roleBindingName: existingClusterRoleBindingName,
-				userNames:       []string{},
-				serviceAccounts: []rbacv1.Subject{
-					{APIGroup: rbacv1.GroupName, Kind: rbacv1.ServiceAccountKind, Name: newServiceAccountName},
-					{APIGroup: rbacv1.GroupName, Kind: rbacv1.ServiceAccountKind, Name: boundServiceAccountName},
-				},
-				groupNames: []string{},
-			},
-			expectedOutputs: cmdOutputs{
-				warnings: []string{serviceAccountNotFoundWarning},
-			},
-			expectedState: clusterState{
-				roleBindings: &rbacv1.RoleBindingList{
-					Items: []rbacv1.RoleBinding{existingRoleBinding, existingNamespacedRoleBinding},
-				},
-				clusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-					Items: []rbacv1.ClusterRoleBinding{
-						{
-							ObjectMeta: metav1.ObjectMeta{
-								Name: existingClusterRoleBindingName},
-							Subjects: append(existingClusterRoleBinding.Subjects,
-								rbacv1.Subject{APIGroup: rbacv1.GroupName, Kind: rbacv1.ServiceAccountKind, Name: newServiceAccountName}),
-							RoleRef: rbacv1.RoleRef{
-								APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: existingClusterRoleName},
-						},
-					},
-				},
-			},
-		},
-		{
-			name:         "add-cluster-role-to-group",
-			subtest:      "no-warning-needed",
-			initialState: defaultInitialState,
-			inputs: cmdInputs{
-				roleKind:        "ClusterRole",
-				roleName:        existingClusterRoleName,
-				roleBindingName: newClusterRoleBindingName,
-				userNames:       []string{},
-				serviceAccounts: []rbacv1.Subject{},
-				groupNames:      []string{existingGroupName},
-			},
-			expectedOutputs: cmdOutputs{
-				warnings: []string{},
-			},
-			expectedState: clusterState{
-				roleBindings: &rbacv1.RoleBindingList{
-					Items: []rbacv1.RoleBinding{existingRoleBinding, existingNamespacedRoleBinding},
-				},
-				clusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-					Items: []rbacv1.ClusterRoleBinding{
-						existingClusterRoleBinding,
-						{
-							ObjectMeta: metav1.ObjectMeta{
-								Name: newClusterRoleBindingName},
-							Subjects: []rbacv1.Subject{
-								{APIGroup: rbacv1.GroupName, Kind: rbacv1.GroupKind, Name: existingGroupName}},
-							RoleRef: rbacv1.RoleRef{
-								APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: existingClusterRoleName},
-						},
-					},
-				},
-			},
-		},
-		{
-			name:         "add-cluster-role-to-group",
-			subtest:      "group-not-found-warning",
-			initialState: defaultInitialState,
-			inputs: cmdInputs{
-				roleKind:        "ClusterRole",
-				roleName:        existingClusterRoleName,
-				roleBindingName: existingClusterRoleBindingName,
-				userNames:       []string{boundUserName},
-				serviceAccounts: []rbacv1.Subject{},
-				groupNames:      []string{boundGroupName, newGroupName},
-			},
-			expectedOutputs: cmdOutputs{
-				warnings: []string{groupNotFoundWarning},
-			},
-			expectedState: clusterState{
-				roleBindings: &rbacv1.RoleBindingList{
-					Items: []rbacv1.RoleBinding{existingRoleBinding, existingNamespacedRoleBinding},
-				},
-				clusterRoleBindings: &rbacv1.ClusterRoleBindingList{
-					Items: []rbacv1.ClusterRoleBinding{
-						{
-							ObjectMeta: metav1.ObjectMeta{
-								Name: existingClusterRoleBindingName},
-							Subjects: append(existingClusterRoleBinding.Subjects,
-								rbacv1.Subject{APIGroup: rbacv1.GroupName, Kind: rbacv1.GroupKind, Name: newGroupName}),
-							RoleRef: rbacv1.RoleRef{
-								APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: existingClusterRoleName},
-						},
-					},
-				},
-			},
-		},
-	}
-
-	for _, tt := range tests {
-		// Set up modifier options and run AddRole()
-		t.Run(tt.name+":"+tt.subtest, func(t *testing.T) {
-			expectedWarnings := map[string]string{}
-			for _, warning := range tt.expectedOutputs.warnings {
-				expectedWarnings[warning] = warning
-			}
-			o := &RoleModificationOptions{
-				RoleBindingNamespace: tt.inputs.roleNamespace,
-				RoleBindingName:      tt.inputs.roleBindingName,
-				RoleKind:             tt.inputs.roleKind,
-				RoleName:             tt.inputs.roleName,
-				RbacClient:           fakeclient.NewSimpleClientset(tt.initialState.roles, tt.initialState.clusterRoles, tt.initialState.roleBindings, tt.initialState.clusterRoleBindings).RbacV1(),
-				Users:                tt.inputs.userNames,
-				Groups:               tt.inputs.groupNames,
-				Subjects:             tt.inputs.serviceAccounts,
-				UserClient:           fakeuserclient.NewSimpleClientset(tt.initialState.users, tt.initialState.groups).UserV1(),
-				ServiceAccountClient: fakeclient.NewSimpleClientset(tt.initialState.serviceAccounts).CoreV1(),
-				PrintFlags:           genericclioptions.NewPrintFlags(""),
-				ToPrinter:            func(string) (printers.ResourcePrinter, error) { return printers.NewDiscardingPrinter(), nil },
-				PrintErrf: func(format string, args ...interface{}) {
-					actualWarning := fmt.Sprintf(format, args...)
-					if _, ok := expectedWarnings[actualWarning]; !ok {
-						t.Errorf("unexpected warning: '%s'", actualWarning)
-					}
-					delete(expectedWarnings, actualWarning)
-				},
-			}
-			err := o.AddRole()
-			if err != nil {
-				t.Errorf("unexpected error: %v", err)
-			}
-
-			rbs, err := o.RbacClient.RoleBindings(tt.inputs.roleNamespace).List(metav1.ListOptions{})
-			if err != nil {
-				t.Errorf("unexpected error fetching rolebindings: %v", err)
-			}
-			expectedRoleBindings := map[string]rbacv1.RoleBinding{}
-			if tt.expectedState.roleBindings != nil {
-				for _, expected := range tt.expectedState.roleBindings.Items {
-					expectedRoleBindings[expected.ObjectMeta.Name] = expected
-				}
-			}
-			for _, found := range rbs.Items {
-				expected, ok := expectedRoleBindings[found.ObjectMeta.Name]
-				if !ok {
-					t.Errorf("unexpected rolebinding: %v", found.ObjectMeta.Name)
-				}
-				compareResources(t, expected, found)
-				delete(expectedRoleBindings, found.ObjectMeta.Name)
-			}
-			for missing := range expectedRoleBindings {
-				t.Errorf("missing rolebinding: %s", missing)
-			}
-
-			crbs, err := o.RbacClient.ClusterRoleBindings().List(metav1.ListOptions{})
-			if err != nil {
-				t.Errorf("unexpected error fetching clusterrolebindings: %v", err)
-			}
-			expectedClusterRoleBindings := map[string]rbacv1.ClusterRoleBinding{}
-			if tt.expectedState.clusterRoleBindings != nil {
-				for _, expected := range tt.expectedState.clusterRoleBindings.Items {
-					expectedClusterRoleBindings[expected.ObjectMeta.Name] = expected
-				}
-			}
-			for _, found := range crbs.Items {
-				expected, ok := expectedClusterRoleBindings[found.ObjectMeta.Name]
-				if !ok {
-					t.Errorf("unexpected clusterrolebinding: %v", found.ObjectMeta.Name)
-				}
-				compareResources(t, expected, found)
-				delete(expectedClusterRoleBindings, found.ObjectMeta.Name)
-			}
-			for missing := range expectedClusterRoleBindings {
-				t.Errorf("missing clusterrolebinding: %s", missing)
-			}
-			for warning := range expectedWarnings {
-				t.Errorf("missing warning: '%s'", warning)
-			}
-		})
-	}
-}
-
-// compareResources checks semantic equality and, on mismatch, fails the test with a diff for easier debugging
-func compareResources(t *testing.T, expected, actual interface{}) {
-	if eq := equality.Semantic.DeepEqual(expected, actual); !eq {
-		t.Errorf("Resource does not match expected value: %s",
-			diffutil.ObjectDiff(expected, actual))
-	}
-}
-
-func getRoleBindingAbstractionsList(rbacClient rbacv1client.RbacV1Interface, namespace string) ([]*roleBindingAbstraction, error) {
-	ret := make([]*roleBindingAbstraction, 0)
-	// list every existing binding: role bindings when a namespace is given, cluster role bindings otherwise.
-	if len(namespace) > 0 {
-		roleBindings, err := rbacClient.RoleBindings(namespace).List(metav1.ListOptions{})
-		if err != nil && !kapierrors.IsNotFound(err) {
-			return nil, err
-		}
-		for i := range roleBindings.Items {
-			// copy the item into a loop-local variable so that taking its address is safe
-			roleBinding := roleBindings.Items[i]
-			ret = append(ret, &roleBindingAbstraction{rbacClient: rbacClient, roleBinding: &roleBinding})
-		}
-	} else {
-		clusterRoleBindings, err := rbacClient.ClusterRoleBindings().List(metav1.ListOptions{})
-		if err != nil && !kapierrors.IsNotFound(err) {
-			return nil, err
-		}
-		for i := range clusterRoleBindings.Items {
-			// copy the item into a loop-local variable so that taking its address is safe
-			clusterRoleBinding := clusterRoleBindings.Items[i]
-			ret = append(ret, &roleBindingAbstraction{rbacClient: rbacClient, clusterRoleBinding: &clusterRoleBinding})
-		}
-	}
-
-	return ret, nil
-}
-
-func modifyRoleAndCheck(t *testing.T, o *RoleModificationOptions, tcName, action string, expectedName string, expectedSubjects []rbacv1.Subject,
-	expectedBindings []string) {
-	var err error
-	switch action {
-	case "add":
-		err = o.AddRole()
-	case "remove":
-		err = o.RemoveRole()
-	default:
-		err = fmt.Errorf("Invalid action %s", action)
-	}
-	if err != nil {
-		t.Errorf("%s: unexpected err %v", tcName, err)
-	}
-
-	roleBinding, err := getRoleBindingAbstraction(o.RbacClient, expectedName, o.RoleBindingNamespace)
-	if err != nil {
-		t.Errorf("%s: err fetching roleBinding %s, %s", tcName, expectedName, err)
-	}
-
-	if !reflect.DeepEqual(expectedSubjects, roleBinding.Subjects()) {
-		t.Errorf("%s: err expected users: %v, actual: %v", tcName, expectedSubjects, roleBinding.Subjects())
-	}
-
-	roleBindings, err := getRoleBindingAbstractionsList(o.RbacClient, o.RoleBindingNamespace)
-	if err != nil {
-		t.Errorf("%s: err fetching roleBindings, %s", tcName, err)
-	}
-	foundBindings := make([]string, len(expectedBindings))
-	for _, roleBinding := range roleBindings {
-		var foundBinding string
-		for i := range expectedBindings {
-			if expectedBindings[i] == roleBinding.Name() {
-				foundBindings[i] = roleBinding.Name()
-				foundBinding = roleBinding.Name()
-				break
-			}
-		}
-		if len(foundBinding) == 0 {
-			t.Errorf("%s: found unexpected binding %q", tcName, roleBinding.Name())
-		}
-	}
-	if !reflect.DeepEqual(expectedBindings, foundBindings) {
-		t.Errorf("%s: err expected bindings: %v, actual: %v", tcName, expectedBindings, foundBindings)
-	}
-}
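
The warning checks in the deleted test above hinge on injecting a capture function as PrintErrf: expected messages go into a set, each emitted warning is checked off, and anything left over is reported as missing while anything unknown is flagged immediately. A minimal standalone sketch of that expected-set pattern, with all names illustrative rather than taken from the code above:

	package main

	import "fmt"

	func main() {
		expected := []string{"Warning: role 'tbd-role-0' not found\n"}
		want := map[string]bool{}
		for _, w := range expected {
			want[w] = true
		}
		var unexpected []string

		// printErrf plays the role of the injected warning hook: format the
		// message, then check it off against the expected set.
		printErrf := func(format string, args ...interface{}) {
			msg := fmt.Sprintf(format, args...)
			if want[msg] {
				delete(want, msg)
				return
			}
			unexpected = append(unexpected, msg)
		}

		printErrf("Warning: role '%s' not found\n", "tbd-role-0")

		for msg := range want {
			fmt.Printf("missing warning: %q\n", msg)
		}
		for _, msg := range unexpected {
			fmt.Printf("unexpected warning: %q\n", msg)
		}
	}

Deleting on match also makes duplicate emissions visible: a second copy of the same warning no longer finds a set entry and lands in the unexpected list.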
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/modify_scc.go b/vendor/github.com/openshift/oc/pkg/cli/admin/policy/modify_scc.go
deleted file mode 100644
index c3be62ff99fd..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/modify_scc.go
+++ /dev/null
@@ -1,329 +0,0 @@
-package policy
-
-import (
-	"errors"
-	"fmt"
-
-	"github.com/spf13/cobra"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/printers"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	securityv1typedclient "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1"
-)
-
-const (
-	AddSCCToGroupRecommendedName      = "add-scc-to-group"
-	AddSCCToUserRecommendedName       = "add-scc-to-user"
-	RemoveSCCFromGroupRecommendedName = "remove-scc-from-group"
-	RemoveSCCFromUserRecommendedName  = "remove-scc-from-user"
-)
-
-var (
-	addSCCToUserExample = templates.Examples(`
-		# Add the 'restricted' security context constraint to user1 and user2
-		%[1]s restricted user1 user2
-
-		# Add the 'privileged' security context constraint to the service account serviceaccount1 in the current namespace
-		%[1]s privileged -z serviceaccount1`)
-
-	addSCCToGroupExample = templates.Examples(`
-		# Add the 'restricted' security context constraint to group1 and group2
-		%[1]s restricted group1 group2`)
-)
-
-type SCCModificationOptions struct {
-	PrintFlags *genericclioptions.PrintFlags
-
-	ToPrinter func(string) (printers.ResourcePrinter, error)
-
-	SCCName      string
-	SCCInterface securityv1typedclient.SecurityContextConstraintsInterface
-	SANames      []string
-
-	DefaultSubjectNamespace string
-	Subjects                []corev1.ObjectReference
-
-	IsGroup bool
-	DryRun  bool
-	Output  string
-
-	genericclioptions.IOStreams
-}
-
-func NewSCCModificationOptions(streams genericclioptions.IOStreams) *SCCModificationOptions {
-	return &SCCModificationOptions{
-		PrintFlags: genericclioptions.NewPrintFlags("added to").WithTypeSetter(scheme.Scheme),
-		IOStreams:  streams,
-	}
-}
-
-func NewCmdAddSCCToGroup(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewSCCModificationOptions(streams)
-	cmd := &cobra.Command{
-		Use:     name + " SCC GROUP [GROUP ...]",
-		Short:   "Add security context constraint to groups",
-		Long:    `Add security context constraint to groups`,
-		Example: fmt.Sprintf(addSCCToGroupExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.CompleteGroups(f, cmd, args))
-			kcmdutil.CheckErr(o.AddSCC())
-		},
-	}
-
-	kcmdutil.AddDryRunFlag(cmd)
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-func NewCmdAddSCCToUser(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewSCCModificationOptions(streams)
-	o.SANames = []string{}
-	cmd := &cobra.Command{
-		Use:     name + " SCC (USER | -z SERVICEACCOUNT) [USER ...]",
-		Short:   "Add security context constraint to users or a service account",
-		Long:    `Add security context constraint to users or a service account`,
-		Example: fmt.Sprintf(addSCCToUserExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.CompleteUsers(f, cmd, args))
-			kcmdutil.CheckErr(o.AddSCC())
-		},
-	}
-
-	cmd.Flags().StringSliceVarP(&o.SANames, "serviceaccount", "z", o.SANames, "service account in the current namespace to use as a user")
-
-	kcmdutil.AddDryRunFlag(cmd)
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-func NewCmdRemoveSCCFromGroup(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewSCCModificationOptions(streams)
-	cmd := &cobra.Command{
-		Use:   name + " SCC GROUP [GROUP ...]",
-		Short: "Remove group from scc",
-		Long:  `Remove group from scc`,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.CompleteGroups(f, cmd, args))
-			kcmdutil.CheckErr(o.RemoveSCC())
-		},
-	}
-
-	kcmdutil.AddDryRunFlag(cmd)
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-func NewCmdRemoveSCCFromUser(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewSCCModificationOptions(streams)
-	o.SANames = []string{}
-	cmd := &cobra.Command{
-		Use:   name + " SCC USER [USER ...]",
-		Short: "Remove user from scc",
-		Long:  `Remove user from scc`,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.CompleteUsers(f, cmd, args))
-			kcmdutil.CheckErr(o.RemoveSCC())
-		},
-	}
-
-	cmd.Flags().StringSliceVarP(&o.SANames, "serviceaccount", "z", o.SANames, "service account in the current namespace to use as a user")
-
-	kcmdutil.AddDryRunFlag(cmd)
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-func (o *SCCModificationOptions) CompleteUsers(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	if len(args) < 1 {
-		return errors.New("you must specify a scc")
-	}
-
-	o.SCCName = args[0]
-	o.Subjects = buildSubjects(args[1:], []string{})
-
-	if (len(o.Subjects) == 0) && (len(o.SANames) == 0) {
-		return errors.New("you must specify at least one user or service account")
-	}
-
-	o.DryRun = kcmdutil.GetFlagBool(cmd, "dry-run")
-	o.Output = kcmdutil.GetFlagString(cmd, "output")
-
-	o.ToPrinter = func(message string) (printers.ResourcePrinter, error) {
-		o.PrintFlags.NamePrintFlags.Operation = message
-		if o.DryRun {
-			o.PrintFlags.Complete("%s (dry run)")
-		}
-
-		return o.PrintFlags.ToPrinter()
-	}
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	securityClient, err := securityv1typedclient.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	o.SCCInterface = securityClient.SecurityContextConstraints()
-
-	o.DefaultSubjectNamespace, _, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	for _, sa := range o.SANames {
-		o.Subjects = append(o.Subjects, corev1.ObjectReference{Namespace: o.DefaultSubjectNamespace, Name: sa, Kind: "ServiceAccount"})
-	}
-
-	return nil
-}
-
-func (o *SCCModificationOptions) CompleteGroups(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	if len(args) < 2 {
-		return errors.New("you must specify at least two arguments: <scc> <group> [group]...")
-	}
-
-	o.Output = kcmdutil.GetFlagString(cmd, "output")
-	o.DryRun = kcmdutil.GetFlagBool(cmd, "dry-run")
-
-	o.ToPrinter = func(message string) (printers.ResourcePrinter, error) {
-		o.PrintFlags.NamePrintFlags.Operation = message
-		if o.DryRun {
-			o.PrintFlags.Complete("%s (dry run)")
-		}
-
-		return o.PrintFlags.ToPrinter()
-	}
-
-	o.IsGroup = true
-	o.SCCName = args[0]
-	o.Subjects = buildSubjects([]string{}, args[1:])
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	securityClient, err := securityv1typedclient.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	o.SCCInterface = securityClient.SecurityContextConstraints()
-
-	o.DefaultSubjectNamespace, _, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (o *SCCModificationOptions) AddSCC() error {
-	scc, err := o.SCCInterface.Get(o.SCCName, metav1.GetOptions{})
-	if err != nil {
-		return err
-	}
-
-	users, groups := stringSubjectsFor(o.DefaultSubjectNamespace, o.Subjects)
-	usersToAdd, _ := diff(users, scc.Users)
-	groupsToAdd, _ := diff(groups, scc.Groups)
-
-	scc.Users = append(scc.Users, usersToAdd...)
-	scc.Groups = append(scc.Groups, groupsToAdd...)
-
-	message := successMessage(true, o.IsGroup, users, groups)
-
-	p, err := o.ToPrinter(message)
-	if err != nil {
-		return err
-	}
-
-	if o.DryRun {
-		return p.PrintObj(scc, o.Out)
-	}
-
-	_, err = o.SCCInterface.Update(scc)
-	if err != nil {
-		return err
-	}
-
-	return p.PrintObj(scc, o.Out)
-}
-
-func (o *SCCModificationOptions) RemoveSCC() error {
-	scc, err := o.SCCInterface.Get(o.SCCName, metav1.GetOptions{})
-	if err != nil {
-		return err
-	}
-
-	users, groups := stringSubjectsFor(o.DefaultSubjectNamespace, o.Subjects)
-	_, remainingUsers := diff(users, scc.Users)
-	_, remainingGroups := diff(groups, scc.Groups)
-
-	scc.Users = remainingUsers
-	scc.Groups = remainingGroups
-
-	message := successMessage(false, o.IsGroup, users, groups)
-
-	p, err := o.ToPrinter(message)
-	if err != nil {
-		return err
-	}
-
-	if o.DryRun {
-		return p.PrintObj(scc, o.Out)
-	}
-
-	_, err = o.SCCInterface.Update(scc)
-	if err != nil {
-		return err
-	}
-
-	return p.PrintObj(scc, o.Out)
-}
-
-func diff(lhsSlice, rhsSlice []string) (lhsOnly []string, rhsOnly []string) {
-	return singleDiff(lhsSlice, rhsSlice), singleDiff(rhsSlice, lhsSlice)
-}
-
-func singleDiff(lhsSlice, rhsSlice []string) (lhsOnly []string) {
-	for _, lhs := range lhsSlice {
-		found := false
-		for _, rhs := range rhsSlice {
-			if lhs == rhs {
-				found = true
-				break
-			}
-		}
-
-		if !found {
-			lhsOnly = append(lhsOnly, lhs)
-		}
-	}
-
-	return lhsOnly
-}
-
-// generate affirmative output
-func successMessage(didAdd bool, isGroup bool, usersToAdd, groupsToAdd []string) string {
-	verb := "removed from"
-	allTargets := fmt.Sprintf("%q", usersToAdd)
-
-	if isGroup {
-		allTargets = fmt.Sprintf("%q", groupsToAdd)
-	}
-	if didAdd {
-		verb = "added to"
-	}
-	if isGroup {
-		verb += " groups"
-	}
-
-	return fmt.Sprintf("%s: %s", verb, allTargets)
-}
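
Both AddSCC and RemoveSCC in the deleted file reduce to the same two-sided set difference: diff(requested, current) yields the requested entries not yet present (what the add path appends) and, read from the other side, the current entries that were not requested (what the remove path keeps). A self-contained sketch of that contract, restating only what the deleted helpers already do:

	package main

	import "fmt"

	// singleDiff returns the elements of lhs that do not appear in rhs,
	// preserving lhs order: the same quadratic scan as the helper above.
	func singleDiff(lhs, rhs []string) (lhsOnly []string) {
		for _, l := range lhs {
			found := false
			for _, r := range rhs {
				if l == r {
					found = true
					break
				}
			}
			if !found {
				lhsOnly = append(lhsOnly, l)
			}
		}
		return lhsOnly
	}

	func diff(lhs, rhs []string) (lhsOnly, rhsOnly []string) {
		return singleDiff(lhs, rhs), singleDiff(rhs, lhs)
	}

	func main() {
		requested := []string{"one", "two"}
		current := []string{"two", "three"}

		toAdd, _ := diff(requested, current)     // add path: only "one" is new
		_, remaining := diff(requested, current) // remove path: "three" survives
		fmt.Println(toAdd, remaining)            // [one] [three]
	}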
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/modify_scc_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/policy/modify_scc_test.go
deleted file mode 100644
index 76e95ac28052..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/modify_scc_test.go
+++ /dev/null
@@ -1,161 +0,0 @@
-package policy
-
-import (
-	"reflect"
-	"testing"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/printers"
-	clientgotesting "k8s.io/client-go/testing"
-
-	securityv1 "github.com/openshift/api/security/v1"
-	fakesecurityclient "github.com/openshift/client-go/security/clientset/versioned/fake"
-	fakesecurityv1client "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake"
-)
-
-func TestModifySCC(t *testing.T) {
-	tests := map[string]struct {
-		startingSCC *securityv1.SecurityContextConstraints
-		subjects    []corev1.ObjectReference
-		expectedSCC *securityv1.SecurityContextConstraints
-		remove      bool
-	}{
-		"add-user-to-empty": {
-			startingSCC: &securityv1.SecurityContextConstraints{},
-			subjects:    []corev1.ObjectReference{{Name: "one", Kind: "User"}, {Name: "two", Kind: "User"}},
-			expectedSCC: &securityv1.SecurityContextConstraints{Users: []string{"one", "two"}},
-			remove:      false,
-		},
-		"add-user-to-existing": {
-			startingSCC: &securityv1.SecurityContextConstraints{Users: []string{"one"}},
-			subjects:    []corev1.ObjectReference{{Name: "two", Kind: "User"}},
-			expectedSCC: &securityv1.SecurityContextConstraints{Users: []string{"one", "two"}},
-			remove:      false,
-		},
-		"add-user-to-existing-with-overlap": {
-			startingSCC: &securityv1.SecurityContextConstraints{Users: []string{"one"}},
-			subjects:    []corev1.ObjectReference{{Name: "one", Kind: "User"}, {Name: "two", Kind: "User"}},
-			expectedSCC: &securityv1.SecurityContextConstraints{Users: []string{"one", "two"}},
-			remove:      false,
-		},
-
-		"add-sa-to-empty": {
-			startingSCC: &securityv1.SecurityContextConstraints{},
-			subjects:    []corev1.ObjectReference{{Namespace: "a", Name: "one", Kind: "ServiceAccount"}, {Namespace: "b", Name: "two", Kind: "ServiceAccount"}},
-			expectedSCC: &securityv1.SecurityContextConstraints{Users: []string{"system:serviceaccount:a:one", "system:serviceaccount:b:two"}},
-			remove:      false,
-		},
-		"add-sa-to-existing": {
-			startingSCC: &securityv1.SecurityContextConstraints{Users: []string{"one"}},
-			subjects:    []corev1.ObjectReference{{Namespace: "b", Name: "two", Kind: "ServiceAccount"}},
-			expectedSCC: &securityv1.SecurityContextConstraints{Users: []string{"one", "system:serviceaccount:b:two"}},
-			remove:      false,
-		},
-		"add-sa-to-existing-with-overlap": {
-			startingSCC: &securityv1.SecurityContextConstraints{Users: []string{"system:serviceaccount:a:one"}},
-			subjects:    []corev1.ObjectReference{{Namespace: "a", Name: "one", Kind: "ServiceAccount"}, {Namespace: "b", Name: "two", Kind: "ServiceAccount"}},
-			expectedSCC: &securityv1.SecurityContextConstraints{Users: []string{"system:serviceaccount:a:one", "system:serviceaccount:b:two"}},
-			remove:      false,
-		},
-
-		"add-group-to-empty": {
-			startingSCC: &securityv1.SecurityContextConstraints{},
-			subjects:    []corev1.ObjectReference{{Name: "one", Kind: "Group"}, {Name: "two", Kind: "Group"}},
-			expectedSCC: &securityv1.SecurityContextConstraints{Groups: []string{"one", "two"}},
-			remove:      false,
-		},
-		"add-group-to-existing": {
-			startingSCC: &securityv1.SecurityContextConstraints{Groups: []string{"one"}},
-			subjects:    []corev1.ObjectReference{{Name: "two", Kind: "Group"}},
-			expectedSCC: &securityv1.SecurityContextConstraints{Groups: []string{"one", "two"}},
-			remove:      false,
-		},
-		"add-group-to-existing-with-overlap": {
-			startingSCC: &securityv1.SecurityContextConstraints{Groups: []string{"one"}},
-			subjects:    []corev1.ObjectReference{{Name: "one", Kind: "Group"}, {Name: "two", Kind: "Group"}},
-			expectedSCC: &securityv1.SecurityContextConstraints{Groups: []string{"one", "two"}},
-			remove:      false,
-		},
-
-		"remove-user": {
-			startingSCC: &securityv1.SecurityContextConstraints{Users: []string{"one", "two"}},
-			subjects:    []corev1.ObjectReference{{Name: "one", Kind: "User"}, {Name: "two", Kind: "User"}},
-			expectedSCC: &securityv1.SecurityContextConstraints{},
-			remove:      true,
-		},
-		"remove-user-from-existing-with-overlap": {
-			startingSCC: &securityv1.SecurityContextConstraints{Users: []string{"one", "two"}},
-			subjects:    []corev1.ObjectReference{{Name: "two", Kind: "User"}},
-			expectedSCC: &securityv1.SecurityContextConstraints{Users: []string{"one"}},
-			remove:      true,
-		},
-
-		"remove-sa": {
-			startingSCC: &securityv1.SecurityContextConstraints{Users: []string{"system:serviceaccount:a:one", "system:serviceaccount:b:two"}},
-			subjects:    []corev1.ObjectReference{{Namespace: "a", Name: "one", Kind: "ServiceAccount"}, {Namespace: "b", Name: "two", Kind: "ServiceAccount"}},
-			expectedSCC: &securityv1.SecurityContextConstraints{},
-			remove:      true,
-		},
-		"remove-sa-from-existing-with-overlap": {
-			startingSCC: &securityv1.SecurityContextConstraints{Users: []string{"system:serviceaccount:a:one", "system:serviceaccount:b:two"}},
-			subjects:    []corev1.ObjectReference{{Namespace: "b", Name: "two", Kind: "ServiceAccount"}},
-			expectedSCC: &securityv1.SecurityContextConstraints{Users: []string{"system:serviceaccount:a:one"}},
-			remove:      true,
-		},
-
-		"remove-group": {
-			startingSCC: &securityv1.SecurityContextConstraints{Groups: []string{"one", "two"}},
-			subjects:    []corev1.ObjectReference{{Name: "one", Kind: "Group"}, {Name: "two", Kind: "Group"}},
-			expectedSCC: &securityv1.SecurityContextConstraints{},
-			remove:      true,
-		},
-		"remove-group-from-existing-with-overlap": {
-			startingSCC: &securityv1.SecurityContextConstraints{Groups: []string{"one", "two"}},
-			subjects:    []corev1.ObjectReference{{Name: "two", Kind: "Group"}},
-			expectedSCC: &securityv1.SecurityContextConstraints{Groups: []string{"one"}},
-			remove:      true,
-		},
-	}
-
-	for tcName, tc := range tests {
-		fakeClient := fakesecurityv1client.FakeSecurityV1{Fake: &(fakesecurityclient.NewSimpleClientset().Fake)}
-		fakeClient.Fake.PrependReactor("get", "securitycontextconstraints", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-			return true, tc.startingSCC, nil
-		})
-		var actualSCC *securityv1.SecurityContextConstraints
-		fakeClient.Fake.PrependReactor("update", "securitycontextconstraints", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-			actualSCC = action.(clientgotesting.UpdateAction).GetObject().(*securityv1.SecurityContextConstraints)
-			return true, actualSCC, nil
-		})
-
-		o := &SCCModificationOptions{
-			PrintFlags: genericclioptions.NewPrintFlags(""),
-			ToPrinter:  func(string) (printers.ResourcePrinter, error) { return printers.NewDiscardingPrinter(), nil },
-
-			SCCName:                 "foo",
-			SCCInterface:            fakeClient.SecurityContextConstraints(),
-			DefaultSubjectNamespace: "",
-			Subjects:                tc.subjects,
-
-			IOStreams: genericclioptions.NewTestIOStreamsDiscard(),
-		}
-
-		var err error
-		if tc.remove {
-			err = o.RemoveSCC()
-		} else {
-			err = o.AddSCC()
-		}
-		if err != nil {
-			t.Errorf("%s: unexpected err %v", tcName, err)
-		}
-		if e, a := tc.expectedSCC.Users, actualSCC.Users; !reflect.DeepEqual(e, a) {
-			t.Errorf("%s: expected %v, actual %v", tcName, e, a)
-		}
-		if e, a := tc.expectedSCC.Groups, actualSCC.Groups; !reflect.DeepEqual(e, a) {
-			t.Errorf("%s: expected %v, actual %v", tcName, e, a)
-		}
-	}
-}
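
The deleted test never talks to an API server: client-go testing reactors intercept each verb, so "get" always serves the starting SCC and "update" captures whatever the command writes back. The same interception pattern, shown here against the generic fake clientset and ConfigMaps (the resource and values are illustrative, not from the test above):

	package main

	import (
		"fmt"

		corev1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/runtime"
		"k8s.io/client-go/kubernetes/fake"
		clientgotesting "k8s.io/client-go/testing"
	)

	func main() {
		client := fake.NewSimpleClientset()

		// Serve a canned object for every get, regardless of tracker state.
		starting := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
		client.Fake.PrependReactor("get", "configmaps",
			func(action clientgotesting.Action) (bool, runtime.Object, error) {
				return true, starting, nil
			})

		// Capture whatever the code under test writes back on update.
		var updated *corev1.ConfigMap
		client.Fake.PrependReactor("update", "configmaps",
			func(action clientgotesting.Action) (bool, runtime.Object, error) {
				updated = action.(clientgotesting.UpdateAction).GetObject().(*corev1.ConfigMap)
				return true, updated, nil
			})

		cm, _ := client.CoreV1().ConfigMaps("ns").Get("foo", metav1.GetOptions{})
		cm.Data = map[string]string{"k": "v"}
		client.CoreV1().ConfigMaps("ns").Update(cm)

		fmt.Println(updated.Data["k"]) // prints: v
	}

PrependReactor puts the handler ahead of the default tracker reactions, which is why the canned get wins even though the tracker holds no such object; the context-free Get and Update signatures match the client-go version vendored here.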
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/policy.go b/vendor/github.com/openshift/oc/pkg/cli/admin/policy/policy.go
deleted file mode 100644
index 9e8fde01c99e..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/policy.go
+++ /dev/null
@@ -1,299 +0,0 @@
-package policy
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-	rbacv1 "k8s.io/api/rbac/v1"
-	kapierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/apimachinery/pkg/util/uuid"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
-	rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	ktemplates "k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	cmdutil "github.com/openshift/oc/pkg/helpers/cmd"
-)
-
-const PolicyRecommendedName = "policy"
-
-var policyLong = ktemplates.LongDesc(`
-	Manage policy on the cluster
-
-	These commands allow you to assign and manage the roles and policies that apply to users. The reconcile
-	commands allow you to reset and upgrade your system policies to the latest default policies.
-
-	To see more information on roles and policies, use the 'get' and 'describe' commands on the following
-	resources: 'clusterroles', 'clusterpolicy', 'clusterrolebindings', 'roles', 'policy', 'rolebindings',
-	and 'scc'.`)
-
-// NewCmdPolicy implements the OpenShift cli policy command
-func NewCmdPolicy(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	// Parent command to which all subcommands are added.
-	cmds := &cobra.Command{
-		Use:   name,
-		Short: "Manage cluster authorization and security policy",
-		Long:  policyLong,
-		Run:   kcmdutil.DefaultSubCommandRun(streams.ErrOut),
-	}
-
-	groups := ktemplates.CommandGroups{
-		{
-			Message: "Discover:",
-			Commands: []*cobra.Command{
-				NewCmdWhoCan(WhoCanRecommendedName, fullName+" "+WhoCanRecommendedName, f, streams),
-				NewCmdSccSubjectReview(SubjectReviewRecommendedName, fullName+" "+SubjectReviewRecommendedName, f, streams),
-				NewCmdSccReview(ReviewRecommendedName, fullName+" "+ReviewRecommendedName, f, streams),
-			},
-		},
-		{
-			Message: "Manage project membership:",
-			Commands: []*cobra.Command{
-				NewCmdRemoveUserFromProject(RemoveUserRecommendedName, fullName+" "+RemoveUserRecommendedName, f, streams),
-				NewCmdRemoveGroupFromProject(RemoveGroupRecommendedName, fullName+" "+RemoveGroupRecommendedName, f, streams),
-			},
-		},
-		{
-			Message: "Assign roles to users and groups:",
-			Commands: []*cobra.Command{
-				NewCmdAddRoleToUser(AddRoleToUserRecommendedName, fullName+" "+AddRoleToUserRecommendedName, f, streams),
-				NewCmdAddRoleToGroup(AddRoleToGroupRecommendedName, fullName+" "+AddRoleToGroupRecommendedName, f, streams),
-				NewCmdRemoveRoleFromUser(RemoveRoleFromUserRecommendedName, fullName+" "+RemoveRoleFromUserRecommendedName, f, streams),
-				NewCmdRemoveRoleFromGroup(RemoveRoleFromGroupRecommendedName, fullName+" "+RemoveRoleFromGroupRecommendedName, f, streams),
-			},
-		},
-		{
-			Message: "Assign cluster roles to users and groups:",
-			Commands: []*cobra.Command{
-				NewCmdAddClusterRoleToUser(AddClusterRoleToUserRecommendedName, fullName+" "+AddClusterRoleToUserRecommendedName, f, streams),
-				NewCmdAddClusterRoleToGroup(AddClusterRoleToGroupRecommendedName, fullName+" "+AddClusterRoleToGroupRecommendedName, f, streams),
-				NewCmdRemoveClusterRoleFromUser(RemoveClusterRoleFromUserRecommendedName, fullName+" "+RemoveClusterRoleFromUserRecommendedName, f, streams),
-				NewCmdRemoveClusterRoleFromGroup(RemoveClusterRoleFromGroupRecommendedName, fullName+" "+RemoveClusterRoleFromGroupRecommendedName, f, streams),
-			},
-		},
-		{
-			Message: "Manage policy on pods and containers:",
-			Commands: []*cobra.Command{
-				NewCmdAddSCCToUser(AddSCCToUserRecommendedName, fullName+" "+AddSCCToUserRecommendedName, f, streams),
-				NewCmdAddSCCToGroup(AddSCCToGroupRecommendedName, fullName+" "+AddSCCToGroupRecommendedName, f, streams),
-				NewCmdRemoveSCCFromUser(RemoveSCCFromUserRecommendedName, fullName+" "+RemoveSCCFromUserRecommendedName, f, streams),
-				NewCmdRemoveSCCFromGroup(RemoveSCCFromGroupRecommendedName, fullName+" "+RemoveSCCFromGroupRecommendedName, f, streams),
-			},
-		},
-	}
-	groups.Add(cmds)
-	cmdutil.ActsAsRootCommand(cmds, []string{"options"}, groups...)
-
-	return cmds
-}
-
-func getUniqueName(rbacClient rbacv1client.RbacV1Interface, basename string, namespace string) (string, error) {
-	existingNames := sets.String{}
-
-	if len(namespace) > 0 {
-		roleBindings, err := rbacClient.RoleBindings(namespace).List(metav1.ListOptions{})
-		if err != nil && !kapierrors.IsNotFound(err) {
-			return "", err
-		}
-		for _, currBinding := range roleBindings.Items {
-			existingNames.Insert(currBinding.Name)
-		}
-	} else {
-		roleBindings, err := rbacClient.ClusterRoleBindings().List(metav1.ListOptions{})
-		if err != nil && !kapierrors.IsNotFound(err) {
-			return "", err
-		}
-		for _, currBinding := range roleBindings.Items {
-			existingNames.Insert(currBinding.Name)
-		}
-	}
-
-	if !existingNames.Has(basename) {
-		return basename, nil
-	}
-
-	for i := 0; i < 100; i++ {
-		trialName := fmt.Sprintf("%v-%d", basename, i)
-		if !existingNames.Has(trialName) {
-			return trialName, nil
-		}
-	}
-
-	return string(uuid.NewUUID()), nil
-}
-
-type roleBindingAbstraction struct {
-	rbacClient         rbacv1client.RbacV1Interface
-	roleBinding        *rbacv1.RoleBinding
-	clusterRoleBinding *rbacv1.ClusterRoleBinding
-}
-
-func newRoleBindingAbstraction(rbacClient rbacv1client.RbacV1Interface, name string, namespace string, roleName string, roleKind string) (*roleBindingAbstraction, error) {
-	r := roleBindingAbstraction{rbacClient: rbacClient}
-	if len(namespace) > 0 {
-		switch roleKind {
-		case "Role":
-			r.roleBinding = &(rbacv1helpers.NewRoleBinding(roleName, namespace).RoleBinding)
-		case "ClusterRole":
-			r.roleBinding = &(rbacv1helpers.NewRoleBindingForClusterRole(roleName, namespace).RoleBinding)
-		default:
-			return nil, fmt.Errorf("Unknown Role Kind: %q", roleKind)
-		}
-		if name != roleName {
-			r.roleBinding.Name = name
-		}
-	} else {
-		if roleKind != "ClusterRole" {
-			return nil, fmt.Errorf("Cluster Role Bindings can only reference Cluster Roles")
-		}
-		r.clusterRoleBinding = &(rbacv1helpers.NewClusterBinding(roleName).ClusterRoleBinding)
-		if name != roleName {
-			r.clusterRoleBinding.Name = name
-		}
-	}
-	return &r, nil
-}
-
-func getRoleBindingAbstraction(rbacClient rbacv1client.RbacV1Interface, name string, namespace string) (*roleBindingAbstraction, error) {
-	var err error
-	r := roleBindingAbstraction{rbacClient: rbacClient}
-	if len(namespace) > 0 {
-		r.roleBinding, err = rbacClient.RoleBindings(namespace).Get(name, metav1.GetOptions{})
-	} else {
-		r.clusterRoleBinding, err = rbacClient.ClusterRoleBindings().Get(name, metav1.GetOptions{})
-	}
-	if err != nil {
-		return nil, err
-	}
-	return &r, nil
-}
-
-func getRoleBindingAbstractionsForRole(rbacClient rbacv1client.RbacV1Interface, roleName string, roleKind string, namespace string) ([]*roleBindingAbstraction, error) {
-	ret := make([]*roleBindingAbstraction, 0)
-	// see if we can find an existing binding that points to the role in question.
-	if len(namespace) > 0 {
-		roleBindings, err := rbacClient.RoleBindings(namespace).List(metav1.ListOptions{})
-		if err != nil && !kapierrors.IsNotFound(err) {
-			return nil, err
-		}
-		for i := range roleBindings.Items {
-			// copy the item into a loop-local variable so that taking its address is safe
-			roleBinding := roleBindings.Items[i]
-			if roleBinding.RoleRef.Name == roleName && roleBinding.RoleRef.Kind == roleKind {
-				ret = append(ret, &roleBindingAbstraction{rbacClient: rbacClient, roleBinding: &roleBinding})
-			}
-		}
-	} else {
-		clusterRoleBindings, err := rbacClient.ClusterRoleBindings().List(metav1.ListOptions{})
-		if err != nil && !kapierrors.IsNotFound(err) {
-			return nil, err
-		}
-		for i := range clusterRoleBindings.Items {
-			// copy the item into a loop-local variable so that taking its address is safe
-			clusterRoleBinding := clusterRoleBindings.Items[i]
-			if clusterRoleBinding.RoleRef.Name == roleName {
-				ret = append(ret, &roleBindingAbstraction{rbacClient: rbacClient, clusterRoleBinding: &clusterRoleBinding})
-			}
-		}
-	}
-
-	return ret, nil
-}
-
-func (r roleBindingAbstraction) Name() string {
-	if r.roleBinding != nil {
-		return r.roleBinding.Name
-	} else {
-		return r.clusterRoleBinding.Name
-	}
-}
-
-func (r roleBindingAbstraction) RoleName() string {
-	if r.roleBinding != nil {
-		return r.roleBinding.RoleRef.Name
-	} else {
-		return r.clusterRoleBinding.RoleRef.Name
-	}
-}
-
-func (r roleBindingAbstraction) RoleKind() string {
-	if r.roleBinding != nil {
-		return r.roleBinding.RoleRef.Kind
-	} else {
-		return r.clusterRoleBinding.RoleRef.Kind
-	}
-}
-
-func (r roleBindingAbstraction) Annotation(key string) string {
-	if r.roleBinding != nil {
-		return r.roleBinding.Annotations[key]
-	} else {
-		return r.clusterRoleBinding.Annotations[key]
-	}
-}
-
-func (r roleBindingAbstraction) Subjects() []rbacv1.Subject {
-	if r.roleBinding != nil {
-		return r.roleBinding.Subjects
-	} else {
-		return r.clusterRoleBinding.Subjects
-	}
-}
-
-func (r roleBindingAbstraction) SetSubjects(subjects []rbacv1.Subject) {
-	if r.roleBinding != nil {
-		r.roleBinding.Subjects = subjects
-	} else {
-		r.clusterRoleBinding.Subjects = subjects
-	}
-}
-
-func (r roleBindingAbstraction) Object() runtime.Object {
-	if r.roleBinding != nil {
-		return r.roleBinding
-	} else {
-		return r.clusterRoleBinding
-	}
-}
-
-func (r roleBindingAbstraction) Create() error {
-	var err error
-	if r.roleBinding != nil {
-		_, err = r.rbacClient.RoleBindings(r.roleBinding.Namespace).Create(r.roleBinding)
-	} else {
-		_, err = r.rbacClient.ClusterRoleBindings().Create(r.clusterRoleBinding)
-	}
-	return err
-}
-
-func (r roleBindingAbstraction) Update() error {
-	var err error
-	if r.roleBinding != nil {
-		_, err = r.rbacClient.RoleBindings(r.roleBinding.Namespace).Update(r.roleBinding)
-	} else {
-		_, err = r.rbacClient.ClusterRoleBindings().Update(r.clusterRoleBinding)
-	}
-	return err
-}
-
-func (r roleBindingAbstraction) Delete() error {
-	var err error
-	if r.roleBinding != nil {
-		err = r.rbacClient.RoleBindings(r.roleBinding.Namespace).Delete(r.roleBinding.Name, &metav1.DeleteOptions{})
-	} else {
-		err = r.rbacClient.ClusterRoleBindings().Delete(r.clusterRoleBinding.Name, &metav1.DeleteOptions{})
-	}
-	return err
-}
-
-func (r roleBindingAbstraction) Type() string {
-	if r.roleBinding != nil {
-		return "rolebinding"
-	} else {
-		return "clusterrolebinding"
-	}
-}
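
getUniqueName above resolves binding-name collisions in three tiers: the basename itself, then basename-0 through basename-99, and only then a random UUID. The same tiering against a plain in-memory set, with the uuid.NewUUID() fallback swapped for a simple random-suffix stand-in:

	package main

	import (
		"fmt"
		"math/rand"
	)

	// uniqueName mirrors the deleted helper's strategy: prefer the basename,
	// then numbered suffixes, then fall back to a random name.
	func uniqueName(basename string, existing map[string]bool) string {
		if !existing[basename] {
			return basename
		}
		for i := 0; i < 100; i++ {
			trial := fmt.Sprintf("%v-%d", basename, i)
			if !existing[trial] {
				return trial
			}
		}
		// Stand-in for uuid.NewUUID(); any collision-resistant name works here.
		return fmt.Sprintf("%s-%08x", basename, rand.Uint32())
	}

	func main() {
		existing := map[string]bool{"edit": true, "edit-0": true}
		fmt.Println(uniqueName("edit", existing)) // edit-1
	}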
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/remove_from_project.go b/vendor/github.com/openshift/oc/pkg/cli/admin/policy/remove_from_project.go
deleted file mode 100644
index 03e890d08bd1..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/remove_from_project.go
+++ /dev/null
@@ -1,233 +0,0 @@
-package policy
-
-import (
-	"fmt"
-	"sort"
-
-	"github.com/spf13/cobra"
-
-	rbacv1 "k8s.io/api/rbac/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/printers"
-	rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
-	rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-
-	"github.com/openshift/library-go/pkg/authorization/authorizationutil"
-)
-
-const (
-	RemoveGroupRecommendedName = "remove-group"
-	RemoveUserRecommendedName  = "remove-user"
-)
-
-type RemoveFromProjectOptions struct {
-	PrintFlags *genericclioptions.PrintFlags
-
-	Printer printers.ResourcePrinter
-
-	BindingNamespace string
-	Client           rbacv1client.RoleBindingsGetter
-
-	Groups []string
-	Users  []string
-
-	DryRun bool
-
-	Output string
-
-	genericclioptions.IOStreams
-}
-
-func NewRemoveFromProjectOptions(streams genericclioptions.IOStreams) *RemoveFromProjectOptions {
-	return &RemoveFromProjectOptions{
-		PrintFlags: genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme),
-		IOStreams:  streams,
-	}
-}
-
-// NewCmdRemoveGroupFromProject implements the OpenShift cli remove-group command
-func NewCmdRemoveGroupFromProject(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewRemoveFromProjectOptions(streams)
-	cmd := &cobra.Command{
-		Use:   name + " GROUP [GROUP ...]",
-		Short: "Remove group from the current project",
-		Long:  `Remove group from the current project`,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args, &o.Groups, "group"))
-			kcmdutil.CheckErr(o.Validate(f, cmd, args))
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	kcmdutil.AddDryRunFlag(cmd)
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-// NewCmdRemoveUserFromProject implements the OpenShift cli remove-user command
-func NewCmdRemoveUserFromProject(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewRemoveFromProjectOptions(streams)
-	cmd := &cobra.Command{
-		Use:   name + " USER [USER ...]",
-		Short: "Remove user from the current project",
-		Long:  `Remove user from the current project`,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args, &o.Users, "user"))
-			kcmdutil.CheckErr(o.Validate(f, cmd, args))
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	kcmdutil.AddDryRunFlag(cmd)
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-func (o *RemoveFromProjectOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string, target *[]string, targetName string) error {
-	if len(args) < 1 {
-		return fmt.Errorf("you must specify at least one argument: <%s> [%s]...", targetName, targetName)
-	}
-
-	o.Output = kcmdutil.GetFlagString(cmd, "output")
-	o.DryRun = kcmdutil.GetFlagBool(cmd, "dry-run")
-
-	if o.DryRun {
-		o.PrintFlags.Complete("%s (dry run)")
-	}
-
-	var err error
-	o.Printer, err = o.PrintFlags.ToPrinter()
-	if err != nil {
-		return err
-	}
-
-	*target = append(*target, args...)
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.Client, err = rbacv1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	if o.BindingNamespace, _, err = f.ToRawKubeConfigLoader().Namespace(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (o *RemoveFromProjectOptions) Validate(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	return nil
-}
-
-func (o *RemoveFromProjectOptions) Run() error {
-	roleBindings, err := o.Client.RoleBindings(o.BindingNamespace).List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	// maintain David's hack from #1973 (see #1975, #1976 and https://bugzilla.redhat.com/show_bug.cgi?id=1215969)
-	sort.Sort(sort.Reverse(roleBindingSorter(roleBindings.Items)))
-
-	usersRemoved := sets.String{}
-	groupsRemoved := sets.String{}
-	sasRemoved := sets.String{}
-	othersRemoved := sets.String{}
-	dryRunText := ""
-	if o.DryRun {
-		dryRunText = " (dry run)"
-	}
-
-	updatedBindings := &rbacv1.RoleBindingList{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "List",
-			APIVersion: "v1",
-		},
-		ListMeta: metav1.ListMeta{},
-	}
-
-	subjectsToRemove := authorizationutil.BuildRBACSubjects(o.Users, o.Groups)
-
-	for _, currBinding := range roleBindings.Items {
-		originalSubjects := make([]rbacv1.Subject, len(currBinding.Subjects))
-		copy(originalSubjects, currBinding.Subjects)
-		oldUsers, oldGroups, oldSAs, oldOthers := rbacv1helpers.SubjectsStrings(originalSubjects)
-		oldUsersSet, oldGroupsSet, oldSAsSet, oldOtherSet := sets.NewString(oldUsers...), sets.NewString(oldGroups...), sets.NewString(oldSAs...), sets.NewString(oldOthers...)
-
-		currBinding.Subjects, _ = removeSubjects(currBinding.Subjects, subjectsToRemove)
-		newUsers, newGroups, newSAs, newOthers := rbacv1helpers.SubjectsStrings(currBinding.Subjects)
-		newUsersSet, newGroupsSet, newSAsSet, newOtherSet := sets.NewString(newUsers...), sets.NewString(newGroups...), sets.NewString(newSAs...), sets.NewString(newOthers...)
-
-		if len(currBinding.Subjects) == len(originalSubjects) {
-			continue
-		}
-
-		if len(o.Output) > 0 {
-			updatedBindings.Items = append(updatedBindings.Items, currBinding)
-			continue
-		}
-
-		if !o.DryRun {
-			if len(currBinding.Subjects) > 0 {
-				_, err = o.Client.RoleBindings(o.BindingNamespace).Update(&currBinding)
-			} else {
-				err = o.Client.RoleBindings(o.BindingNamespace).Delete(currBinding.Name, &metav1.DeleteOptions{})
-			}
-			if err != nil {
-				return err
-			}
-		}
-
-		roleDisplayName := fmt.Sprintf("%s/%s", currBinding.Namespace, currBinding.RoleRef.Name)
-		if currBinding.RoleRef.Kind == "ClusterRole" {
-			roleDisplayName = currBinding.RoleRef.Name
-		}
-
-		if diff := oldUsersSet.Difference(newUsersSet); len(diff) != 0 {
-			fmt.Fprintf(o.Out, "Removing %s from users %v in project %s%s.\n", roleDisplayName, diff.List(), o.BindingNamespace, dryRunText)
-			usersRemoved.Insert(diff.List()...)
-		}
-		if diff := oldGroupsSet.Difference(newGroupsSet); len(diff) != 0 {
-			fmt.Fprintf(o.Out, "Removing %s from groups %v in project %s%s.\n", roleDisplayName, diff.List(), o.BindingNamespace, dryRunText)
-			groupsRemoved.Insert(diff.List()...)
-		}
-		if diff := oldSAsSet.Difference(newSAsSet); len(diff) != 0 {
-			fmt.Fprintf(o.Out, "Removing %s from serviceaccounts %v in project %s%s.\n", roleDisplayName, diff.List(), o.BindingNamespace, dryRunText)
-			sasRemoved.Insert(diff.List()...)
-		}
-		if diff := oldOtherSet.Difference(newOtherSet); len(diff) != 0 {
-			fmt.Fprintf(o.Out, "Removing %s from subjects %v in project %s%s.\n", roleDisplayName, diff.List(), o.BindingNamespace, dryRunText)
-			othersRemoved.Insert(diff.List()...)
-		}
-	}
-
-	if len(o.Output) > 0 {
-		return o.Printer.PrintObj(updatedBindings, o.Out)
-	}
-
-	if diff := sets.NewString(o.Users...).Difference(usersRemoved); len(diff) != 0 {
-		fmt.Fprintf(o.Out, "Users %v were not bound to roles in project %s%s.\n", diff.List(), o.BindingNamespace, dryRunText)
-	}
-	if diff := sets.NewString(o.Groups...).Difference(groupsRemoved); len(diff) != 0 {
-		fmt.Fprintf(o.Out, "Groups %v were not bound to roles in project %s%s.\n", diff.List(), o.BindingNamespace, dryRunText)
-	}
-
-	return nil
-}
-
-type roleBindingSorter []rbacv1.RoleBinding
-
-func (s roleBindingSorter) Len() int {
-	return len(s)
-}
-func (s roleBindingSorter) Less(i, j int) bool {
-	return s[i].Name < s[j].Name
-}
-func (s roleBindingSorter) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
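
For reference: the diffing in Run() above hinges on filtering one subject slice by another. A minimal runnable sketch of that step, assuming nothing beyond the rbac/v1 types (filterSubjects is a hypothetical stand-in for the package's removeSubjects helper, which is defined elsewhere and not part of this diff):

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

// filterSubjects returns the subjects in current that do not appear in toRemove.
// rbacv1.Subject contains only string fields, so == comparison is exact.
func filterSubjects(current, toRemove []rbacv1.Subject) []rbacv1.Subject {
	kept := make([]rbacv1.Subject, 0, len(current))
	for _, s := range current {
		removed := false
		for _, r := range toRemove {
			if s == r {
				removed = true
				break
			}
		}
		if !removed {
			kept = append(kept, s)
		}
	}
	return kept
}

func main() {
	current := []rbacv1.Subject{
		{Kind: rbacv1.UserKind, Name: "alice"},
		{Kind: rbacv1.GroupKind, Name: "devs"},
	}
	toRemove := []rbacv1.Subject{{Kind: rbacv1.UserKind, Name: "alice"}}
	fmt.Println(filterSubjects(current, toRemove)) // keeps only the "devs" group
}
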
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/review.go b/vendor/github.com/openshift/oc/pkg/cli/admin/policy/review.go
deleted file mode 100644
index 4d23ac537f1c..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/review.go
+++ /dev/null
@@ -1,288 +0,0 @@
-package policy
-
-import (
-	"fmt"
-	"io"
-	"strings"
-	"text/tabwriter"
-
-	"github.com/spf13/cobra"
-
-	appsv1 "k8s.io/api/apps/v1"
-	appsv1beta1 "k8s.io/api/apps/v1beta1"
-	appsv1beta2 "k8s.io/api/apps/v1beta2"
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/meta"
-	"k8s.io/apimachinery/pkg/runtime"
-	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/apiserver/pkg/authentication/serviceaccount"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/printers"
-	"k8s.io/cli-runtime/pkg/resource"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	securityv1 "github.com/openshift/api/security/v1"
-	securityv1typedclient "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1"
-	ometa "github.com/openshift/library-go/pkg/image/referencemutator"
-)
-
-var (
-	reviewLong = templates.LongDesc(`Checks which Service Account can create a Pod.
-	The Pod is inferred from the PodTemplateSpec in the provided resource.
-	If no Service Account is provided, the one specified in podTemplateSpec.spec.serviceAccountName is used,
-	unless it is empty, in which case "default" is used.
-	If Service Accounts are provided, the podTemplateSpec.spec.serviceAccountName is ignored.
-	`)
-	reviewExamples = templates.Examples(`# Check whether Service Accounts sa1 and sa2 can admit a Pod with the PodTemplateSpec specified in my_resource.yaml
-	# The Service Account specified in the my_resource.yaml file is ignored
-	$ %[1]s -z sa1,sa2 -f my_resource.yaml
-
-	# Check whether the Service Account system:serviceaccount:bob:default can admit a Pod with the PodTemplateSpec specified in my_resource.yaml
-	$ %[1]s -z system:serviceaccount:bob:default -f my_resource.yaml
-
-	# Check whether the Service Account specified in my_resource_with_sa.yaml can admit the Pod
-	$ %[1]s -f my_resource_with_sa.yaml
-
-	# Check whether the default Service Account can admit the Pod; "default" is used because no Service Account is defined in myresource_with_no_sa.yaml
-	$ %[1]s -f myresource_with_no_sa.yaml
-	`)
-)
-
-const (
-	ReviewRecommendedName = "scc-review"
-
-	tabWriterMinWidth = 0
-	tabWriterWidth    = 7
-	tabWriterPadding  = 3
-	tabWriterPadChar  = ' '
-	tabWriterFlags    = 0
-)
-
-type SCCReviewOptions struct {
-	PrintFlags *genericclioptions.PrintFlags
-
-	Printer *policyPrinter
-
-	client                   securityv1typedclient.PodSecurityPolicyReviewsGetter
-	namespace                string
-	enforceNamespace         bool
-	builder                  *resource.Builder
-	RESTClientFactory        func(mapping *meta.RESTMapping) (resource.RESTClient, error)
-	FilenameOptions          resource.FilenameOptions
-	noHeaders                bool
-	serviceAccountNames      []string // user-supplied values; each may be a long SA username such as system:serviceaccount:bob:default or a short name
-	shortServiceAccountNames []string // only short SA names, for example 'bob'
-
-	genericclioptions.IOStreams
-}
-
-func NewSCCReviewOptions(streams genericclioptions.IOStreams) *SCCReviewOptions {
-	return &SCCReviewOptions{
-		PrintFlags: genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme),
-		IOStreams:  streams,
-	}
-}
-
-func NewCmdSccReview(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewSCCReviewOptions(streams)
-	cmd := &cobra.Command{
-		Use:     name,
-		Short:   "Checks which ServiceAccount can create a Pod",
-		Long:    reviewLong,
-		Example: fmt.Sprintf(reviewExamples, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, args, cmd))
-			kcmdutil.CheckErr(o.Run(args))
-		},
-	}
-
-	cmd.Flags().StringSliceVarP(&o.serviceAccountNames, "serviceaccount", "z", o.serviceAccountNames, "service account in the current namespace to use as a user")
-	kcmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, "Filename, directory, or URL to a file identifying the resource to get from a server.")
-	cmd.Flags().BoolVar(&o.noHeaders, "no-headers", o.noHeaders, "When using the default output format, don't print headers (default print headers).")
-
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-type policyPrinter struct {
-	humanPrintFunc func(*resource.Info, runtime.Object, *bool, io.Writer) error
-	noHeaders      bool
-	printFlags     *genericclioptions.PrintFlags
-	info           *resource.Info
-}
-
-func (p *policyPrinter) WithInfo(info *resource.Info) *policyPrinter {
-	p.info = info
-	return p
-}
-
-func (p *policyPrinter) PrintObj(obj runtime.Object, out io.Writer) error {
-	if p.printFlags.OutputFormat == nil || len(*p.printFlags.OutputFormat) == 0 || *p.printFlags.OutputFormat == "wide" {
-		return p.humanPrintFunc(p.info, obj, &p.noHeaders, out)
-	}
-
-	printer, err := p.printFlags.ToPrinter()
-	if err != nil {
-		return err
-	}
-	return printer.PrintObj(obj, out)
-}
-
-func (o *SCCReviewOptions) Complete(f kcmdutil.Factory, args []string, cmd *cobra.Command) error {
-	if len(args) == 0 && len(o.FilenameOptions.Filenames) == 0 {
-		return kcmdutil.UsageErrorf(cmd, "one or more resources must be specified")
-	}
-	for _, sa := range o.serviceAccountNames {
-		if strings.HasPrefix(sa, serviceaccount.ServiceAccountUsernamePrefix) {
-			_, user, err := serviceaccount.SplitUsername(sa)
-			if err != nil {
-				return err
-			}
-			o.shortServiceAccountNames = append(o.shortServiceAccountNames, user)
-		} else {
-			o.shortServiceAccountNames = append(o.shortServiceAccountNames, sa)
-		}
-	}
-	var err error
-	o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.client, err = securityv1typedclient.NewForConfig(clientConfig)
-	if err != nil {
-		return fmt.Errorf("unable to obtain client: %v", err)
-	}
-	o.builder = f.NewBuilder()
-	o.RESTClientFactory = f.ClientForMapping
-
-	o.Printer = &policyPrinter{
-		printFlags:     o.PrintFlags,
-		humanPrintFunc: sccReviewHumanPrintFunc,
-		noHeaders:      o.noHeaders,
-	}
-
-	return nil
-}
-
-func (o *SCCReviewOptions) Run(args []string) error {
-	r := o.builder.
-		WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
-		NamespaceParam(o.namespace).
-		FilenameParam(o.enforceNamespace, &o.FilenameOptions).
-		ResourceTypeOrNameArgs(true, args...).
-		ContinueOnError().
-		Flatten().
-		Do()
-	err := r.Err()
-	if err != nil {
-		return err
-	}
-	allErrs := []error{}
-	err = r.Visit(func(info *resource.Info, err error) error {
-		if err != nil {
-			return err
-		}
-		objectName := info.Name
-		podTemplateSpec, err := GetPodTemplateForObject(info.Object)
-		if err != nil {
-			return fmt.Errorf("%q cannot create pod: %v", objectName, err)
-		}
-		err = CheckStatefulSetWithWolumeClaimTemplates(info.Object)
-		if err != nil {
-			return err
-		}
-		review := &securityv1.PodSecurityPolicyReview{
-			Spec: securityv1.PodSecurityPolicyReviewSpec{
-				Template:            *podTemplateSpec,
-				ServiceAccountNames: o.shortServiceAccountNames,
-			},
-		}
-		unversionedObj, err := o.client.PodSecurityPolicyReviews(o.namespace).Create(review)
-		if err != nil {
-			return fmt.Errorf("unable to compute Pod Security Policy Review for %q: %v", objectName, err)
-		}
-		if err = o.Printer.WithInfo(info).PrintObj(unversionedObj, o.Out); err != nil {
-			allErrs = append(allErrs, err)
-		}
-		return nil
-	})
-	allErrs = append(allErrs, err)
-	return utilerrors.NewAggregate(allErrs)
-}
-
-// CheckStatefulSetWithWolumeClaimTemplates checks whether a supplied object is a StatefulSet with volumeClaimTemplates.
-// Currently the scc-review and scc-subject-review commands cannot handle this case correctly, since validation is not
-// based only on the podTemplateSpec.
-func CheckStatefulSetWithWolumeClaimTemplates(obj runtime.Object) error {
-	// TODO remove this as soon as upstream StatefulSet validation for podSpec is fixed.
-	// Currently the podTemplateSpec for a StatefulSet is not fully validated:
-	// spec.volumeClaimTemplates info should be propagated down to
-	// spec.template.spec validateContainers to validate volumeMounts.
-	// https://github.com/openshift/origin/blob/master/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go#L57
-	switch r := obj.(type) {
-	case *appsv1beta1.StatefulSet:
-		if len(r.Spec.VolumeClaimTemplates) > 0 {
-			return fmt.Errorf("StatefulSet %q with spec.volumeClaimTemplates currently not supported.", r.GetName())
-		}
-	case *appsv1beta2.StatefulSet:
-		if len(r.Spec.VolumeClaimTemplates) > 0 {
-			return fmt.Errorf("StatefulSet %q with spec.volumeClaimTemplates currently not supported.", r.GetName())
-		}
-	case *appsv1.StatefulSet:
-		if len(r.Spec.VolumeClaimTemplates) > 0 {
-			return fmt.Errorf("StatefulSet %q with spec.volumeClaimTemplates currently not supported.", r.GetName())
-		}
-	}
-	return nil
-}
-
-func GetPodTemplateForObject(obj runtime.Object) (*corev1.PodTemplateSpec, error) {
-	podSpec, _, err := ometa.GetPodSpecV1(obj)
-	if err != nil {
-		return nil, err
-	}
-	return &corev1.PodTemplateSpec{Spec: *podSpec}, nil
-}
-
-func sccReviewHumanPrintFunc(info *resource.Info, obj runtime.Object, noHeaders *bool, out io.Writer) error {
-	w := tabwriter.NewWriter(out, tabWriterMinWidth, tabWriterWidth, tabWriterPadding, tabWriterPadChar, tabWriterFlags)
-	defer w.Flush()
-
-	if info == nil {
-		return fmt.Errorf("expected non-nil resource info")
-	}
-
-	noHeadersVal := *noHeaders
-	if !noHeadersVal {
-		columns := []string{"RESOURCE", "SERVICE ACCOUNT", "ALLOWED BY"}
-		fmt.Fprintf(w, "%s\t\n", strings.Join(columns, "\t"))
-
-		// ensure headers are printed only once
-		*noHeaders = true
-	}
-
-	pspreview, ok := obj.(*securityv1.PodSecurityPolicyReview)
-	if !ok {
-		return fmt.Errorf("unexpected object %T", obj)
-	}
-
-	gk := printers.GetObjectGroupKind(info.Object)
-	for _, allowedSA := range pspreview.Status.AllowedServiceAccounts {
-		allowedBy := ""
-		if allowedSA.AllowedBy != nil {
-			allowedBy = allowedSA.AllowedBy.Name
-		}
-		_, err := fmt.Fprintf(w, "%s/%s\t%s\t%s\t\n", gk.Kind, info.Name, allowedSA.Name, allowedBy)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
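
A hedged sketch of the server-side check Run() issues per visited resource, mirroring the Create call above; createReview and its parameter names are illustrative, not part of the vendored API:

package example

import (
	corev1 "k8s.io/api/core/v1"

	securityv1 "github.com/openshift/api/security/v1"
	securityv1typedclient "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1"
)

// createReview asks the server which of the given (short) service account
// names would be allowed to run the pod template; nothing is persisted.
func createReview(client securityv1typedclient.SecurityV1Interface, namespace string,
	template corev1.PodTemplateSpec, serviceAccounts []string) (*securityv1.PodSecurityPolicyReview, error) {
	review := &securityv1.PodSecurityPolicyReview{
		Spec: securityv1.PodSecurityPolicyReviewSpec{
			Template:            template,        // inferred from the object's pod template
			ServiceAccountNames: serviceAccounts, // short names such as "bob"
		},
	}
	return client.PodSecurityPolicyReviews(namespace).Create(review)
}
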
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/subject_review.go b/vendor/github.com/openshift/oc/pkg/cli/admin/policy/subject_review.go
deleted file mode 100644
index 27a64afb15e0..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/subject_review.go
+++ /dev/null
@@ -1,265 +0,0 @@
-package policy
-
-import (
-	"fmt"
-	"io"
-	"strings"
-	"text/tabwriter"
-
-	"github.com/spf13/cobra"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/meta"
-	"k8s.io/apimachinery/pkg/runtime"
-	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/apiserver/pkg/authentication/serviceaccount"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/printers"
-	"k8s.io/cli-runtime/pkg/resource"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	securityv1 "github.com/openshift/api/security/v1"
-	securityv1typedclient "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1"
-)
-
-var (
-	subjectReviewLong = templates.LongDesc(`Check whether a User, a Service Account, or a Group can create a Pod.
-	It returns a list of Security Context Constraints that will admit the resource.
-	If User is specified but Groups is not, it is interpreted as "What if User is not a member of any groups".
-	If both User and Groups are empty, the check is performed using the current user.
-	`)
-	subjectReviewExamples = templates.Examples(`# Check whether user bob can create a pod specified in myresource.yaml
-	$ %[1]s -u bob -f myresource.yaml
-
-	# Check whether user bob, who belongs to the projectAdmin group, can create a pod specified in myresource.yaml
-	$ %[1]s -u bob -g projectAdmin -f myresource.yaml
-
-	# Check whether the ServiceAccount specified in the podTemplateSpec in myresourcewithsa.yaml can create the Pod
-	$ %[1]s -f myresourcewithsa.yaml`)
-)
-
-const SubjectReviewRecommendedName = "scc-subject-review"
-
-type SCCSubjectReviewOptions struct {
-	PrintFlags *genericclioptions.PrintFlags
-
-	Printer *policyPrinter
-
-	sccSubjectReviewClient securityv1typedclient.SecurityV1Interface
-	namespace              string
-	enforceNamespace       bool
-	builder                *resource.Builder
-	RESTClientFactory      func(mapping *meta.RESTMapping) (resource.RESTClient, error)
-	FilenameOptions        resource.FilenameOptions
-	User                   string
-	Groups                 []string
-	noHeaders              bool
-	serviceAccount         string
-
-	genericclioptions.IOStreams
-}
-
-func NewSCCSubjectReviewOptions(streams genericclioptions.IOStreams) *SCCSubjectReviewOptions {
-	return &SCCSubjectReviewOptions{
-		PrintFlags: genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme),
-		IOStreams:  streams,
-	}
-}
-
-func NewCmdSccSubjectReview(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewSCCSubjectReviewOptions(streams)
-	cmd := &cobra.Command{
-		Use:     name,
-		Long:    subjectReviewLong,
-		Short:   "Check whether a user or a ServiceAccount can create a Pod",
-		Example: fmt.Sprintf(subjectReviewExamples, fullName, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, args, cmd))
-			kcmdutil.CheckErr(o.Run(args))
-		},
-	}
-
-	cmd.Flags().StringVarP(&o.User, "user", "u", o.User, "Review will be performed on behalf of this user")
-	cmd.Flags().StringSliceVarP(&o.Groups, "groups", "g", o.Groups, "Comma separated, list of groups. Review will be performed on behalf of these groups")
-	cmd.Flags().StringVarP(&o.serviceAccount, "serviceaccount", "z", o.serviceAccount, "service account in the current namespace to use as a user")
-	cmd.Flags().BoolVar(&o.noHeaders, "no-headers", o.noHeaders, "When using the default output format, don't print headers (default print headers).")
-	kcmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, "Filename, directory, or URL to a file identifying the resource to get from a server.")
-
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-func (o *SCCSubjectReviewOptions) Complete(f kcmdutil.Factory, args []string, cmd *cobra.Command) error {
-	if len(args) == 0 && len(o.FilenameOptions.Filenames) == 0 {
-		return kcmdutil.UsageErrorf(cmd, "one or more resources must be specified")
-	}
-	if len(o.User) > 0 && len(o.serviceAccount) > 0 {
-		return kcmdutil.UsageErrorf(cmd, "--user and --serviceaccount are mutually exclusive")
-	}
-	if len(o.serviceAccount) > 0 { // check whether user supplied a list of SA
-		if len(strings.Split(o.serviceAccount, ",")) > 1 {
-			return kcmdutil.UsageErrorf(cmd, "only one Service Account is supported")
-		}
-		if strings.HasPrefix(o.serviceAccount, serviceaccount.ServiceAccountUsernamePrefix) {
-			_, user, err := serviceaccount.SplitUsername(o.serviceAccount)
-			if err != nil {
-				return err
-			}
-			o.serviceAccount = user
-		}
-	}
-	var err error
-	o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	securityClient, err := securityv1typedclient.NewForConfig(clientConfig)
-	if err != nil {
-		return fmt.Errorf("unable to obtain client: %v", err)
-	}
-	o.sccSubjectReviewClient = securityClient
-	o.builder = f.NewBuilder()
-	o.RESTClientFactory = f.ClientForMapping
-
-	o.Printer = &policyPrinter{
-		printFlags:     o.PrintFlags,
-		humanPrintFunc: subjectReviewHumanPrinter,
-		noHeaders:      o.noHeaders,
-	}
-
-	return nil
-}
-
-func (o *SCCSubjectReviewOptions) Run(args []string) error {
-	userOrSA := o.User
-	if len(o.serviceAccount) > 0 {
-		userOrSA = o.serviceAccount
-	}
-	r := o.builder.
-		WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
-		NamespaceParam(o.namespace).
-		FilenameParam(o.enforceNamespace, &o.FilenameOptions).
-		ResourceTypeOrNameArgs(true, args...).
-		ContinueOnError().
-		Flatten().
-		Do()
-	err := r.Err()
-	if err != nil {
-		return err
-	}
-
-	allErrs := []error{}
-	err = r.Visit(func(info *resource.Info, err error) error {
-		if err != nil {
-			return err
-		}
-		var response runtime.Object
-		objectName := info.Name
-		podTemplateSpec, err := GetPodTemplateForObject(info.Object)
-		if err != nil {
-			return fmt.Errorf("%q cannot create pod: %v", objectName, err)
-		}
-		err = CheckStatefulSetWithWolumeClaimTemplates(info.Object)
-		if err != nil {
-			return err
-		}
-		if len(userOrSA) > 0 || len(o.Groups) > 0 {
-			unversionedObj, err := o.pspSubjectReview(userOrSA, podTemplateSpec)
-			if err != nil {
-				return fmt.Errorf("unable to compute Pod Security Policy Subject Review for %q: %v", objectName, err)
-			}
-			versionedObj := &securityv1.PodSecurityPolicySubjectReview{}
-			if err := scheme.Scheme.Convert(unversionedObj, versionedObj, nil); err != nil {
-				return err
-			}
-			response = versionedObj
-		} else {
-			unversionedObj, err := o.pspSelfSubjectReview(podTemplateSpec)
-			if err != nil {
-				return fmt.Errorf("unable to compute Pod Security Policy Subject Review for %q: %v", objectName, err)
-			}
-			versionedObj := &securityv1.PodSecurityPolicySelfSubjectReview{}
-			if err := scheme.Scheme.Convert(unversionedObj, versionedObj, nil); err != nil {
-				return err
-			}
-			response = versionedObj
-		}
-		if err := o.Printer.WithInfo(info).PrintObj(response, o.Out); err != nil {
-			allErrs = append(allErrs, err)
-		}
-		return nil
-	})
-	allErrs = append(allErrs, err)
-	return utilerrors.NewAggregate(allErrs)
-}
-
-func (o *SCCSubjectReviewOptions) pspSubjectReview(userOrSA string, podTemplateSpec *corev1.PodTemplateSpec) (*securityv1.PodSecurityPolicySubjectReview, error) {
-	podSecurityPolicySubjectReview := &securityv1.PodSecurityPolicySubjectReview{
-		Spec: securityv1.PodSecurityPolicySubjectReviewSpec{
-			Template: *podTemplateSpec,
-			User:     userOrSA,
-			Groups:   o.Groups,
-		},
-	}
-	return o.sccSubjectReviewClient.PodSecurityPolicySubjectReviews(o.namespace).Create(podSecurityPolicySubjectReview)
-}
-
-func (o *SCCSubjectReviewOptions) pspSelfSubjectReview(podTemplateSpec *corev1.PodTemplateSpec) (*securityv1.PodSecurityPolicySelfSubjectReview, error) {
-	podSecurityPolicySelfSubjectReview := &securityv1.PodSecurityPolicySelfSubjectReview{
-		Spec: securityv1.PodSecurityPolicySelfSubjectReviewSpec{
-			Template: *podTemplateSpec,
-		},
-	}
-	return o.sccSubjectReviewClient.PodSecurityPolicySelfSubjectReviews(o.namespace).Create(podSecurityPolicySelfSubjectReview)
-}
-
-func subjectReviewHumanPrinter(info *resource.Info, obj runtime.Object, noHeaders *bool, out io.Writer) error {
-	w := tabwriter.NewWriter(out, tabWriterMinWidth, tabWriterWidth, tabWriterPadding, tabWriterPadChar, tabWriterFlags)
-	defer w.Flush()
-
-	if info == nil {
-		return fmt.Errorf("expected non-nil resource info")
-	}
-
-	noHeadersVal := *noHeaders
-	if !noHeadersVal {
-		columns := []string{"RESOURCE", "ALLOWED BY"}
-		fmt.Fprintf(w, "%s\t\n", strings.Join(columns, "\t"))
-
-		// ensure headers are printed only once
-		*noHeaders = true
-	}
-
-	gk := printers.GetObjectGroupKind(info.Object)
-
-	allowedBy, err := getAllowedBy(obj)
-	if err != nil {
-		return err
-	}
-
-	_, err = fmt.Fprintf(w, "%s/%s\t%s\t\n", gk.Kind, info.Name, allowedBy)
-	return err
-}
-
-func getAllowedBy(obj runtime.Object) (string, error) {
-	value := ""
-	switch review := obj.(type) {
-	case *securityv1.PodSecurityPolicySelfSubjectReview:
-		if review.Status.AllowedBy != nil {
-			value = review.Status.AllowedBy.Name
-		}
-	case *securityv1.PodSecurityPolicySubjectReview:
-		if review.Status.AllowedBy != nil {
-			value = review.Status.AllowedBy.Name
-		}
-	default:
-		return value, fmt.Errorf("unexpected object %T", obj)
-	}
-	return value, nil
-}
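
Both review commands normalize -z arguments identically in Complete(): long service-account usernames are reduced to their short names. A runnable sketch of just that normalization, using the same upstream helpers:

package main

import (
	"fmt"
	"strings"

	"k8s.io/apiserver/pkg/authentication/serviceaccount"
)

func main() {
	for _, arg := range []string{"system:serviceaccount:bob:default", "builder"} {
		short := arg
		if strings.HasPrefix(arg, serviceaccount.ServiceAccountUsernamePrefix) {
			// SplitUsername returns (namespace, name, error) for
			// system:serviceaccount:<namespace>:<name> usernames.
			_, name, err := serviceaccount.SplitUsername(arg)
			if err != nil {
				fmt.Println("invalid service account username:", err)
				continue
			}
			short = name
		}
		fmt.Printf("%s -> %s\n", arg, short)
	}
}
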
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/who_can.go b/vendor/github.com/openshift/oc/pkg/cli/admin/policy/who_can.go
deleted file mode 100644
index a24d244e0cf8..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/policy/who_can.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package policy
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"io"
-	"strings"
-
-	"github.com/spf13/cobra"
-
-	"k8s.io/apimachinery/pkg/api/meta"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-	"k8s.io/kubernetes/pkg/printers"
-
-	authorizationv1 "github.com/openshift/api/authorization/v1"
-	authorizationv1typedclient "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1"
-)
-
-const WhoCanRecommendedName = "who-can"
-
-type WhoCanOptions struct {
-	PrintFlags *genericclioptions.PrintFlags
-
-	ToPrinter func(string) (printers.ResourcePrinter, error)
-
-	allNamespaces    bool
-	bindingNamespace string
-	client           authorizationv1typedclient.AuthorizationV1Interface
-
-	verb         string
-	resource     schema.GroupVersionResource
-	resourceName string
-
-	genericclioptions.IOStreams
-}
-
-func NewWhoCanOptions(streams genericclioptions.IOStreams) *WhoCanOptions {
-	return &WhoCanOptions{
-		PrintFlags: genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme),
-
-		IOStreams: streams,
-	}
-}
-
-// NewCmdWhoCan implements the OpenShift cli who-can command
-func NewCmdWhoCan(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewWhoCanOptions(streams)
-	cmd := &cobra.Command{
-		Use:   name + " VERB RESOURCE [NAME]",
-		Short: "List who can perform the specified action on a resource",
-		Long:  "List who can perform the specified action on a resource",
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.complete(f, cmd, args))
-			kcmdutil.CheckErr(o.run())
-		},
-	}
-
-	cmd.Flags().BoolVarP(&o.allNamespaces, "all-namespaces", "A", o.allNamespaces, "If true, list who can perform the specified action in all namespaces.")
-
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-func (o *WhoCanOptions) complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	mapper, err := f.ToRESTMapper()
-	if err != nil {
-		return err
-	}
-
-	switch len(args) {
-	case 3:
-		o.resourceName = args[2]
-		fallthrough
-	case 2:
-		o.verb = args[0]
-		o.resource = ResourceFor(mapper, args[1], o.ErrOut)
-	default:
-		return errors.New("you must specify two or three arguments: verb, resource, and optional resourceName")
-	}
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.client, err = authorizationv1typedclient.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	o.bindingNamespace, _, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) {
-		o.PrintFlags.NamePrintFlags.Operation = operation
-		return o.PrintFlags.ToPrinter()
-	}
-
-	return nil
-}
-
-func ResourceFor(mapper meta.RESTMapper, resourceArg string, errOut io.Writer) schema.GroupVersionResource {
-	fullySpecifiedGVR, groupResource := schema.ParseResourceArg(strings.ToLower(resourceArg))
-	gvr := schema.GroupVersionResource{}
-	if fullySpecifiedGVR != nil {
-		gvr, _ = mapper.ResourceFor(*fullySpecifiedGVR)
-	}
-	if gvr.Empty() {
-		var err error
-		gvr, err = mapper.ResourceFor(groupResource.WithVersion(""))
-		if err != nil {
-			if len(groupResource.Group) == 0 {
-				fmt.Fprintf(errOut, "Warning: the server doesn't have a resource type '%s'\n", groupResource.Resource)
-			} else {
-				fmt.Fprintf(errOut, "Warning: the server doesn't have a resource type '%s' in group '%s'\n", groupResource.Resource, groupResource.Group)
-			}
-			return schema.GroupVersionResource{Resource: resourceArg}
-		}
-	}
-
-	return gvr
-}
-
-func (o *WhoCanOptions) run() error {
-	authorizationAttributes := authorizationv1.Action{
-		Verb:         o.verb,
-		Group:        o.resource.Group,
-		Resource:     o.resource.Resource,
-		ResourceName: o.resourceName,
-	}
-
-	resourceAccessReviewResponse := &authorizationv1.ResourceAccessReviewResponse{}
-	var err error
-	if o.allNamespaces {
-		resourceAccessReviewResponse, err = o.client.ResourceAccessReviews().Create(&authorizationv1.ResourceAccessReview{Action: authorizationAttributes})
-	} else {
-		resourceAccessReviewResponse, err = o.client.LocalResourceAccessReviews(o.bindingNamespace).Create(&authorizationv1.LocalResourceAccessReview{Action: authorizationAttributes})
-	}
-
-	if err != nil {
-		return err
-	}
-
-	message := bytes.NewBuffer([]byte{})
-	fmt.Fprintln(message)
-
-	if resourceAccessReviewResponse.Namespace == metav1.NamespaceAll {
-		fmt.Fprintf(message, "\n%s\n", "Namespace: ")
-	} else {
-		fmt.Fprintf(message, "\nNamespace: %s\n", resourceAccessReviewResponse.Namespace)
-	}
-
-	resourceDisplay := o.resource.Resource
-	if len(o.resource.Group) > 0 {
-		resourceDisplay = resourceDisplay + "." + o.resource.Group
-	}
-
-	fmt.Fprintf(message, "Verb:      %s\n", o.verb)
-	fmt.Fprintf(message, "Resource:  %s\n", resourceDisplay)
-	if len(resourceAccessReviewResponse.UsersSlice) == 0 {
-		fmt.Fprintf(message, "\n%s\n", "Users:  none")
-	} else {
-		userSlice := sets.NewString(resourceAccessReviewResponse.UsersSlice...)
-		fmt.Fprintf(message, "\nUsers:  %s\n", strings.Join(userSlice.List(), "\n        "))
-	}
-
-	if len(resourceAccessReviewResponse.GroupsSlice) == 0 {
-		fmt.Fprintf(message, "\n%s\n", "Groups: none")
-	} else {
-		groupSlice := sets.NewString(resourceAccessReviewResponse.GroupsSlice...)
-		fmt.Fprintf(message, "Groups: %s\n", strings.Join(groupSlice.List(), "\n        "))
-	}
-
-	if len(resourceAccessReviewResponse.EvaluationError) != 0 {
-		fmt.Fprintf(message, "\nError during evaluation, results may not be complete: %s\n", resourceAccessReviewResponse.EvaluationError)
-	}
-
-	p, err := o.ToPrinter(message.String())
-	if err != nil {
-		return err
-	}
-
-	return p.PrintObj(resourceAccessReviewResponse, o.Out)
-}
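
A minimal sketch of the request who-can sends for a namespaced check, mirroring run() above; whoCanGetPods and its parameters are illustrative names, and the client is assumed to be built from a REST config as in complete():

package example

import (
	"fmt"

	authorizationv1 "github.com/openshift/api/authorization/v1"
	authorizationv1typedclient "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1"
)

// whoCanGetPods asks the server which users and groups may "get pods"
// in the given namespace.
func whoCanGetPods(client authorizationv1typedclient.AuthorizationV1Interface, namespace string) error {
	review := &authorizationv1.LocalResourceAccessReview{
		Action: authorizationv1.Action{Verb: "get", Resource: "pods"},
	}
	resp, err := client.LocalResourceAccessReviews(namespace).Create(review)
	if err != nil {
		return err
	}
	fmt.Println("users:", resp.UsersSlice, "groups:", resp.GroupsSlice)
	return nil
}
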
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/project/new_project.go b/vendor/github.com/openshift/oc/pkg/cli/admin/project/new_project.go
deleted file mode 100644
index b64a873d0baf..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/project/new_project.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package project
-
-import (
-	"errors"
-	"fmt"
-	"time"
-
-	"github.com/spf13/cobra"
-
-	kerrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	errorsutil "k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/printers"
-	rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	"github.com/openshift/api/annotations"
-	authorizationv1 "github.com/openshift/api/authorization/v1"
-	projectv1 "github.com/openshift/api/project/v1"
-	authorizationv1typedclient "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1"
-	projectv1typedclient "github.com/openshift/client-go/project/clientset/versioned/typed/project/v1"
-	"github.com/openshift/oc/pkg/cli/admin/policy"
-)
-
-const NewProjectRecommendedName = "new-project"
-
-type NewProjectOptions struct {
-	ProjectName  string
-	DisplayName  string
-	Description  string
-	NodeSelector string
-
-	UseNodeSelector bool
-	ProjectClient   projectv1typedclient.ProjectV1Interface
-	RbacClient      rbacv1client.RbacV1Interface
-	SARClient       authorizationv1typedclient.SubjectAccessReviewInterface
-
-	AdminRole string
-	AdminUser string
-
-	genericclioptions.IOStreams
-}
-
-var newProjectLong = templates.LongDesc(`
-	Create a new project
-
-	Use this command to create a project. You may optionally specify metadata about the project,
-	an admin user (and role, if you want to use a non-default admin role), and a node selector
-	to restrict the nodes that pods in this project can be scheduled onto.`)
-
-func NewNewProjectOptions(streams genericclioptions.IOStreams) *NewProjectOptions {
-	return &NewProjectOptions{
-		AdminRole: "admin",
-		IOStreams: streams,
-	}
-}
-
-// NewCmdNewProject implements the OpenShift cli new-project command
-func NewCmdNewProject(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewNewProjectOptions(streams)
-	cmd := &cobra.Command{
-		Use:   name + " NAME [--display-name=DISPLAYNAME] [--description=DESCRIPTION]",
-		Short: "Create a new project",
-		Long:  newProjectLong,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	cmd.Flags().StringVar(&o.AdminRole, "admin-role", o.AdminRole, "Project admin role name in the cluster policy")
-	cmd.Flags().StringVar(&o.AdminUser, "admin", o.AdminUser, "Project admin username")
-	cmd.Flags().StringVar(&o.DisplayName, "display-name", o.DisplayName, "Project display name")
-	cmd.Flags().StringVar(&o.Description, "description", o.Description, "Project description")
-	cmd.Flags().StringVar(&o.NodeSelector, "node-selector", o.NodeSelector, "Restrict pods onto nodes matching given label selector. Format: '=, =...'. Specifying \"\" means any node, not default. If unspecified, cluster default node selector will be used.")
-
-	return cmd
-}
-
-func (o *NewProjectOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	if len(args) != 1 {
-		return errors.New("you must specify one argument: project name")
-	}
-
-	// We can't depend on len(o.NodeSelector) > 0 because node-selector="" is valid,
-	// and we want to populate the node selector annotation only if it was explicitly set by the user
-	o.UseNodeSelector = cmd.Flag("node-selector").Changed
-
-	o.ProjectName = args[0]
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.ProjectClient, err = projectv1typedclient.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	o.RbacClient, err = rbacv1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	authorizationClient, err := authorizationv1typedclient.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	o.SARClient = authorizationClient.SubjectAccessReviews()
-
-	return nil
-}
-
-func (o *NewProjectOptions) Run() error {
-	if _, err := o.ProjectClient.Projects().Get(o.ProjectName, metav1.GetOptions{}); err != nil {
-		if !kerrors.IsNotFound(err) {
-			return err
-		}
-	} else {
-		return fmt.Errorf("project %v already exists", o.ProjectName)
-	}
-
-	project := &projectv1.Project{}
-	project.Name = o.ProjectName
-	project.Annotations = make(map[string]string)
-	project.Annotations[annotations.OpenShiftDescription] = o.Description
-	project.Annotations[annotations.OpenShiftDisplayName] = o.DisplayName
-	if o.UseNodeSelector {
-		project.Annotations[projectv1.ProjectNodeSelector] = o.NodeSelector
-	}
-	project, err := o.ProjectClient.Projects().Create(project)
-	if err != nil {
-		return err
-	}
-
-	fmt.Fprintf(o.Out, "Created project %v\n", o.ProjectName)
-
-	errs := []error{}
-	if len(o.AdminUser) != 0 {
-		adduser := policy.NewRoleModificationOptions(o.IOStreams)
-		adduser.RoleName = o.AdminRole
-		adduser.RoleKind = "ClusterRole"
-		adduser.RoleBindingNamespace = project.Name
-		adduser.RbacClient = o.RbacClient
-		adduser.Users = []string{o.AdminUser}
-		adduser.ToPrinter = noopPrinter
-
-		if err := adduser.AddRole(); err != nil {
-			fmt.Fprintf(o.Out, "%v could not be added to the %v role: %v\n", o.AdminUser, o.AdminRole, err)
-			errs = append(errs, err)
-		} else {
-			if err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
-				resp, err := o.SARClient.Create(&authorizationv1.SubjectAccessReview{
-					Action: authorizationv1.Action{
-						Namespace: o.ProjectName,
-						Verb:      "get",
-						Resource:  "projects",
-					},
-					User: o.AdminUser,
-				})
-				if err != nil {
-					return false, err
-				}
-				if !resp.Allowed {
-					return false, nil
-				}
-				return true, nil
-			}); err != nil {
-				fmt.Fprintf(o.Out, "%s is not able to get project %s with the %s role: %v\n", o.AdminUser, o.ProjectName, o.AdminRole, err)
-				errs = append(errs, err)
-			}
-		}
-	}
-
-	return errorsutil.NewAggregate(errs)
-}
-
-func noopPrinter(operation string) (printers.ResourcePrinter, error) {
-	return printers.NewDiscardingPrinter(), nil
-}
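
The post-create wait in Run() is a plain poll against SubjectAccessReviews. A hedged sketch under the same client interfaces; waitForAdminAccess is an illustrative name:

package example

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"

	authorizationv1 "github.com/openshift/api/authorization/v1"
	authorizationv1typedclient "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1"
)

// waitForAdminAccess polls until the given user can get projects in the new
// namespace, as authorization caches may lag the role binding.
func waitForAdminAccess(sar authorizationv1typedclient.SubjectAccessReviewInterface, project, user string) error {
	return wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
		resp, err := sar.Create(&authorizationv1.SubjectAccessReview{
			Action: authorizationv1.Action{Namespace: project, Verb: "get", Resource: "projects"},
			User:   user,
		})
		if err != nil {
			return false, err
		}
		return resp.Allowed, nil // first polls may return false until caches catch up
	})
}
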
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/bindings.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/bindings.go
deleted file mode 100644
index a8b9dfede317..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/bindings.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package auth
-
-import (
-	"fmt"
-	"io"
-	"k8s.io/apiserver/pkg/authentication/serviceaccount"
-
-	corev1 "k8s.io/api/core/v1"
-	kerrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	authv1client "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1"
-)
-
-// reapClusterBindings removes the subject from cluster-level role bindings
-func reapClusterBindings(removedSubject corev1.ObjectReference, c authv1client.AuthorizationV1Interface, out io.Writer) []error {
-	errors := []error{}
-
-	clusterBindings, err := c.ClusterRoleBindings().List(metav1.ListOptions{})
-	if err != nil {
-		return []error{err}
-	}
-	for _, binding := range clusterBindings.Items {
-		retainedSubjects := []corev1.ObjectReference{}
-		for _, subject := range binding.Subjects {
-			if subject != removedSubject {
-				retainedSubjects = append(retainedSubjects, subject)
-			}
-		}
-		if len(retainedSubjects) != len(binding.Subjects) {
-			updatedBinding := binding
-			updatedBinding.Subjects = retainedSubjects
-			updatedBinding.UserNames, updatedBinding.GroupNames = stringSubjectsFor(binding.Namespace, retainedSubjects)
-			if _, err := c.ClusterRoleBindings().Update(&updatedBinding); err != nil && !kerrors.IsNotFound(err) {
-				errors = append(errors, err)
-			} else {
-				fmt.Fprintf(out, "clusterrolebinding.rbac.authorization.k8s.io/"+updatedBinding.Name+" updated\n")
-			}
-		}
-	}
-	return errors
-}
-
-// reapNamespacedBindings removes the subject from namespaced role bindings
-func reapNamespacedBindings(removedSubject corev1.ObjectReference, c authv1client.AuthorizationV1Interface, out io.Writer) []error {
-	errors := []error{}
-
-	namespacedBindings, err := c.RoleBindings(metav1.NamespaceAll).List(metav1.ListOptions{})
-	if err != nil {
-		return []error{err}
-	}
-	for _, binding := range namespacedBindings.Items {
-		retainedSubjects := []corev1.ObjectReference{}
-		for _, subject := range binding.Subjects {
-			if subject != removedSubject {
-				retainedSubjects = append(retainedSubjects, subject)
-			}
-		}
-		if len(retainedSubjects) != len(binding.Subjects) {
-			updatedBinding := binding
-			updatedBinding.Subjects = retainedSubjects
-			updatedBinding.UserNames, updatedBinding.GroupNames = stringSubjectsFor(binding.Namespace, retainedSubjects)
-			if _, err := c.RoleBindings(binding.Namespace).Update(&updatedBinding); err != nil && !kerrors.IsNotFound(err) {
-				errors = append(errors, err)
-			} else {
-				fmt.Fprintf(out, "rolebinding.rbac.authorization.k8s.io/"+updatedBinding.Name+" updated\n")
-			}
-		}
-	}
-	return errors
-}
-
-// stringSubjectsFor returns users and groups for comparison against user.Info. currentNamespace is used
-// to create usernames for service accounts where namespace=="".
-func stringSubjectsFor(currentNamespace string, subjects []corev1.ObjectReference) ([]string, []string) {
-	// these MUST be nil to indicate empty
-	var users, groups []string
-
-	for _, subject := range subjects {
-		switch subject.Kind {
-		case "ServiceAccount":
-			namespace := currentNamespace
-			if len(subject.Namespace) > 0 {
-				namespace = subject.Namespace
-			}
-			if len(namespace) > 0 {
-				users = append(users, serviceaccount.MakeUsername(namespace, subject.Name))
-			}
-
-		case "User":
-			users = append(users, subject.Name)
-
-		case "Group":
-			groups = append(groups, subject.Name)
-		}
-	}
-
-	return users, groups
-}
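
The ServiceAccount branch of stringSubjectsFor relies on the upstream username helper. A runnable one-liner showing the produced form:

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/authentication/serviceaccount"
)

func main() {
	// ServiceAccount subjects are compared as fully-qualified usernames of
	// the form system:serviceaccount:<namespace>:<name>.
	fmt.Println(serviceaccount.MakeUsername("build", "builder"))
	// Output: system:serviceaccount:build:builder
}
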
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/cluster_role.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/cluster_role.go
deleted file mode 100644
index 310fd55b0a50..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/cluster_role.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package auth
-
-import (
-	"fmt"
-	"io"
-
-	kerrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-	rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-)
-
-func reapForClusterRole(clusterBindingClient rbacv1client.ClusterRoleBindingsGetter, bindingClient rbacv1client.RoleBindingsGetter, namespace, name string, out io.Writer) error {
-	errors := []error{}
-
-	clusterBindings, err := clusterBindingClient.ClusterRoleBindings().List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	for _, clusterBinding := range clusterBindings.Items {
-		if clusterBinding.RoleRef.Name == name {
-			if err := clusterBindingClient.ClusterRoleBindings().Delete(clusterBinding.Name, &metav1.DeleteOptions{}); err != nil && !kerrors.IsNotFound(err) {
-				errors = append(errors, err)
-			} else {
-				fmt.Fprintf(out, "clusterrolebinding.rbac.authorization.k8s.io/"+clusterBinding.Name+" deleted\n")
-			}
-		}
-	}
-
-	namespacedBindings, err := bindingClient.RoleBindings(kapi.NamespaceNone).List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	for _, namespacedBinding := range namespacedBindings.Items {
-		if namespacedBinding.RoleRef.Kind == "ClusterRole" && namespacedBinding.RoleRef.Name == name {
-			if err := bindingClient.RoleBindings(namespacedBinding.Namespace).Delete(namespacedBinding.Name, &metav1.DeleteOptions{}); err != nil && !kerrors.IsNotFound(err) {
-				errors = append(errors, err)
-			} else {
-				fmt.Fprintf(out, "rolebinding.rbac.authorization.k8s.io/"+namespacedBinding.Name+" deleted\n")
-			}
-		}
-	}
-
-	return utilerrors.NewAggregate(errors)
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/cluster_role_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/cluster_role_test.go
deleted file mode 100644
index 052c71133b87..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/cluster_role_test.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package auth
-
-import (
-	"io/ioutil"
-	"reflect"
-	"testing"
-
-	rbacv1 "k8s.io/api/rbac/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/client-go/kubernetes/fake"
-	clientgotesting "k8s.io/client-go/testing"
-)
-
-func TestClusterRoleReaper(t *testing.T) {
-	tests := []struct {
-		name                string
-		role                *rbacv1.ClusterRole
-		bindings            []*rbacv1.ClusterRoleBinding
-		deletedBindingNames []string
-	}{
-		{
-			name: "no bindings",
-			role: &rbacv1.ClusterRole{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "role",
-				},
-			},
-		},
-		{
-			name: "bindings",
-			role: &rbacv1.ClusterRole{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "role",
-				},
-			},
-			bindings: []*rbacv1.ClusterRoleBinding{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "binding-1",
-					},
-					RoleRef: rbacv1.RoleRef{Name: "role", Kind: "ClusterRole"},
-				},
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "binding-2",
-					},
-					RoleRef: rbacv1.RoleRef{Name: "role2", Kind: "ClusterRole"},
-				},
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "binding-3",
-					},
-					RoleRef: rbacv1.RoleRef{Name: "role", Kind: "ClusterRole"},
-				},
-			},
-			deletedBindingNames: []string{"binding-1", "binding-3"},
-		},
-	}
-
-	for _, test := range tests {
-		startingObjects := []runtime.Object{}
-		startingObjects = append(startingObjects, test.role)
-		for _, binding := range test.bindings {
-			startingObjects = append(startingObjects, binding)
-		}
-		tc := fake.NewSimpleClientset(startingObjects...)
-
-		actualDeletedBindingNames := []string{}
-		tc.PrependReactor("delete", "clusterrolebindings", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-			actualDeletedBindingNames = append(actualDeletedBindingNames, action.(clientgotesting.DeleteAction).GetName())
-			return true, nil, nil
-		})
-
-		err := reapForClusterRole(tc.RbacV1(), tc.RbacV1(), "", test.role.Name, ioutil.Discard)
-		if err != nil {
-			t.Errorf("%s: unexpected error: %v", test.name, err)
-		}
-
-		expected := sets.NewString(test.deletedBindingNames...)
-		actuals := sets.NewString(actualDeletedBindingNames...)
-		if !reflect.DeepEqual(expected.List(), actuals.List()) {
-			t.Errorf("%s: expected %v, got %v", test.name, expected.List(), actuals.List())
-		}
-	}
-}
-
-func TestClusterRoleReaperAgainstNamespacedBindings(t *testing.T) {
-	tests := []struct {
-		name                string
-		role                *rbacv1.ClusterRole
-		bindings            []*rbacv1.RoleBinding
-		deletedBindingNames []string
-	}{
-		{
-			name: "bindings",
-			role: &rbacv1.ClusterRole{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "role",
-				},
-			},
-			bindings: []*rbacv1.RoleBinding{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "binding-1",
-						Namespace: "ns-one",
-					},
-					RoleRef: rbacv1.RoleRef{Name: "role", Kind: "ClusterRole"},
-				},
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "binding-2",
-						Namespace: "ns-one",
-					},
-					RoleRef: rbacv1.RoleRef{Name: "role2", Kind: "ClusterRole"},
-				},
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "binding-3",
-						Namespace: "ns-one",
-					},
-					RoleRef: rbacv1.RoleRef{Name: "role", Kind: "ClusterRole"},
-				},
-			},
-			deletedBindingNames: []string{"binding-1", "binding-3"},
-		},
-	}
-
-	for _, test := range tests {
-		startingObjects := []runtime.Object{}
-		startingObjects = append(startingObjects, test.role)
-		for _, binding := range test.bindings {
-			startingObjects = append(startingObjects, binding)
-		}
-		tc := fake.NewSimpleClientset(startingObjects...)
-
-		actualDeletedBindingNames := []string{}
-		tc.PrependReactor("delete", "rolebindings", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-			actualDeletedBindingNames = append(actualDeletedBindingNames, action.(clientgotesting.DeleteAction).GetName())
-			return true, nil, nil
-		})
-
-		err := reapForClusterRole(tc.RbacV1(), tc.RbacV1(), "", test.role.Name, ioutil.Discard)
-		if err != nil {
-			t.Errorf("%s: unexpected error: %v", test.name, err)
-		}
-
-		expected := sets.NewString(test.deletedBindingNames...)
-		actuals := sets.NewString(actualDeletedBindingNames...)
-		if !reflect.DeepEqual(expected.List(), actuals.List()) {
-			t.Errorf("%s: expected %v, got %v", test.name, expected.List(), actuals.List())
-		}
-	}
-}
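
These tests use the standard fake-clientset reactor pattern: intercept an action, record it, and short-circuit the tracker. A minimal runnable sketch of that pattern in isolation (binding-1 is an arbitrary example name):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	clientgotesting "k8s.io/client-go/testing"
)

func main() {
	tc := fake.NewSimpleClientset()
	deleted := []string{}
	// Record the names of deleted clusterrolebindings instead of letting the
	// fake object tracker handle the delete.
	tc.PrependReactor("delete", "clusterrolebindings", func(action clientgotesting.Action) (bool, runtime.Object, error) {
		deleted = append(deleted, action.(clientgotesting.DeleteAction).GetName())
		return true, nil, nil
	})
	_ = tc.RbacV1().ClusterRoleBindings().Delete("binding-1", nil)
	fmt.Println(deleted) // [binding-1]
}
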
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/group.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/group.go
deleted file mode 100644
index fd96d951b1de..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/group.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package auth
-
-import (
-	"fmt"
-	"io"
-
-	corev1 "k8s.io/api/core/v1"
-	kerrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-
-	authv1client "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1"
-	securityv1client "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1"
-)
-
-func reapForGroup(
-	authorizationClient authv1client.AuthorizationV1Interface,
-	securityClient securityv1client.SecurityContextConstraintsInterface,
-	name string,
-	out io.Writer) error {
-
-	errors := []error{}
-
-	removedSubject := corev1.ObjectReference{Kind: "Group", Name: name}
-	errors = append(errors, reapClusterBindings(removedSubject, authorizationClient, out)...)
-	errors = append(errors, reapNamespacedBindings(removedSubject, authorizationClient, out)...)
-
-	// Remove the group from sccs
-	sccs, err := securityClient.List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	for _, scc := range sccs.Items {
-		retainedGroups := []string{}
-		for _, group := range scc.Groups {
-			if group != name {
-				retainedGroups = append(retainedGroups, group)
-			}
-		}
-		if len(retainedGroups) != len(scc.Groups) {
-			updatedSCC := scc
-			updatedSCC.Groups = retainedGroups
-			if _, err := securityClient.Update(&updatedSCC); err != nil && !kerrors.IsNotFound(err) {
-				errors = append(errors, err)
-			} else {
-				fmt.Fprintf(out, "securitycontextconstraints.security.openshift.io/"+updatedSCC.Name+" updated\n")
-			}
-		}
-	}
-
-	// Intentionally leave identities that reference the user
-	// The user does not "own" the identities
-	// If the admin wants to remove the identities, that is a distinct operation
-
-	return utilerrors.NewAggregate(errors)
-}
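
The SCC pruning step in reapForGroup is a simple slice filter. A runnable sketch of just that filter; removeGroupFromSCC is a hypothetical name:

package main

import "fmt"

// removeGroupFromSCC returns groups without the given name, the same filter
// reapForGroup applies to scc.Groups before updating each SCC.
func removeGroupFromSCC(groups []string, name string) []string {
	retained := make([]string, 0, len(groups))
	for _, g := range groups {
		if g != name {
			retained = append(retained, g)
		}
	}
	return retained
}

func main() {
	fmt.Println(removeGroupFromSCC([]string{"mygroup", "other"}, "mygroup")) // [other]
}
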
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/group_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/group_test.go
deleted file mode 100644
index d00505091003..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/group_test.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package auth
-
-import (
-	"io/ioutil"
-	"reflect"
-	"testing"
-
-	"github.com/davecgh/go-spew/spew"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	clienttesting "k8s.io/client-go/testing"
-
-	authv1 "github.com/openshift/api/authorization/v1"
-	securityv1 "github.com/openshift/api/security/v1"
-	fakeauthclient "github.com/openshift/client-go/authorization/clientset/versioned/fake"
-	fakeauthv1client "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake"
-	fakesecurityclient "github.com/openshift/client-go/security/clientset/versioned/fake"
-	fakesecurityv1client "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake"
-	fakeuserv1client "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake"
-)
-
-var (
-	groupsResource              = schema.GroupVersionResource{Group: "user.openshift.io", Version: "v1", Resource: "groups"}
-	clusterRoleBindingsResource = schema.GroupVersionResource{Group: "authorization.openshift.io", Version: "v1", Resource: "clusterrolebindings"}
-	roleBindingsResource        = schema.GroupVersionResource{Group: "authorization.openshift.io", Version: "v1", Resource: "rolebindings"}
-	sccResource                 = schema.GroupVersionResource{Group: "security.openshift.io", Version: "v1", Resource: "securitycontextconstraints"}
-)
-
-func TestGroupReaper(t *testing.T) {
-	tests := []struct {
-		name     string
-		group    string
-		objects  []runtime.Object
-		sccs     []runtime.Object
-		expected []interface{}
-	}{
-		{
-			name:     "no objects",
-			group:    "mygroup",
-			objects:  []runtime.Object{},
-			expected: []interface{}{},
-		},
-		{
-			name:  "cluster bindings",
-			group: "mygroup",
-			objects: []runtime.Object{
-				&authv1.ClusterRoleBinding{
-					ObjectMeta: metav1.ObjectMeta{Name: "binding-no-subjects"},
-					RoleRef:    corev1.ObjectReference{Name: "role"},
-					Subjects:   []corev1.ObjectReference{},
-				},
-				&authv1.ClusterRoleBinding{
-					ObjectMeta: metav1.ObjectMeta{Name: "binding-one-subject"},
-					RoleRef:    corev1.ObjectReference{Name: "role"},
-					Subjects:   []corev1.ObjectReference{{Name: "mygroup", Kind: "Group"}},
-				},
-				&authv1.ClusterRoleBinding{
-					ObjectMeta: metav1.ObjectMeta{Name: "binding-mismatched-subject"},
-					RoleRef:    corev1.ObjectReference{Name: "role"},
-					Subjects:   []corev1.ObjectReference{{Name: "mygroup"}, {Name: "mygroup", Kind: "User"}, {Name: "mygroup", Kind: "Other"}},
-				},
-			},
-			expected: []interface{}{
-				clienttesting.UpdateActionImpl{ActionImpl: clienttesting.ActionImpl{Verb: "update", Resource: clusterRoleBindingsResource}, Object: &authv1.ClusterRoleBinding{
-					ObjectMeta: metav1.ObjectMeta{Name: "binding-one-subject"},
-					RoleRef:    corev1.ObjectReference{Name: "role"},
-					Subjects:   []corev1.ObjectReference{},
-				}},
-			},
-		},
-		{
-			name:  "namespaced bindings",
-			group: "mygroup",
-			objects: []runtime.Object{
-				&authv1.RoleBinding{
-					ObjectMeta: metav1.ObjectMeta{Name: "binding-no-subjects", Namespace: "ns1"},
-					RoleRef:    corev1.ObjectReference{Name: "role"},
-					Subjects:   []corev1.ObjectReference{},
-				},
-				&authv1.RoleBinding{
-					ObjectMeta: metav1.ObjectMeta{Name: "binding-one-subject", Namespace: "ns2"},
-					RoleRef:    corev1.ObjectReference{Name: "role"},
-					Subjects:   []corev1.ObjectReference{{Name: "mygroup", Kind: "Group"}},
-				},
-				&authv1.RoleBinding{
-					ObjectMeta: metav1.ObjectMeta{Name: "binding-mismatched-subject", Namespace: "ns3"},
-					RoleRef:    corev1.ObjectReference{Name: "role"},
-					Subjects:   []corev1.ObjectReference{{Name: "mygroup"}, {Name: "mygroup", Kind: "User"}, {Name: "mygroup", Kind: "Other"}},
-				},
-			},
-			expected: []interface{}{
-				clienttesting.UpdateActionImpl{ActionImpl: clienttesting.ActionImpl{Verb: "update", Resource: roleBindingsResource, Namespace: "ns2"}, Object: &authv1.RoleBinding{
-					ObjectMeta: metav1.ObjectMeta{Name: "binding-one-subject", Namespace: "ns2"},
-					RoleRef:    corev1.ObjectReference{Name: "role"},
-					Subjects:   []corev1.ObjectReference{},
-				}},
-			},
-		},
-		{
-			name:  "sccs",
-			group: "mygroup",
-			sccs: []runtime.Object{
-				&securityv1.SecurityContextConstraints{
-					ObjectMeta: metav1.ObjectMeta{Name: "scc-no-subjects"},
-					Groups:     []string{},
-				},
-				&securityv1.SecurityContextConstraints{
-					ObjectMeta: metav1.ObjectMeta{Name: "scc-one-subject"},
-					Groups:     []string{"mygroup"},
-				},
-				&securityv1.SecurityContextConstraints{
-					ObjectMeta: metav1.ObjectMeta{Name: "scc-mismatched-subjects"},
-					Users:      []string{"mygroup"},
-					Groups:     []string{"mygroup2"},
-				},
-			},
-			expected: []interface{}{
-				clienttesting.UpdateActionImpl{ActionImpl: clienttesting.ActionImpl{Verb: "update", Resource: sccResource}, Object: &securityv1.SecurityContextConstraints{
-					ObjectMeta: metav1.ObjectMeta{Name: "scc-one-subject"},
-					Groups:     []string{},
-				}},
-			},
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			authFake := &fakeauthv1client.FakeAuthorizationV1{Fake: &(fakeauthclient.NewSimpleClientset(test.objects...).Fake)}
-			userFake := &fakeuserv1client.FakeUserV1{Fake: &clienttesting.Fake{}}
-			securityFake := &fakesecurityv1client.FakeSecurityV1{Fake: &(fakesecurityclient.NewSimpleClientset(test.sccs...).Fake)}
-
-			actual := []interface{}{}
-			oreactor := func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
-				t.Logf("oreactor: %#v", action)
-				actual = append(actual, action)
-				return false, nil, nil
-			}
-			kreactor := func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
-				t.Logf("kreactor: %#v", action)
-				actual = append(actual, action)
-				return false, nil, nil
-			}
-
-			authFake.PrependReactor("update", "*", oreactor)
-			userFake.PrependReactor("update", "*", oreactor)
-			authFake.PrependReactor("delete", "*", oreactor)
-			userFake.PrependReactor("delete", "*", oreactor)
-			securityFake.Fake.PrependReactor("update", "*", kreactor)
-			securityFake.Fake.PrependReactor("delete", "*", kreactor)
-
-			err := reapForGroup(authFake, securityFake.SecurityContextConstraints(), test.group, ioutil.Discard)
-			if err != nil {
-				t.Errorf("unexpected error: %v", err)
-			}
-
-			if !reflect.DeepEqual(test.expected, actual) {
-				for i, x := range test.expected {
-					t.Logf("Expected %d: %s", i, spew.Sprint(x))
-				}
-				for i, x := range actual {
-					t.Logf("Actual %d:   %s", i, spew.Sprint(x))
-				}
-				t.Error("unexpected actions")
-			}
-		})
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/prune_command.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/prune_command.go
deleted file mode 100644
index 0d973dfa9c19..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/prune_command.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package auth
-
-import (
-	"github.com/spf13/cobra"
-
-	"k8s.io/apimachinery/pkg/api/meta"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/resource"
-	rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-
-	authv1client "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1"
-	oauthv1client "github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1"
-	securityv1client "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1"
-	userv1client "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1"
-)
-
-// PruneAuthOptions holds all the required options for pruning role, cluster role, user, and group references.
-type PruneAuthOptions struct {
-	FilenameOptions resource.FilenameOptions
-	Selector        string
-	All             bool
-
-	Builder                  *resource.Builder
-	RoleBindingClient        rbacv1client.RoleBindingsGetter
-	ClusterRoleBindingClient rbacv1client.ClusterRoleBindingsGetter
-
-	UserClient          userv1client.UserV1Interface
-	AuthorizationClient authv1client.AuthorizationV1Interface
-	OAuthClient         oauthv1client.OauthV1Interface
-	SecurityClient      securityv1client.SecurityV1Interface
-
-	genericclioptions.IOStreams
-}
-
-func NewPruneAuthOptions(streams genericclioptions.IOStreams) *PruneAuthOptions {
-	return &PruneAuthOptions{
-		IOStreams: streams,
-	}
-}
-
-// NewCmdPruneAuth implements the OpenShift cli prune auth command.
-func NewCmdPruneAuth(f kcmdutil.Factory, name string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewPruneAuthOptions(streams)
-	cmd := &cobra.Command{
-		Use:   name,
-		Short: "Removes references to the specified roles, clusterroles, users, and groups.",
-		Long:  "Removes references to the specified roles, clusterroles, users, and groups. Other types are ignored.",
-
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.RunPrune())
-		},
-	}
-
-	kcmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, "containing the resource to delete.")
-
-	cmd.Flags().StringVarP(&o.Selector, "selector", "l", "", "Selector (label query) to filter on.")
-	cmd.Flags().BoolVar(&o.All, "all", o.All, "Prune all roles in the namespace.")
-
-	return cmd
-}
-
-func (o *PruneAuthOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	var err error
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.RoleBindingClient, err = rbacv1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	o.ClusterRoleBindingClient, err = rbacv1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	o.UserClient, err = userv1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	o.AuthorizationClient, err = authv1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	o.OAuthClient, err = oauthv1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	o.SecurityClient, err = securityv1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	o.Builder = f.NewBuilder().
-		Unstructured().
-		ContinueOnError().
-		NamespaceParam(cmdNamespace).DefaultNamespace().
-		FilenameParam(enforceNamespace, &o.FilenameOptions).
-		LabelSelectorParam(o.Selector).
-		SelectAllParam(o.All).
-		ResourceTypeOrNameArgs(false, args...).
-		RequireObject(false).
-		Flatten()
-
-	return nil
-}
-
-func (o *PruneAuthOptions) RunPrune() error {
-	r := o.Builder.Do()
-	if r.Err() != nil {
-		return r.Err()
-	}
-
-	// this is weird, but we do this here for easy compatibility with existing reapers.  This command doesn't make sense
-	// without a server connection.  Still.  Don't do this at home.
-	err := r.Visit(func(info *resource.Info, err error) error {
-		if err != nil {
-			return err
-		}
-
-		switch {
-		case isRole(info.Mapping):
-			reapForRole(o.RoleBindingClient, info.Namespace, info.Name, o.Out)
-
-		case isClusterRole(info.Mapping):
-			reapForClusterRole(o.ClusterRoleBindingClient, o.RoleBindingClient, info.Namespace, info.Name, o.Out)
-
-		case isUser(info.Mapping):
-			reapForUser(o.UserClient, o.AuthorizationClient, o.OAuthClient, o.SecurityClient.SecurityContextConstraints(), info.Name, o.Out)
-
-		case isGroup(info.Mapping):
-			reapForGroup(o.AuthorizationClient, o.SecurityClient.SecurityContextConstraints(), info.Name, o.Out)
-		}
-
-		return nil
-	})
-
-	return err
-}
-
-func isRole(mapping *meta.RESTMapping) bool {
-	if mapping.Resource.Group != "rbac.authorization.k8s.io" && mapping.Resource.Group != "authorization.openshift.io" {
-		return false
-	}
-	if mapping.Resource.Resource != "roles" {
-		return false
-	}
-	return true
-}
-
-func isClusterRole(mapping *meta.RESTMapping) bool {
-	if mapping.Resource.Group != "rbac.authorization.k8s.io" && mapping.Resource.Group != "authorization.openshift.io" {
-		return false
-	}
-	if mapping.Resource.Resource != "clusterroles" {
-		return false
-	}
-	return true
-}
-
-func isUser(mapping *meta.RESTMapping) bool {
-	if mapping.Resource.Group == "user.openshift.io" && mapping.Resource.Resource == "users" {
-		return true
-	}
-	return false
-}
-
-func isGroup(mapping *meta.RESTMapping) bool {
-	if mapping.Resource.Group == "user.openshift.io" && mapping.Resource.Resource == "groups" {
-		return true
-	}
-	return false
-}
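For reference, the RunPrune dispatch above turns entirely on these GroupVersionResource checks. Below is a self-contained sketch of the same classification logic; the classify helper and the sample GVRs are illustrative and were not part of the deleted package:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime/schema"
    )

    // classify mirrors the isRole/isClusterRole/isUser/isGroup predicates:
    // roles and clusterroles match either the upstream RBAC group or the legacy
    // authorization.openshift.io group; users and groups match only
    // user.openshift.io. Anything else falls through and RunPrune skips it.
    func classify(gvr schema.GroupVersionResource) string {
        rbacLike := gvr.Group == "rbac.authorization.k8s.io" || gvr.Group == "authorization.openshift.io"
        switch {
        case rbacLike && gvr.Resource == "roles":
            return "role"
        case rbacLike && gvr.Resource == "clusterroles":
            return "clusterrole"
        case gvr.Group == "user.openshift.io" && gvr.Resource == "users":
            return "user"
        case gvr.Group == "user.openshift.io" && gvr.Resource == "groups":
            return "group"
        default:
            return "ignored"
        }
    }

    func main() {
        fmt.Println(classify(schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Resource: "roles"})) // role
        fmt.Println(classify(schema.GroupVersionResource{Group: "user.openshift.io", Resource: "groups"}))        // group
        fmt.Println(classify(schema.GroupVersionResource{Group: "apps", Resource: "deployments"}))                // ignored
    }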
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/role.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/role.go
deleted file mode 100644
index 9acca6b1c53d..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/role.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package auth
-
-import (
-	"fmt"
-	"io"
-
-	kerrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-	rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
-)
-
-func reapForRole(bindingClient rbacv1client.RoleBindingsGetter, namespace, name string, out io.Writer) error {
-	bindings, err := bindingClient.RoleBindings(namespace).List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-
-	errors := []error{}
-	for _, binding := range bindings.Items {
-		if binding.RoleRef.Kind == "Role" && binding.RoleRef.Name == name {
-			foreground := metav1.DeletePropagationForeground
-			if err := bindingClient.RoleBindings(namespace).Delete(binding.Name, &metav1.DeleteOptions{PropagationPolicy: &foreground}); err != nil && !kerrors.IsNotFound(err) {
-				errors = append(errors, err)
-			} else {
-				fmt.Fprintf(out, "rolebinding.rbac.authorization.k8s.io/"+binding.Name+" deleted\n")
-			}
-		}
-	}
-
-	return utilerrors.NewAggregate(errors)
-}
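Since reapForRole is unexported, any caller lives inside package auth. A minimal sketch of driving it against a live cluster; the kubeconfig path, namespace, and role name are placeholders:

    import (
        "os"

        rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
        "k8s.io/client-go/tools/clientcmd"
    )

    func pruneRoleExample() error {
        // Build a client from a local kubeconfig; in-cluster config works too.
        config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
        if err != nil {
            return err
        }
        client, err := rbacv1client.NewForConfig(config)
        if err != nil {
            return err
        }
        // Deletes, with foreground propagation, every RoleBinding in "myns"
        // whose RoleRef points at the Role named "myrole".
        return reapForRole(client, "myns", "myrole", os.Stdout)
    }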
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/role_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/role_test.go
deleted file mode 100644
index beb8621dba2b..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/role_test.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package auth
-
-import (
-	"io/ioutil"
-	"reflect"
-	"testing"
-
-	rbacv1 "k8s.io/api/rbac/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/client-go/kubernetes/fake"
-	clientgotesting "k8s.io/client-go/testing"
-)
-
-func TestRoleReaper(t *testing.T) {
-	tests := []struct {
-		name                string
-		role                *rbacv1.Role
-		bindings            []*rbacv1.RoleBinding
-		deletedBindingNames []string
-	}{
-		{
-			name: "no bindings",
-			role: &rbacv1.Role{
-				ObjectMeta: metav1.ObjectMeta{
-					Namespace: "foo",
-					Name:      "role",
-				},
-			},
-		},
-		{
-			name: "bindings",
-			role: &rbacv1.Role{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "role",
-					Namespace: "one",
-				},
-			},
-			bindings: []*rbacv1.RoleBinding{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "binding-1",
-						Namespace: "one",
-					},
-					RoleRef: rbacv1.RoleRef{Name: "role", Kind: "Role"},
-				},
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "binding-2",
-						Namespace: "one",
-					},
-					RoleRef: rbacv1.RoleRef{Name: "role2", Kind: "Role"},
-				},
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "binding-3",
-						Namespace: "one",
-					},
-					RoleRef: rbacv1.RoleRef{Name: "role", Kind: "Role"},
-				},
-			},
-			deletedBindingNames: []string{"binding-1", "binding-3"},
-		},
-		{
-			name: "bindings in to cluster scoped ignored",
-			role: &rbacv1.Role{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "role",
-					Namespace: "one",
-				},
-			},
-			bindings: []*rbacv1.RoleBinding{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "binding-1",
-						Namespace: "one",
-					},
-					RoleRef: rbacv1.RoleRef{Name: "role", Kind: "ClusterRole"},
-				},
-			},
-		},
-	}
-
-	for _, test := range tests {
-		startingObjects := []runtime.Object{}
-		startingObjects = append(startingObjects, test.role)
-		for _, binding := range test.bindings {
-			startingObjects = append(startingObjects, binding)
-		}
-		tc := fake.NewSimpleClientset(startingObjects...)
-
-		actualDeletedBindingNames := []string{}
-		tc.PrependReactor("delete", "rolebindings", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-			actualDeletedBindingNames = append(actualDeletedBindingNames, action.(clientgotesting.DeleteAction).GetName())
-			return true, nil, nil
-		})
-
-		err := reapForRole(tc.RbacV1(), test.role.Namespace, test.role.Name, ioutil.Discard)
-		if err != nil {
-			t.Errorf("%s: unexpected error: %v", test.name, err)
-		}
-
-		expected := sets.NewString(test.deletedBindingNames...)
-		actuals := sets.NewString(actualDeletedBindingNames...)
-		if !reflect.DeepEqual(expected.List(), actuals.List()) {
-			t.Errorf("%s: expected %v, got %v", test.name, expected.List(), actuals.List())
-		}
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/user.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/user.go
deleted file mode 100644
index 74c2b2311fca..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/user.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package auth
-
-import (
-	"fmt"
-	"io"
-
-	corev1 "k8s.io/api/core/v1"
-	kerrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-
-	authv1client "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1"
-	oauthv1client "github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1"
-	securityv1client "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1"
-	userv1client "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1"
-)
-
-func reapForUser(
-	userClient userv1client.UserV1Interface,
-	authorizationClient authv1client.AuthorizationV1Interface,
-	oauthClient oauthv1client.OauthV1Interface,
-	securityClient securityv1client.SecurityContextConstraintsInterface,
-	name string,
-	out io.Writer) error {
-
-	errors := []error{}
-
-	removedSubject := corev1.ObjectReference{Kind: "User", Name: name}
-	errors = append(errors, reapClusterBindings(removedSubject, authorizationClient, out)...)
-	errors = append(errors, reapNamespacedBindings(removedSubject, authorizationClient, out)...)
-
-	// Remove the user from sccs
-	sccs, err := securityClient.List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	for _, scc := range sccs.Items {
-		retainedUsers := []string{}
-		for _, user := range scc.Users {
-			if user != name {
-				retainedUsers = append(retainedUsers, user)
-			}
-		}
-		if len(retainedUsers) != len(scc.Users) {
-			updatedSCC := scc
-			updatedSCC.Users = retainedUsers
-			if _, err := securityClient.Update(&updatedSCC); err != nil && !kerrors.IsNotFound(err) {
-				errors = append(errors, err)
-			} else {
-				fmt.Fprintf(out, "securitycontextconstraints.security.openshift.io/"+updatedSCC.Name+" updated\n")
-			}
-		}
-	}
-
-	// Remove the user from groups
-	groups, err := userClient.Groups().List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	for _, group := range groups.Items {
-		retainedUsers := []string{}
-		for _, user := range group.Users {
-			if user != name {
-				retainedUsers = append(retainedUsers, user)
-			}
-		}
-		if len(retainedUsers) != len(group.Users) {
-			updatedGroup := group
-			updatedGroup.Users = retainedUsers
-			if _, err := userClient.Groups().Update(&updatedGroup); err != nil && !kerrors.IsNotFound(err) {
-				errors = append(errors, err)
-			} else {
-				fmt.Fprintf(out, "group.user.openshift.io/"+updatedGroup.Name+" updated\n")
-			}
-		}
-	}
-
-	// Remove the user's OAuthClientAuthorizations
-	// Once https://github.com/kubernetes/kubernetes/pull/28112 is fixed, use a field selector
-	// to filter on the userName, rather than fetching all authorizations and filtering client-side
-	authorizations, err := oauthClient.OAuthClientAuthorizations().List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	for _, authorization := range authorizations.Items {
-		if authorization.UserName == name {
-			if err := oauthClient.OAuthClientAuthorizations().Delete(authorization.Name, &metav1.DeleteOptions{}); err != nil && !kerrors.IsNotFound(err) {
-				errors = append(errors, err)
-			} else {
-				fmt.Fprintf(out, "oauthclientauthorization.oauth.openshift.io/"+authorization.Name+" updated\n")
-			}
-		}
-	}
-
-	// Intentionally leave identities that reference the user
-	// The user does not "own" the identities
-	// If the admin wants to remove the identities, that is a distinct operation
-
-	return utilerrors.NewAggregate(errors)
-}
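The same retain-and-compare pattern appears twice above, once for SCC users and once for group members. A generic version of the helper, purely illustrative:

    // retainExcept returns items with every occurrence of drop removed. The
    // callers above compare lengths afterwards: an unchanged length means the
    // subject was never present, so no update call is issued.
    func retainExcept(items []string, drop string) []string {
        retained := make([]string, 0, len(items))
        for _, item := range items {
            if item != drop {
                retained = append(retained, item)
            }
        }
        return retained
    }

Mirroring the SCC loop, usage would look roughly like:

    retained := retainExcept(scc.Users, name)
    if len(retained) != len(scc.Users) {
        updatedSCC := scc
        updatedSCC.Users = retained
        // ... securityClient.Update(&updatedSCC) as above
    }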
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/user_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/user_test.go
deleted file mode 100644
index b36683b7b7c9..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/auth/user_test.go
+++ /dev/null
@@ -1,255 +0,0 @@
-package auth
-
-import (
-	"io/ioutil"
-	"reflect"
-	"testing"
-
-	"github.com/davecgh/go-spew/spew"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	clienttesting "k8s.io/client-go/testing"
-
-	authv1 "github.com/openshift/api/authorization/v1"
-	oauthv1 "github.com/openshift/api/oauth/v1"
-	securityv1 "github.com/openshift/api/security/v1"
-	userv1 "github.com/openshift/api/user/v1"
-	fakeauthclient "github.com/openshift/client-go/authorization/clientset/versioned/fake"
-	fakeauthv1client "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1/fake"
-	fakeoauthclient "github.com/openshift/client-go/oauth/clientset/versioned/fake"
-	fakeoauthv1client "github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1/fake"
-	fakesecurityclient "github.com/openshift/client-go/security/clientset/versioned/fake"
-	fakesecurityv1client "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake"
-	fakeuserclient "github.com/openshift/client-go/user/clientset/versioned/fake"
-	fakeuserv1client "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1/fake"
-)
-
-var (
-	securityContextConstraintsResource = schema.GroupVersionResource{Group: "security.openshift.io", Version: "v1", Resource: "securitycontextconstraints"}
-	oAuthClientAuthorizationsResource = schema.GroupVersionResource{Group: "oauth.openshift.io", Version: "v1", Resource: "oauthclientauthorizations"}
-)
-
-func TestUserReaper(t *testing.T) {
-	tests := []struct {
-		name         string
-		user         string
-		authObjects  []runtime.Object
-		oauthObjects []runtime.Object
-		userObjects  []runtime.Object
-		sccs         []runtime.Object
-		expected     []interface{}
-	}{
-		{
-			name:     "no objects",
-			user:     "bob",
-			expected: []interface{}{},
-		},
-		{
-			name: "cluster bindings",
-			user: "bob",
-			authObjects: []runtime.Object{
-				&authv1.ClusterRoleBinding{
-					ObjectMeta: metav1.ObjectMeta{Name: "binding-no-subjects"},
-					RoleRef:    corev1.ObjectReference{Name: "role"},
-					Subjects:   []corev1.ObjectReference{},
-				},
-				&authv1.ClusterRoleBinding{
-					ObjectMeta: metav1.ObjectMeta{Name: "binding-one-subject"},
-					RoleRef:    corev1.ObjectReference{Name: "role"},
-					Subjects:   []corev1.ObjectReference{{Name: "bob", Kind: "User"}},
-				},
-				&authv1.ClusterRoleBinding{
-					ObjectMeta: metav1.ObjectMeta{Name: "binding-mismatched-subject"},
-					RoleRef:    corev1.ObjectReference{Name: "role"},
-					Subjects:   []corev1.ObjectReference{{Name: "bob"}, {Name: "bob", Kind: "Group"}, {Name: "bob", Kind: "Other"}},
-				},
-			},
-			expected: []interface{}{
-				clienttesting.UpdateActionImpl{ActionImpl: clienttesting.ActionImpl{Verb: "update", Resource: clusterRoleBindingsResource}, Object: &authv1.ClusterRoleBinding{
-					ObjectMeta: metav1.ObjectMeta{Name: "binding-one-subject"},
-					RoleRef:    corev1.ObjectReference{Name: "role"},
-					Subjects:   []corev1.ObjectReference{},
-				}},
-			},
-		},
-		{
-			name: "namespaced bindings",
-			user: "bob",
-			authObjects: []runtime.Object{
-				&authv1.RoleBinding{
-					ObjectMeta: metav1.ObjectMeta{Name: "binding-no-subjects", Namespace: "ns1"},
-					RoleRef:    corev1.ObjectReference{Name: "role"},
-					Subjects:   []corev1.ObjectReference{},
-				},
-				&authv1.RoleBinding{
-					ObjectMeta: metav1.ObjectMeta{Name: "binding-one-subject", Namespace: "ns2"},
-					RoleRef:    corev1.ObjectReference{Name: "role"},
-					Subjects:   []corev1.ObjectReference{{Name: "bob", Kind: "User"}},
-				},
-				&authv1.RoleBinding{
-					ObjectMeta: metav1.ObjectMeta{Name: "binding-mismatched-subject", Namespace: "ns3"},
-					RoleRef:    corev1.ObjectReference{Name: "role"},
-					Subjects:   []corev1.ObjectReference{{Name: "bob"}, {Name: "bob", Kind: "Group"}, {Name: "bob", Kind: "Other"}},
-				},
-			},
-			expected: []interface{}{
-				clienttesting.UpdateActionImpl{ActionImpl: clienttesting.ActionImpl{Verb: "update", Resource: roleBindingsResource, Namespace: "ns2"}, Object: &authv1.RoleBinding{
-					ObjectMeta: metav1.ObjectMeta{Name: "binding-one-subject", Namespace: "ns2"},
-					RoleRef:    corev1.ObjectReference{Name: "role"},
-					Subjects:   []corev1.ObjectReference{},
-				}},
-			},
-		},
-		{
-			name: "sccs",
-			user: "bob",
-			sccs: []runtime.Object{
-				&securityv1.SecurityContextConstraints{
-					ObjectMeta: metav1.ObjectMeta{Name: "scc-no-subjects"},
-					Users:      []string{},
-				},
-				&securityv1.SecurityContextConstraints{
-					ObjectMeta: metav1.ObjectMeta{Name: "scc-one-subject"},
-					Users:      []string{"bob"},
-				},
-				&securityv1.SecurityContextConstraints{
-					ObjectMeta: metav1.ObjectMeta{Name: "scc-mismatched-subjects"},
-					Users:      []string{"bob2"},
-					Groups:     []string{"bob"},
-				},
-			},
-			expected: []interface{}{
-				clienttesting.UpdateActionImpl{ActionImpl: clienttesting.ActionImpl{Verb: "update", Resource: securityContextContraintsResource}, Object: &securityv1.SecurityContextConstraints{
-					ObjectMeta: metav1.ObjectMeta{Name: "scc-one-subject"},
-					Users:      []string{},
-				}},
-			},
-		},
-		{
-			name: "identities",
-			user: "bob",
-			userObjects: []runtime.Object{
-				&userv1.Identity{
-					ObjectMeta: metav1.ObjectMeta{Name: "identity-no-user"},
-					User:       corev1.ObjectReference{},
-				},
-				&userv1.Identity{
-					ObjectMeta: metav1.ObjectMeta{Name: "identity-matching-user"},
-					User:       corev1.ObjectReference{Name: "bob"},
-				},
-				&userv1.Identity{
-					ObjectMeta: metav1.ObjectMeta{Name: "identity-different-uid"},
-					User:       corev1.ObjectReference{Name: "bob", UID: "123"},
-				},
-				&userv1.Identity{
-					ObjectMeta: metav1.ObjectMeta{Name: "identity-different-user"},
-					User:       corev1.ObjectReference{Name: "bob2"},
-				},
-			},
-			expected: []interface{}{},
-		},
-		{
-			name: "groups",
-			user: "bob",
-			userObjects: []runtime.Object{
-				&userv1.Group{
-					ObjectMeta: metav1.ObjectMeta{Name: "group-no-users"},
-					Users:      []string{},
-				},
-				&userv1.Group{
-					ObjectMeta: metav1.ObjectMeta{Name: "group-one-user"},
-					Users:      []string{"bob"},
-				},
-				&userv1.Group{
-					ObjectMeta: metav1.ObjectMeta{Name: "group-multiple-users"},
-					Users:      []string{"bob2", "bob", "steve"},
-				},
-				&userv1.Group{
-					ObjectMeta: metav1.ObjectMeta{Name: "group-mismatched-users"},
-					Users:      []string{"bob2", "steve"},
-				},
-			},
-			expected: []interface{}{
-				clienttesting.UpdateActionImpl{ActionImpl: clienttesting.ActionImpl{Verb: "update", Resource: groupsResource}, Object: &userv1.Group{
-					ObjectMeta: metav1.ObjectMeta{Name: "group-one-user"},
-					Users:      []string{},
-				}},
-				clienttesting.UpdateActionImpl{ActionImpl: clienttesting.ActionImpl{Verb: "update", Resource: groupsResource}, Object: &userv1.Group{
-					ObjectMeta: metav1.ObjectMeta{Name: "group-multiple-users"},
-					Users:      []string{"bob2", "steve"},
-				}},
-			},
-		},
-		{
-			name: "oauth client authorizations",
-			user: "bob",
-			oauthObjects: []runtime.Object{
-				&oauthv1.OAuthClientAuthorization{
-					ObjectMeta: metav1.ObjectMeta{Name: "other-user"},
-					UserName:   "alice",
-					UserUID:    "123",
-				},
-				&oauthv1.OAuthClientAuthorization{
-					ObjectMeta: metav1.ObjectMeta{Name: "bob-authorization-1"},
-					UserName:   "bob",
-					UserUID:    "234",
-				},
-				&oauthv1.OAuthClientAuthorization{
-					ObjectMeta: metav1.ObjectMeta{Name: "bob-authorization-2"},
-					UserName:   "bob",
-					UserUID:    "345",
-				},
-			},
-			expected: []interface{}{
-				clienttesting.DeleteActionImpl{ActionImpl: clienttesting.ActionImpl{Verb: "delete", Resource: oAuthClientAuthorizationsResource}, Name: "bob-authorization-1"},
-				clienttesting.DeleteActionImpl{ActionImpl: clienttesting.ActionImpl{Verb: "delete", Resource: oAuthClientAuthorizationsResource}, Name: "bob-authorization-2"},
-			},
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			authFake := &fakeauthv1client.FakeAuthorizationV1{Fake: &(fakeauthclient.NewSimpleClientset(test.authObjects...).Fake)}
-			oauthFake := &fakeoauthv1client.FakeOauthV1{Fake: &(fakeoauthclient.NewSimpleClientset(test.oauthObjects...).Fake)}
-			userFake := &fakeuserv1client.FakeUserV1{Fake: &(fakeuserclient.NewSimpleClientset(test.userObjects...).Fake)}
-			securityFake := &fakesecurityv1client.FakeSecurityV1{Fake: &(fakesecurityclient.NewSimpleClientset(test.sccs...).Fake)}
-
-			actual := []interface{}{}
-			oreactor := func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
-				actual = append(actual, action)
-				return false, nil, nil
-			}
-			kreactor := func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
-				actual = append(actual, action)
-				return false, nil, nil
-			}
-
-			authFake.PrependReactor("update", "*", oreactor)
-			userFake.PrependReactor("update", "*", oreactor)
-			oauthFake.PrependReactor("update", "*", oreactor)
-			authFake.PrependReactor("delete", "*", oreactor)
-			userFake.PrependReactor("delete", "*", oreactor)
-			oauthFake.PrependReactor("delete", "*", oreactor)
-			securityFake.Fake.PrependReactor("update", "*", kreactor)
-			securityFake.Fake.PrependReactor("delete", "*", kreactor)
-
-			err := reapForUser(userFake, authFake, oauthFake, securityFake.SecurityContextConstraints(), test.user, ioutil.Discard)
-			if err != nil {
-				t.Errorf("unexpected error: %v", err)
-			}
-
-			if !reflect.DeepEqual(test.expected, actual) {
-				for i, x := range test.expected {
-					t.Logf("Expected %d: %s", i, spew.Sprint(x))
-				}
-				for i, x := range actual {
-					t.Logf("Actual %d:   %s", i, spew.Sprint(x))
-				}
-				t.Errorf("unexpected actions")
-			}
-		})
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/builds.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/builds.go
deleted file mode 100644
index e57ee64d4efd..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/builds.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package builds
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"text/tabwriter"
-	"time"
-
-	"github.com/spf13/cobra"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	buildv1 "github.com/openshift/api/build/v1"
-	buildv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1"
-)
-
-const PruneBuildsRecommendedName = "builds"
-
-var (
-	buildsLongDesc = templates.LongDesc(`
-		Prune old completed and failed builds
-
-		By default, the prune operation performs a dry run, making no changes. The
-		--confirm flag is needed for changes to be effective.`)
-
-	buildsExample = templates.Examples(`
-		# Dry run deleting older completed and failed builds and also including
-		# all builds whose associated BuildConfig no longer exists
-		%[1]s %[2]s --orphans
-
-		# To actually perform the prune operation, the confirm flag must be appended
-		%[1]s %[2]s --orphans --confirm`)
-)
-
-// PruneBuildsOptions holds all the required options for pruning builds.
-type PruneBuildsOptions struct {
-	Confirm         bool
-	Orphans         bool
-	KeepYoungerThan time.Duration
-	KeepComplete    int
-	KeepFailed      int
-	Namespace       string
-
-	BuildClient buildv1client.BuildV1Interface
-
-	genericclioptions.IOStreams
-}
-
-func NewPruneBuildsOptions(streams genericclioptions.IOStreams) *PruneBuildsOptions {
-	return &PruneBuildsOptions{
-		Confirm:         false,
-		Orphans:         false,
-		KeepYoungerThan: 60 * time.Minute,
-		KeepComplete:    5,
-		KeepFailed:      1,
-		IOStreams:       streams,
-	}
-}
-
-// NewCmdPruneBuilds implements the OpenShift cli prune builds command.
-func NewCmdPruneBuilds(f kcmdutil.Factory, parentName, name string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewPruneBuildsOptions(streams)
-	cmd := &cobra.Command{
-		Use:     name,
-		Short:   "Remove old completed and failed builds",
-		Long:    buildsLongDesc,
-		Example: fmt.Sprintf(buildsExample, parentName, name),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Validate())
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	cmd.Flags().BoolVar(&o.Confirm, "confirm", o.Confirm, "If true, specify that build pruning should proceed. Defaults to false, displaying what would be deleted but not actually deleting anything.")
-	cmd.Flags().BoolVar(&o.Orphans, "orphans", o.Orphans, "If true, prune all builds whose associated BuildConfig no longer exists and whose status is complete, failed, error, or cancelled.")
-	cmd.Flags().DurationVar(&o.KeepYoungerThan, "keep-younger-than", o.KeepYoungerThan, "Specify the minimum age of a Build for it to be considered a candidate for pruning.")
-	cmd.Flags().IntVar(&o.KeepComplete, "keep-complete", o.KeepComplete, "Per BuildConfig, specify the number of builds whose status is complete that will be preserved.")
-	cmd.Flags().IntVar(&o.KeepFailed, "keep-failed", o.KeepFailed, "Per BuildConfig, specify the number of builds whose status is failed, error, or cancelled that will be preserved.")
-
-	return cmd
-}
-
-// Complete turns a partially defined PruneBuildsOptions into a fully populated
-// structure which can be validated and used for pruning builds.
-func (o *PruneBuildsOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	if len(args) > 0 {
-		return kcmdutil.UsageErrorf(cmd, "no arguments are allowed with this command")
-	}
-
-	o.Namespace = metav1.NamespaceAll
-	if cmd.Flags().Lookup("namespace").Changed {
-		var err error
-		o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace()
-		if err != nil {
-			return err
-		}
-	}
-
-	config, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.BuildClient, err = buildv1client.NewForConfig(config)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Validate ensures that a PruneBuildsOptions is valid and can be used to execute pruning.
-func (o PruneBuildsOptions) Validate() error {
-	if o.KeepYoungerThan < 0 {
-		return fmt.Errorf("--keep-younger-than must be greater than or equal to 0")
-	}
-	if o.KeepComplete < 0 {
-		return fmt.Errorf("--keep-complete must be greater than or equal to 0")
-	}
-	if o.KeepFailed < 0 {
-		return fmt.Errorf("--keep-failed must be greater than or equal to 0")
-	}
-	return nil
-}
-
-// Run contains all the necessary functionality for the OpenShift cli prune builds command.
-func (o PruneBuildsOptions) Run() error {
-	buildConfigList, err := o.BuildClient.BuildConfigs(o.Namespace).List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	buildConfigs := []*buildv1.BuildConfig{}
-	for i := range buildConfigList.Items {
-		buildConfigs = append(buildConfigs, &buildConfigList.Items[i])
-	}
-
-	buildList, err := o.BuildClient.Builds(o.Namespace).List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	builds := []*buildv1.Build{}
-	for i := range buildList.Items {
-		builds = append(builds, &buildList.Items[i])
-	}
-
-	options := PrunerOptions{
-		KeepYoungerThan: o.KeepYoungerThan,
-		Orphans:         o.Orphans,
-		KeepComplete:    o.KeepComplete,
-		KeepFailed:      o.KeepFailed,
-		BuildConfigs:    buildConfigs,
-		Builds:          builds,
-	}
-	pruner := NewPruner(options)
-
-	w := tabwriter.NewWriter(o.Out, 10, 4, 3, ' ', 0)
-	defer w.Flush()
-
-	buildDeleter := &describingBuildDeleter{w: w}
-
-	if o.Confirm {
-		buildDeleter.delegate = NewBuildDeleter(o.BuildClient)
-	} else {
-		fmt.Fprintln(os.Stderr, "Dry run enabled - no modifications will be made. Add --confirm to remove builds")
-	}
-
-	return pruner.Prune(buildDeleter)
-}
-
-// describingBuildDeleter prints information about each build it removes.
-// If a delegate exists, its DeleteBuild function is invoked prior to returning.
-type describingBuildDeleter struct {
-	w             io.Writer
-	delegate      BuildDeleter
-	headerPrinted bool
-}
-
-var _ BuildDeleter = &describingBuildDeleter{}
-
-func (p *describingBuildDeleter) DeleteBuild(build *buildv1.Build) error {
-	if !p.headerPrinted {
-		p.headerPrinted = true
-		fmt.Fprintln(p.w, "NAMESPACE\tNAME")
-	}
-
-	fmt.Fprintf(p.w, "%s\t%s\n", build.Namespace, build.Name)
-
-	if p.delegate == nil {
-		return nil
-	}
-
-	if err := p.delegate.DeleteBuild(build); err != nil {
-		return err
-	}
-
-	return nil
-}
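The dry-run behavior falls out of the decorator above: with a nil delegate it only prints, and --confirm swaps in the API-backed deleter. A minimal sketch, assuming buildClient and pruner were constructed as in Run:

    w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)
    defer w.Flush()

    // Dry run: prints NAMESPACE/NAME rows, deletes nothing.
    dryRun := &describingBuildDeleter{w: w}

    // Confirmed run: prints each row, then forwards to the real deleter.
    confirmed := &describingBuildDeleter{w: w, delegate: NewBuildDeleter(buildClient)}

    _ = pruner.Prune(dryRun)    // preview only
    _ = pruner.Prune(confirmed) // actually delete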
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/builds_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/builds_test.go
deleted file mode 100644
index daa55178e023..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/builds_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package builds
-
-import (
-	"testing"
-
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	clienttesting "k8s.io/client-go/testing"
-
-	fakebuildv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake"
-)
-
-func TestBuildPruneNamespaced(t *testing.T) {
-	osFake := &fakebuildv1client.FakeBuildV1{Fake: &clienttesting.Fake{}}
-	opts := &PruneBuildsOptions{
-		Namespace: "foo",
-
-		BuildClient: osFake,
-		IOStreams:   genericclioptions.NewTestIOStreamsDiscard(),
-	}
-
-	if err := opts.Run(); err != nil {
-		t.Errorf("Unexpected error: %v", err)
-	}
-
-	if len(osFake.Actions()) == 0 {
-		t.Errorf("Missing get build actions")
-	}
-	for _, a := range osFake.Actions() {
-		if a.GetNamespace() != "foo" {
-			t.Errorf("Unexpected namespace while pruning %s: %s", a.GetResource(), a.GetNamespace())
-		}
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/data.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/data.go
deleted file mode 100644
index ea3f639d5bc6..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/data.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package builds
-
-import (
-	"fmt"
-	"time"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/tools/cache"
-
-	buildv1 "github.com/openshift/api/build/v1"
-)
-
-// BuildByBuildConfigIndexFunc indexes Build items by their associated BuildConfig; builds with no config are indexed under the key "orphan"
-func BuildByBuildConfigIndexFunc(obj interface{}) ([]string, error) {
-	build, ok := obj.(*buildv1.Build)
-	if !ok {
-		return nil, fmt.Errorf("not a build: %v", build)
-	}
-	config := build.Status.Config
-	if config == nil {
-		return []string{"orphan"}, nil
-	}
-	return []string{config.Namespace + "/" + config.Name}, nil
-}
-
-// Filter filters the set of objects
-type Filter interface {
-	Filter(builds []*buildv1.Build) []*buildv1.Build
-}
-
-// andFilter ands a set of predicate functions to know if it should be included in the return set
-type andFilter struct {
-	filterPredicates []FilterPredicate
-}
-
-// Filter ands the set of predicates evaluated against each Build to make a filtered set
-func (a *andFilter) Filter(builds []*buildv1.Build) []*buildv1.Build {
-	results := []*buildv1.Build{}
-	for _, build := range builds {
-		include := true
-		for _, filterPredicate := range a.filterPredicates {
-			include = include && filterPredicate(build)
-		}
-		if include {
-			results = append(results, build)
-		}
-	}
-	return results
-}
-
-// FilterPredicate is a function that returns true if the object should be included in the filtered set
-type FilterPredicate func(build *buildv1.Build) bool
-
-// NewFilterBeforePredicate returns a predicate that is true for builds created before the current time minus the specified duration
-func NewFilterBeforePredicate(d time.Duration) FilterPredicate {
-	now := metav1.Now()
-	before := metav1.NewTime(now.Time.Add(-1 * d))
-	return func(build *buildv1.Build) bool {
-		return build.CreationTimestamp.Before(&before)
-	}
-}
-
-// DataSet provides functions for working with build data
-type DataSet interface {
-	GetBuildConfig(build *buildv1.Build) (*buildv1.BuildConfig, bool, error)
-	ListBuildConfigs() ([]*buildv1.BuildConfig, error)
-	ListBuilds() ([]*buildv1.Build, error)
-	ListBuildsByBuildConfig(buildConfig *buildv1.BuildConfig) ([]*buildv1.Build, error)
-}
-
-type dataSet struct {
-	buildConfigStore cache.Store
-	buildIndexer     cache.Indexer
-}
-
-// NewDataSet returns a DataSet over the specified items
-func NewDataSet(buildConfigs []*buildv1.BuildConfig, builds []*buildv1.Build) DataSet {
-	buildConfigStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
-	for _, buildConfig := range buildConfigs {
-		buildConfigStore.Add(buildConfig)
-	}
-
-	buildIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
-		"buildConfig": BuildByBuildConfigIndexFunc,
-	})
-	for _, build := range builds {
-		buildIndexer.Add(build)
-	}
-
-	return &dataSet{
-		buildConfigStore: buildConfigStore,
-		buildIndexer:     buildIndexer,
-	}
-}
-
-func (d *dataSet) GetBuildConfig(build *buildv1.Build) (*buildv1.BuildConfig, bool, error) {
-	config := build.Status.Config
-	if config == nil {
-		return nil, false, nil
-	}
-
-	var buildConfig *buildv1.BuildConfig
-	key := &buildv1.BuildConfig{ObjectMeta: metav1.ObjectMeta{Name: config.Name, Namespace: config.Namespace}}
-	item, exists, err := d.buildConfigStore.Get(key)
-	if exists {
-		buildConfig = item.(*buildv1.BuildConfig)
-	}
-	return buildConfig, exists, err
-}
-
-func (d *dataSet) ListBuildConfigs() ([]*buildv1.BuildConfig, error) {
-	results := []*buildv1.BuildConfig{}
-	for _, item := range d.buildConfigStore.List() {
-		results = append(results, item.(*buildv1.BuildConfig))
-	}
-	return results, nil
-}
-
-func (d *dataSet) ListBuilds() ([]*buildv1.Build, error) {
-	results := []*buildv1.Build{}
-	for _, item := range d.buildIndexer.List() {
-		results = append(results, item.(*buildv1.Build))
-	}
-	return results, nil
-}
-
-func (d *dataSet) ListBuildsByBuildConfig(buildConfig *buildv1.BuildConfig) ([]*buildv1.Build, error) {
-	results := []*buildv1.Build{}
-	key := &buildv1.Build{}
-	key.Status.Config = &corev1.ObjectReference{Name: buildConfig.Name, Namespace: buildConfig.Namespace}
-	items, err := d.buildIndexer.Index("buildConfig", key)
-	if err != nil {
-		return nil, err
-	}
-	for _, item := range items {
-		results = append(results, item.(*buildv1.Build))
-	}
-	return results, nil
-}
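Underneath DataSet is client-go's cache.Indexer with the custom index function from the top of this file. A compact sketch of the same lookup without the wrapper; someBuild and the probe values are illustrative:

    indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
        "buildConfig": BuildByBuildConfigIndexFunc,
    })
    _ = indexer.Add(someBuild) // indexed under "<ns>/<name>" of its config, or "orphan"

    // To query, probe with a synthetic Build carrying the same Status.Config
    // reference, exactly as ListBuildsByBuildConfig does above.
    probe := &buildv1.Build{}
    probe.Status.Config = &corev1.ObjectReference{Namespace: "a", Name: "build-config-1"}
    matches, err := indexer.Index("buildConfig", probe)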
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/data_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/data_test.go
deleted file mode 100644
index 5d84ec53a77b..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/data_test.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package builds
-
-import (
-	"reflect"
-	"testing"
-	"time"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
-
-	buildv1 "github.com/openshift/api/build/v1"
-)
-
-func mockBuildConfig(namespace, name string) *buildv1.BuildConfig {
-	return &buildv1.BuildConfig{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}
-}
-
-func withCreated(build *buildv1.Build, creationTimestamp metav1.Time) *buildv1.Build {
-	build.CreationTimestamp = creationTimestamp
-	return build
-}
-
-func withStatus(build *buildv1.Build, status buildv1.BuildPhase) *buildv1.Build {
-	build.Status.Phase = status
-	return build
-}
-
-func mockBuild(namespace, name string, buildConfig *buildv1.BuildConfig) *buildv1.Build {
-	build := &buildv1.Build{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}
-	if buildConfig != nil {
-		build.Status.Config = &corev1.ObjectReference{
-			Name:      buildConfig.Name,
-			Namespace: buildConfig.Namespace,
-		}
-	}
-	build.Status.Phase = buildv1.BuildPhaseNew
-	return build
-}
-
-func TestBuildByBuildConfigIndexFunc(t *testing.T) {
-	buildWithConfig := &buildv1.Build{
-		Status: buildv1.BuildStatus{
-			Config: &corev1.ObjectReference{
-				Name:      "buildConfigName",
-				Namespace: "buildConfigNamespace",
-			},
-		},
-	}
-	actualKey, err := BuildByBuildConfigIndexFunc(buildWithConfig)
-	if err != nil {
-		t.Errorf("Unexpected error %v", err)
-	}
-	expectedKey := []string{buildWithConfig.Status.Config.Namespace + "/" + buildWithConfig.Status.Config.Name}
-	if !reflect.DeepEqual(actualKey, expectedKey) {
-		t.Errorf("expected %#v, actual %#v", expectedKey, actualKey)
-	}
-	buildWithNoConfig := &buildv1.Build{}
-	actualKey, err = BuildByBuildConfigIndexFunc(buildWithNoConfig)
-	if err != nil {
-		t.Errorf("Unexpected error %v", err)
-	}
-	expectedKey = []string{"orphan"}
-	if !reflect.DeepEqual(actualKey, expectedKey) {
-		t.Errorf("expected %v, actual %v", expectedKey, actualKey)
-	}
-}
-
-func TestFilterBeforePredicate(t *testing.T) {
-	youngerThan := time.Hour
-	now := metav1.Now()
-	old := metav1.NewTime(now.Time.Add(-1 * youngerThan))
-	builds := []*buildv1.Build{
-		{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:              "old",
-				CreationTimestamp: old,
-			},
-		},
-		{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:              "new",
-				CreationTimestamp: now,
-			},
-		},
-	}
-	filter := &andFilter{
-		filterPredicates: []FilterPredicate{NewFilterBeforePredicate(youngerThan)},
-	}
-	result := filter.Filter(builds)
-	if len(result) != 1 {
-		t.Errorf("Unexpected number of results")
-	}
-	if expected, actual := "old", result[0].Name; expected != actual {
-		t.Errorf("expected %v, actual %v", expected, actual)
-	}
-}
-
-func TestEmptyDataSet(t *testing.T) {
-	builds := []*buildv1.Build{}
-	buildConfigs := []*buildv1.BuildConfig{}
-	dataSet := NewDataSet(buildConfigs, builds)
-	_, exists, err := dataSet.GetBuildConfig(&buildv1.Build{})
-	if exists || err != nil {
-		t.Errorf("Unexpected result %v, %v", exists, err)
-	}
-	buildConfigResults, err := dataSet.ListBuildConfigs()
-	if err != nil {
-		t.Errorf("Unexpected result %v", err)
-	}
-	if len(buildConfigResults) != 0 {
-		t.Errorf("Unexpected result %v", buildConfigResults)
-	}
-	buildResults, err := dataSet.ListBuilds()
-	if err != nil {
-		t.Errorf("Unexpected result %v", err)
-	}
-	if len(buildResults) != 0 {
-		t.Errorf("Unexpected result %v", buildResults)
-	}
-	buildResults, err = dataSet.ListBuildsByBuildConfig(&buildv1.BuildConfig{})
-	if err != nil {
-		t.Errorf("Unexpected result %v", err)
-	}
-	if len(buildResults) != 0 {
-		t.Errorf("Unexpected result %v", buildResults)
-	}
-}
-
-func TestPopulatedDataSet(t *testing.T) {
-	buildConfigs := []*buildv1.BuildConfig{
-		mockBuildConfig("a", "build-config-1"),
-		mockBuildConfig("b", "build-config-2"),
-	}
-	builds := []*buildv1.Build{
-		mockBuild("a", "build-1", buildConfigs[0]),
-		mockBuild("a", "build-2", buildConfigs[0]),
-		mockBuild("b", "build-3", buildConfigs[1]),
-		mockBuild("c", "build-4", nil),
-	}
-	dataSet := NewDataSet(buildConfigs, builds)
-	for _, build := range builds {
-		buildConfig, exists, err := dataSet.GetBuildConfig(build)
-		if build.Status.Config != nil {
-			if err != nil {
-				t.Errorf("Item %v, unexpected error: %v", build, err)
-			}
-			if !exists {
-				t.Errorf("Item %v, unexpected result: %v", build, exists)
-			}
-			if expected, actual := build.Status.Config.Name, buildConfig.Name; expected != actual {
-				t.Errorf("expected %v, actual %v", expected, actual)
-			}
-			if expected, actual := build.Status.Config.Namespace, buildConfig.Namespace; expected != actual {
-				t.Errorf("expected %v, actual %v", expected, actual)
-			}
-		} else {
-			if err != nil {
-				t.Errorf("Item %v, unexpected error: %v", build, err)
-			}
-			if exists {
-				t.Errorf("Item %v, unexpected result: %v", build, exists)
-			}
-		}
-	}
-	expectedNames := sets.NewString("build-1", "build-2")
-	buildResults, err := dataSet.ListBuildsByBuildConfig(buildConfigs[0])
-	if err != nil {
-		t.Errorf("Unexpected result %v", err)
-	}
-	if len(buildResults) != len(expectedNames) {
-		t.Errorf("Unexpected result %v", buildResults)
-	}
-	for _, build := range buildResults {
-		if !expectedNames.Has(build.Name) {
-			t.Errorf("Unexpected name: %v", build.Name)
-		}
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/prune.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/prune.go
deleted file mode 100644
index 08ce98907216..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/prune.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package builds
-
-import (
-	"time"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/klog"
-
-	buildv1 "github.com/openshift/api/build/v1"
-	buildv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1"
-)
-
-type Pruner interface {
-	// Prune removes the builds identified as candidates for pruning by the
-	// pruning algorithm.
-	Prune(deleter BuildDeleter) error
-}
-
-type BuildDeleter interface {
-	DeleteBuild(build *buildv1.Build) error
-}
-
-// pruner is an object that knows how to prune a data set
-type pruner struct {
-	resolver Resolver
-}
-
-var _ Pruner = &pruner{}
-
-// PrunerOptions contains the fields used to initialize a new Pruner.
-type PrunerOptions struct {
-	// KeepYoungerThan indicates the minimum age a Build must be to be a
-	// candidate for pruning.
-	KeepYoungerThan time.Duration
-	// Orphans, if true, includes inactive orphan builds in the candidate prune set.
-	Orphans bool
-	// KeepComplete is the number of the most recent complete builds to preserve per BuildConfig.
-	KeepComplete int
-	// KeepFailed is the number of the most recent failed builds to preserve per BuildConfig.
-	KeepFailed int
-	// BuildConfigs is the entire list of buildconfigs across all namespaces in the cluster.
-	BuildConfigs []*buildv1.BuildConfig
-	// Builds is the entire list of builds across all namespaces in the cluster.
-	Builds []*buildv1.Build
-}
-
-// NewPruner returns a Pruner over specified data using specified options.
-func NewPruner(options PrunerOptions) Pruner {
-	klog.V(1).Infof("Creating build pruner with keepYoungerThan=%v, orphans=%v, keepComplete=%v, keepFailed=%v",
-		options.KeepYoungerThan, options.Orphans, options.KeepComplete, options.KeepFailed)
-
-	filter := &andFilter{
-		filterPredicates: []FilterPredicate{NewFilterBeforePredicate(options.KeepYoungerThan)},
-	}
-	builds := filter.Filter(options.Builds)
-	dataSet := NewDataSet(options.BuildConfigs, builds)
-
-	resolvers := []Resolver{}
-	if options.Orphans {
-		inactiveBuildStatus := []buildv1.BuildPhase{
-			buildv1.BuildPhaseCancelled,
-			buildv1.BuildPhaseComplete,
-			buildv1.BuildPhaseError,
-			buildv1.BuildPhaseFailed,
-		}
-		resolvers = append(resolvers, NewOrphanBuildResolver(dataSet, inactiveBuildStatus))
-	}
-	resolvers = append(resolvers, NewPerBuildConfigResolver(dataSet, options.KeepComplete, options.KeepFailed))
-
-	return &pruner{
-		resolver: &mergeResolver{resolvers: resolvers},
-	}
-}
-
-// Prune will visit each item in the prunable set and invoke the associated BuildDeleter.
-func (p *pruner) Prune(deleter BuildDeleter) error {
-	builds, err := p.resolver.Resolve()
-	if err != nil {
-		return err
-	}
-	for _, build := range builds {
-		if err := deleter.DeleteBuild(build); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// NewBuildDeleter creates a new buildDeleter.
-func NewBuildDeleter(client buildv1client.BuildsGetter) BuildDeleter {
-	return &buildDeleter{
-		client: client,
-	}
-}
-
-type buildDeleter struct {
-	client buildv1client.BuildsGetter
-}
-
-var _ BuildDeleter = &buildDeleter{}
-
-func (c *buildDeleter) DeleteBuild(build *buildv1.Build) error {
-	return c.client.Builds(build.Namespace).Delete(build.Name, &metav1.DeleteOptions{})
-}
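Putting the file together, a minimal end-to-end sketch; buildConfigs, builds, and buildClient are assumed to have been listed and constructed as in PruneBuildsOptions.Run:

    pruner := NewPruner(PrunerOptions{
        KeepYoungerThan: time.Hour, // never touch builds younger than an hour
        Orphans:         true,      // also prune inactive builds with no BuildConfig
        KeepComplete:    5,
        KeepFailed:      1,
        BuildConfigs:    buildConfigs,
        Builds:          builds,
    })
    // Prune stops at the first deletion error; surviving candidates are simply
    // picked up again on the next run.
    if err := pruner.Prune(NewBuildDeleter(buildClient)); err != nil {
        return err
    }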
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/prune_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/prune_test.go
deleted file mode 100644
index 6b0adc935baf..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/prune_test.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package builds
-
-import (
-	"sort"
-	"testing"
-	"time"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
-
-	buildv1 "github.com/openshift/api/build/v1"
-)
-
-type mockDeleteRecorder struct {
-	set sets.String
-	err error
-}
-
-var _ BuildDeleter = &mockDeleteRecorder{}
-
-func (m *mockDeleteRecorder) DeleteBuild(build *buildv1.Build) error {
-	m.set.Insert(build.Name)
-	return m.err
-}
-
-func (m *mockDeleteRecorder) Verify(t *testing.T, expected sets.String) {
-	if len(m.set) != len(expected) || !m.set.HasAll(expected.List()...) {
-		expectedValues := expected.List()
-		actualValues := m.set.List()
-		sort.Strings(expectedValues)
-		sort.Strings(actualValues)
-		t.Errorf("expected \n\t%v\n, actual \n\t%v\n", expectedValues, actualValues)
-	}
-}
-
-func TestPruneTask(t *testing.T) {
-	BuildPhaseOptions := []buildv1.BuildPhase{
-		buildv1.BuildPhaseCancelled,
-		buildv1.BuildPhaseComplete,
-		buildv1.BuildPhaseError,
-		buildv1.BuildPhaseFailed,
-		buildv1.BuildPhaseNew,
-		buildv1.BuildPhasePending,
-		buildv1.BuildPhaseRunning,
-	}
-	BuildPhaseFilter := []buildv1.BuildPhase{
-		buildv1.BuildPhaseCancelled,
-		buildv1.BuildPhaseComplete,
-		buildv1.BuildPhaseError,
-		buildv1.BuildPhaseFailed,
-	}
-	BuildPhaseFilterSet := sets.String{}
-	for _, BuildPhase := range BuildPhaseFilter {
-		BuildPhaseFilterSet.Insert(string(BuildPhase))
-	}
-
-	for _, orphans := range []bool{true, false} {
-		for _, BuildPhaseOption := range BuildPhaseOptions {
-			keepYoungerThan := time.Hour
-
-			now := metav1.Now()
-			old := metav1.NewTime(now.Time.Add(-1 * keepYoungerThan))
-
-			buildConfigs := []*buildv1.BuildConfig{}
-			builds := []*buildv1.Build{}
-
-			buildConfig := mockBuildConfig("a", "build-config")
-			buildConfigs = append(buildConfigs, buildConfig)
-
-			builds = append(builds, withCreated(withStatus(mockBuild("a", "build-1", buildConfig), BuildPhaseOption), now))
-			builds = append(builds, withCreated(withStatus(mockBuild("a", "build-2", buildConfig), BuildPhaseOption), old))
-			builds = append(builds, withCreated(withStatus(mockBuild("a", "orphan-build-1", nil), BuildPhaseOption), now))
-			builds = append(builds, withCreated(withStatus(mockBuild("a", "orphan-build-2", nil), BuildPhaseOption), old))
-
-			keepComplete := 1
-			keepFailed := 1
-			expectedValues := sets.String{}
-			filter := &andFilter{
-				filterPredicates: []FilterPredicate{NewFilterBeforePredicate(keepYoungerThan)},
-			}
-			dataSet := NewDataSet(buildConfigs, filter.Filter(builds))
-			resolver := NewPerBuildConfigResolver(dataSet, keepComplete, keepFailed)
-			if orphans {
-				resolver = &mergeResolver{
-					resolvers: []Resolver{resolver, NewOrphanBuildResolver(dataSet, BuildPhaseFilter)},
-				}
-			}
-			expectedBuilds, err := resolver.Resolve()
-			if err != nil {
-				t.Errorf("Unexpected error %v", err)
-			}
-			for _, build := range expectedBuilds {
-				expectedValues.Insert(build.Name)
-			}
-
-			recorder := &mockDeleteRecorder{set: sets.String{}}
-
-			options := PrunerOptions{
-				KeepYoungerThan: keepYoungerThan,
-				Orphans:         orphans,
-				KeepComplete:    keepComplete,
-				KeepFailed:      keepFailed,
-				BuildConfigs:    buildConfigs,
-				Builds:          builds,
-			}
-			pruner := NewPruner(options)
-			if err := pruner.Prune(recorder); err != nil {
-				t.Errorf("Unexpected error %v", err)
-			}
-			recorder.Verify(t, expectedValues)
-		}
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/resolvers.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/resolvers.go
deleted file mode 100644
index 4dcb2266c264..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/resolvers.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package builds
-
-import (
-	"sort"
-
-	"k8s.io/apimachinery/pkg/util/sets"
-
-	buildv1 "github.com/openshift/api/build/v1"
-	buildapihelpers "github.com/openshift/oc/pkg/helpers/build"
-)
-
-// Resolver knows how to resolve the set of candidate objects to prune
-type Resolver interface {
-	Resolve() ([]*buildv1.Build, error)
-}
-
-// mergeResolver merges the set of results from multiple resolvers
-type mergeResolver struct {
-	resolvers []Resolver
-}
-
-func (m *mergeResolver) Resolve() ([]*buildv1.Build, error) {
-	results := []*buildv1.Build{}
-	for _, resolver := range m.resolvers {
-		builds, err := resolver.Resolve()
-		if err != nil {
-			return nil, err
-		}
-		results = append(results, builds...)
-	}
-	return results, nil
-}
-
-// NewOrphanBuildResolver returns a Resolver that matches Build objects that have no associated BuildConfig and whose BuildPhase is in the given filter
-func NewOrphanBuildResolver(dataSet DataSet, BuildPhaseFilter []buildv1.BuildPhase) Resolver {
-	filter := sets.NewString()
-	for _, BuildPhase := range BuildPhaseFilter {
-		filter.Insert(string(BuildPhase))
-	}
-	return &orphanBuildResolver{
-		dataSet:          dataSet,
-		BuildPhaseFilter: filter,
-	}
-}
-
-// orphanBuildResolver resolves orphan builds that match the specified filter
-type orphanBuildResolver struct {
-	dataSet          DataSet
-	BuildPhaseFilter sets.String
-}
-
-// Resolve the matching set of Build objects
-func (o *orphanBuildResolver) Resolve() ([]*buildv1.Build, error) {
-	builds, err := o.dataSet.ListBuilds()
-	if err != nil {
-		return nil, err
-	}
-
-	results := []*buildv1.Build{}
-	for _, build := range builds {
-		if !o.BuildPhaseFilter.Has(string(build.Status.Phase)) {
-			continue
-		}
-		isOrphan := false
-		if build.Status.Config == nil {
-			isOrphan = true
-		} else {
-			_, exists, _ := o.dataSet.GetBuildConfig(build)
-			isOrphan = !exists
-		}
-		if isOrphan {
-			results = append(results, build)
-		}
-	}
-	return results, nil
-}
-
-type perBuildConfigResolver struct {
-	dataSet      DataSet
-	keepComplete int
-	keepFailed   int
-}
-
-// NewPerBuildConfigResolver returns a Resolver that selects Builds to prune per BuildConfig
-func NewPerBuildConfigResolver(dataSet DataSet, keepComplete int, keepFailed int) Resolver {
-	return &perBuildConfigResolver{
-		dataSet:      dataSet,
-		keepComplete: keepComplete,
-		keepFailed:   keepFailed,
-	}
-}
-
-func (o *perBuildConfigResolver) Resolve() ([]*buildv1.Build, error) {
-	buildConfigs, err := o.dataSet.ListBuildConfigs()
-	if err != nil {
-		return nil, err
-	}
-
-	completeStates := sets.NewString(string(buildv1.BuildPhaseComplete))
-	failedStates := sets.NewString(string(buildv1.BuildPhaseFailed), string(buildv1.BuildPhaseError), string(buildv1.BuildPhaseCancelled))
-
-	prunableBuilds := []*buildv1.Build{}
-	for _, buildConfig := range buildConfigs {
-		builds, err := o.dataSet.ListBuildsByBuildConfig(buildConfig)
-		if err != nil {
-			return nil, err
-		}
-
-		var completeBuilds, failedBuilds []*buildv1.Build
-		for _, build := range builds {
-			if completeStates.Has(string(build.Status.Phase)) {
-				completeBuilds = append(completeBuilds, build)
-			} else if failedStates.Has(string(build.Status.Phase)) {
-				failedBuilds = append(failedBuilds, build)
-			}
-		}
-		sort.Sort(sort.Reverse(buildapihelpers.BuildPtrSliceByCreationTimestamp(completeBuilds)))
-		sort.Sort(sort.Reverse(buildapihelpers.BuildPtrSliceByCreationTimestamp(failedBuilds)))
-
-		if o.keepComplete >= 0 && o.keepComplete < len(completeBuilds) {
-			prunableBuilds = append(prunableBuilds, completeBuilds[o.keepComplete:]...)
-		}
-		if o.keepFailed >= 0 && o.keepFailed < len(failedBuilds) {
-			prunableBuilds = append(prunableBuilds, failedBuilds[o.keepFailed:]...)
-		}
-	}
-	return prunableBuilds, nil
-}
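The per-BuildConfig rule above reduces to: sort newest-first, keep the first N, prune the tail. A toy illustration with plain ints standing in for creation timestamps:

    stamps := []int{9, 3, 7, 1}                    // newest == largest
    sort.Sort(sort.Reverse(sort.IntSlice(stamps))) // [9 7 3 1]
    keep := 2
    if keep >= 0 && keep < len(stamps) {
        fmt.Println(stamps[keep:]) // [3 1]; these would be the prunable entries
    }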
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/resolvers_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/resolvers_test.go
deleted file mode 100644
index 85e6db1f3c96..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/builds/resolvers_test.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package builds
-
-import (
-	"fmt"
-	"sort"
-	"testing"
-	"time"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
-
-	buildv1 "github.com/openshift/api/build/v1"
-	buildapihelpers "github.com/openshift/oc/pkg/helpers/build"
-)
-
-type mockResolver struct {
-	builds []*buildv1.Build
-	err    error
-}
-
-func (m *mockResolver) Resolve() ([]*buildv1.Build, error) {
-	return m.builds, m.err
-}
-
-func TestMergeResolver(t *testing.T) {
-	resolverA := &mockResolver{
-		builds: []*buildv1.Build{
-			mockBuild("a", "b", nil),
-		},
-	}
-	resolverB := &mockResolver{
-		builds: []*buildv1.Build{
-			mockBuild("c", "d", nil),
-		},
-	}
-	resolver := &mergeResolver{resolvers: []Resolver{resolverA, resolverB}}
-	results, err := resolver.Resolve()
-	if err != nil {
-		t.Errorf("Unexpected error %v", err)
-	}
-	if len(results) != 2 {
-		t.Errorf("Unexpected results %v", results)
-	}
-	expectedNames := sets.NewString("b", "d")
-	for _, build := range results {
-		if !expectedNames.Has(build.Name) {
-			t.Errorf("Unexpected name %v", build.Name)
-		}
-	}
-}
-
-func TestOrphanBuildResolver(t *testing.T) {
-	activeBuildConfig := mockBuildConfig("a", "active-build-config")
-	inactiveBuildConfig := mockBuildConfig("a", "inactive-build-config")
-
-	buildConfigs := []*buildv1.BuildConfig{activeBuildConfig}
-	builds := []*buildv1.Build{}
-
-	expectedNames := sets.String{}
-	BuildPhaseOptions := []buildv1.BuildPhase{
-		buildv1.BuildPhaseCancelled,
-		buildv1.BuildPhaseComplete,
-		buildv1.BuildPhaseError,
-		buildv1.BuildPhaseFailed,
-		buildv1.BuildPhaseNew,
-		buildv1.BuildPhasePending,
-		buildv1.BuildPhaseRunning,
-	}
-	BuildPhaseFilter := []buildv1.BuildPhase{
-		buildv1.BuildPhaseCancelled,
-		buildv1.BuildPhaseComplete,
-		buildv1.BuildPhaseError,
-		buildv1.BuildPhaseFailed,
-	}
-	BuildPhaseFilterSet := sets.String{}
-	for _, BuildPhase := range BuildPhaseFilter {
-		BuildPhaseFilterSet.Insert(string(BuildPhase))
-	}
-
-	for _, BuildPhaseOption := range BuildPhaseOptions {
-		builds = append(builds, withStatus(mockBuild("a", string(BuildPhaseOption)+"-active", activeBuildConfig), BuildPhaseOption))
-		builds = append(builds, withStatus(mockBuild("a", string(BuildPhaseOption)+"-inactive", inactiveBuildConfig), BuildPhaseOption))
-		builds = append(builds, withStatus(mockBuild("a", string(BuildPhaseOption)+"-orphan", nil), BuildPhaseOption))
-		if BuildPhaseFilterSet.Has(string(BuildPhaseOption)) {
-			expectedNames.Insert(string(BuildPhaseOption) + "-inactive")
-			expectedNames.Insert(string(BuildPhaseOption) + "-orphan")
-		}
-	}
-
-	dataSet := NewDataSet(buildConfigs, builds)
-	resolver := NewOrphanBuildResolver(dataSet, BuildPhaseFilter)
-	results, err := resolver.Resolve()
-	if err != nil {
-		t.Errorf("Unexpected error %v", err)
-	}
-	foundNames := sets.String{}
-	for _, result := range results {
-		foundNames.Insert(result.Name)
-	}
-	if len(foundNames) != len(expectedNames) || !expectedNames.HasAll(foundNames.List()...) {
-		t.Errorf("expected %v, actual %v", expectedNames, foundNames)
-	}
-}
-
-func TestPerBuildConfigResolver(t *testing.T) {
-	BuildPhaseOptions := []buildv1.BuildPhase{
-		buildv1.BuildPhaseCancelled,
-		buildv1.BuildPhaseComplete,
-		buildv1.BuildPhaseError,
-		buildv1.BuildPhaseFailed,
-		buildv1.BuildPhaseNew,
-		buildv1.BuildPhasePending,
-		buildv1.BuildPhaseRunning,
-	}
-	buildConfigs := []*buildv1.BuildConfig{
-		mockBuildConfig("a", "build-config-1"),
-		mockBuildConfig("b", "build-config-2"),
-	}
-	buildsPerStatus := 100
-	builds := []*buildv1.Build{}
-	for _, buildConfig := range buildConfigs {
-		for _, BuildPhaseOption := range BuildPhaseOptions {
-			for i := 0; i < buildsPerStatus; i++ {
-				build := withStatus(mockBuild(buildConfig.Namespace, fmt.Sprintf("%v-%v-%v", buildConfig.Name, BuildPhaseOption, i), buildConfig), BuildPhaseOption)
-				builds = append(builds, build)
-			}
-		}
-	}
-
-	now := metav1.Now()
-	for i := range builds {
-		creationTimestamp := metav1.NewTime(now.Time.Add(-1 * time.Duration(i) * time.Hour))
-		builds[i].CreationTimestamp = creationTimestamp
-	}
-
-	// test the number to keep across a range of values
-	for keep := 0; keep < buildsPerStatus*2; keep++ {
-		dataSet := NewDataSet(buildConfigs, builds)
-
-		expectedNames := sets.String{}
-		buildCompleteStatusFilterSet := sets.NewString(string(buildv1.BuildPhaseComplete))
-		buildFailedStatusFilterSet := sets.NewString(string(buildv1.BuildPhaseCancelled), string(buildv1.BuildPhaseError), string(buildv1.BuildPhaseFailed))
-
-		for _, buildConfig := range buildConfigs {
-			buildItems, err := dataSet.ListBuildsByBuildConfig(buildConfig)
-			if err != nil {
-				t.Errorf("Unexpected err %v", err)
-			}
-			var completeBuilds, failedBuilds []*buildv1.Build
-			for _, build := range buildItems {
-				if buildCompleteStatusFilterSet.Has(string(build.Status.Phase)) {
-					completeBuilds = append(completeBuilds, build)
-				} else if buildFailedStatusFilterSet.Has(string(build.Status.Phase)) {
-					failedBuilds = append(failedBuilds, build)
-				}
-			}
-			sort.Sort(sort.Reverse(buildapihelpers.BuildPtrSliceByCreationTimestamp(completeBuilds)))
-			sort.Sort(sort.Reverse(buildapihelpers.BuildPtrSliceByCreationTimestamp(failedBuilds)))
-			var purgeComplete, purgeFailed []*buildv1.Build
-			if keep >= 0 && keep < len(completeBuilds) {
-				purgeComplete = completeBuilds[keep:]
-			}
-			if keep >= 0 && keep < len(failedBuilds) {
-				purgeFailed = failedBuilds[keep:]
-			}
-			for _, build := range purgeComplete {
-				expectedNames.Insert(build.Name)
-			}
-			for _, build := range purgeFailed {
-				expectedNames.Insert(build.Name)
-			}
-		}
-
-		resolver := NewPerBuildConfigResolver(dataSet, keep, keep)
-		results, err := resolver.Resolve()
-		if err != nil {
-			t.Errorf("Unexpected error %v", err)
-		}
-		foundNames := sets.String{}
-		for _, result := range results {
-			foundNames.Insert(result.Name)
-		}
-		if len(foundNames) != len(expectedNames) || !expectedNames.HasAll(foundNames.List()...) {
-			expectedValues := expectedNames.List()
-			actualValues := foundNames.List()
-			sort.Strings(expectedValues)
-			sort.Strings(actualValues)
-			t.Errorf("keep %v\n, expected \n\t%v\n, actual \n\t%v\n", keep, expectedValues, actualValues)
-		}
-	}
-}
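
The resolver tests above compare pruning results as unordered name sets rather than ordered slices. A quick sketch of the sets.String comparison idiom they rely on, using the same apimachinery package:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	expected := sets.NewString("build-2", "build-3")
	found := sets.NewString("build-3", "build-2")

	// Equal length plus HasAll is the equality check the tests use.
	equal := len(found) == len(expected) && expected.HasAll(found.List()...)
	fmt.Println("sets match:", equal) // true
}
```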
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/data.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/data.go
deleted file mode 100644
index 37d417eeb935..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/data.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package deployments
-
-import (
-	"fmt"
-	"time"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/tools/cache"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-	"github.com/openshift/library-go/pkg/apps/appsutil"
-)
-
-// DeploymentByDeploymentConfigIndexFunc indexes Deployment items by their associated DeploymentConfig; items with no associated DeploymentConfig are indexed under the key "orphan"
-func DeploymentByDeploymentConfigIndexFunc(obj interface{}) ([]string, error) {
-	controller, ok := obj.(*corev1.ReplicationController)
-	if !ok {
-		return nil, fmt.Errorf("not a replication controller: %v", obj)
-	}
-	name := appsutil.DeploymentConfigNameFor(controller)
-	if len(name) == 0 {
-		return []string{"orphan"}, nil
-	}
-	return []string{controller.Namespace + "/" + name}, nil
-}
-
-// Filter filters the set of objects
-type Filter interface {
-	Filter(items []*corev1.ReplicationController) []*corev1.ReplicationController
-}
-
-// andFilter ANDs a set of predicate functions to decide whether an item should be included in the returned set
-type andFilter struct {
-	filterPredicates []FilterPredicate
-}
-
-// Filter evaluates every predicate against each item and keeps only the items that satisfy all of them
-func (a *andFilter) Filter(items []*corev1.ReplicationController) []*corev1.ReplicationController {
-	results := []*corev1.ReplicationController{}
-	for _, item := range items {
-		include := true
-		for _, filterPredicate := range a.filterPredicates {
-			include = include && filterPredicate(item)
-		}
-		if include {
-			results = append(results, item)
-		}
-	}
-	return results
-}
-
-// FilterPredicate is a function that returns true if the object should be included in the filtered set
-type FilterPredicate func(item *corev1.ReplicationController) bool
-
-// NewFilterBeforePredicate returns a predicate that is true if the item was created before the current time minus the specified duration
-func NewFilterBeforePredicate(d time.Duration) FilterPredicate {
-	now := metav1.Now()
-	before := metav1.NewTime(now.Time.Add(-1 * d))
-	return func(item *corev1.ReplicationController) bool {
-		return item.CreationTimestamp.Before(&before)
-	}
-}
-
-// FilterDeploymentsPredicate is a function that returns true if the replication controller is associated with a DeploymentConfig
-func FilterDeploymentsPredicate(item *corev1.ReplicationController) bool {
-	return len(appsutil.DeploymentConfigNameFor(item)) > 0
-}
-
-// FilterZeroReplicaSize is a function that returns true if the replication controller's desired and observed replica counts are 0
-func FilterZeroReplicaSize(item *corev1.ReplicationController) bool {
-	return *item.Spec.Replicas == 0 && item.Status.Replicas == 0
-}
-
-// DataSet provides functions for working with deployment data
-type DataSet interface {
-	GetDeploymentConfig(deployment *corev1.ReplicationController) (*appsv1.DeploymentConfig, bool, error)
-	ListDeploymentConfigs() ([]*appsv1.DeploymentConfig, error)
-	ListDeployments() ([]*corev1.ReplicationController, error)
-	ListDeploymentsByDeploymentConfig(config *appsv1.DeploymentConfig) ([]*corev1.ReplicationController, error)
-}
-
-type dataSet struct {
-	deploymentConfigStore cache.Store
-	deploymentIndexer     cache.Indexer
-}
-
-// NewDataSet returns a DataSet over the specified items
-func NewDataSet(deploymentConfigs []*appsv1.DeploymentConfig, deployments []*corev1.ReplicationController) DataSet {
-	deploymentConfigStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
-	for _, deploymentConfig := range deploymentConfigs {
-		deploymentConfigStore.Add(deploymentConfig)
-	}
-
-	deploymentIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
-		"deploymentConfig": DeploymentByDeploymentConfigIndexFunc,
-	})
-	for _, deployment := range deployments {
-		deploymentIndexer.Add(deployment)
-	}
-
-	return &dataSet{
-		deploymentConfigStore: deploymentConfigStore,
-		deploymentIndexer:     deploymentIndexer,
-	}
-}
-
-// GetDeploymentConfig gets the configuration for the given deployment
-func (d *dataSet) GetDeploymentConfig(controller *corev1.ReplicationController) (*appsv1.DeploymentConfig, bool, error) {
-	name := appsutil.DeploymentConfigNameFor(controller)
-	if len(name) == 0 {
-		return nil, false, nil
-	}
-
-	var deploymentConfig *appsv1.DeploymentConfig
-	key := &appsv1.DeploymentConfig{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: controller.Namespace}}
-	item, exists, err := d.deploymentConfigStore.Get(key)
-	if exists {
-		deploymentConfig = item.(*appsv1.DeploymentConfig)
-	}
-	return deploymentConfig, exists, err
-}
-
-// ListDeploymentConfigs returns a list of DeploymentConfigs
-func (d *dataSet) ListDeploymentConfigs() ([]*appsv1.DeploymentConfig, error) {
-	results := []*appsv1.DeploymentConfig{}
-	for _, item := range d.deploymentConfigStore.List() {
-		results = append(results, item.(*appsv1.DeploymentConfig))
-	}
-	return results, nil
-}
-
-// ListDeployments returns a list of deployments
-func (d *dataSet) ListDeployments() ([]*corev1.ReplicationController, error) {
-	results := []*corev1.ReplicationController{}
-	for _, item := range d.deploymentIndexer.List() {
-		results = append(results, item.(*corev1.ReplicationController))
-	}
-	return results, nil
-}
-
-// ListDeploymentsByDeploymentConfig returns a list of deployments for the provided configuration
-func (d *dataSet) ListDeploymentsByDeploymentConfig(deploymentConfig *appsv1.DeploymentConfig) ([]*corev1.ReplicationController, error) {
-	results := []*corev1.ReplicationController{}
-	key := &corev1.ReplicationController{
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace:   deploymentConfig.Namespace,
-			Annotations: map[string]string{appsv1.DeploymentConfigAnnotation: deploymentConfig.Name},
-		},
-	}
-	items, err := d.deploymentIndexer.Index("deploymentConfig", key)
-	if err != nil {
-		return nil, err
-	}
-	for _, item := range items {
-		results = append(results, item.(*corev1.ReplicationController))
-	}
-	return results, nil
-}
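
The dataSet above leans on client-go's cache.Indexer so that listing deployments for a single DeploymentConfig is a keyed lookup instead of a scan. A minimal sketch of that pattern; it assumes the annotation key below matches appsv1.DeploymentConfigAnnotation, and the index name "byConfig" is illustrative:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

// configAnnotation is assumed to match appsv1.DeploymentConfigAnnotation.
const configAnnotation = "openshift.io/deployment-config.name"

// byConfig groups replication controllers under "namespace/configName",
// mirroring DeploymentByDeploymentConfigIndexFunc.
func byConfig(obj interface{}) ([]string, error) {
	rc, ok := obj.(*corev1.ReplicationController)
	if !ok {
		return nil, fmt.Errorf("not a replication controller: %v", obj)
	}
	name := rc.Annotations[configAnnotation]
	if name == "" {
		return []string{"orphan"}, nil
	}
	return []string{rc.Namespace + "/" + name}, nil
}

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"byConfig": byConfig})
	indexer.Add(&corev1.ReplicationController{ObjectMeta: metav1.ObjectMeta{
		Namespace:   "a",
		Name:        "rc-1",
		Annotations: map[string]string{configAnnotation: "dc-1"},
	}})

	// Look up with a key object carrying the same namespace and annotation.
	key := &corev1.ReplicationController{ObjectMeta: metav1.ObjectMeta{
		Namespace:   "a",
		Annotations: map[string]string{configAnnotation: "dc-1"},
	}}
	items, err := indexer.Index("byConfig", key)
	fmt.Println(len(items), err) // 1 <nil>
}
```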
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/data_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/data_test.go
deleted file mode 100644
index 8f3ceec2c5bb..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/data_test.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package deployments
-
-import (
-	"reflect"
-	"testing"
-	"time"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-)
-
-func mockDeploymentConfig(namespace, name string) *appsv1.DeploymentConfig {
-	return &appsv1.DeploymentConfig{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}
-}
-
-func withSize(item *corev1.ReplicationController, replicas int32) *corev1.ReplicationController {
-	item.Spec.Replicas = &replicas
-	item.Status.Replicas = int32(replicas)
-	return item
-}
-
-func withCreated(item *corev1.ReplicationController, creationTimestamp metav1.Time) *corev1.ReplicationController {
-	item.CreationTimestamp = creationTimestamp
-	return item
-}
-
-func withStatus(item *corev1.ReplicationController, status appsv1.DeploymentStatus) *corev1.ReplicationController {
-	item.Annotations[appsv1.DeploymentStatusAnnotation] = string(status)
-	return item
-}
-
-func mockDeployment(namespace, name string, deploymentConfig *appsv1.DeploymentConfig) *corev1.ReplicationController {
-	zero := int32(0)
-	item := &corev1.ReplicationController{
-		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name, Annotations: map[string]string{}},
-		Spec:       corev1.ReplicationControllerSpec{Replicas: &zero},
-	}
-	if deploymentConfig != nil {
-		item.Annotations[appsv1.DeploymentConfigAnnotation] = deploymentConfig.Name
-	}
-	item.Annotations[appsv1.DeploymentStatusAnnotation] = string(appsv1.DeploymentStatusNew)
-	return item
-}
-
-func TestDeploymentByDeploymentConfigIndexFunc(t *testing.T) {
-	config := mockDeploymentConfig("a", "b")
-	deployment := mockDeployment("a", "c", config)
-	actualKey, err := DeploymentByDeploymentConfigIndexFunc(deployment)
-	if err != nil {
-		t.Errorf("Unexpected error %v", err)
-	}
-	expectedKey := []string{"a/b"}
-	if !reflect.DeepEqual(actualKey, expectedKey) {
-		t.Errorf("expected %v, actual %v", expectedKey, actualKey)
-	}
-	deploymentWithNoConfig := &corev1.ReplicationController{}
-	actualKey, err = DeploymentByDeploymentConfigIndexFunc(deploymentWithNoConfig)
-	if err != nil {
-		t.Errorf("Unexpected error %v", err)
-	}
-	expectedKey = []string{"orphan"}
-	if !reflect.DeepEqual(actualKey, expectedKey) {
-		t.Errorf("expected %v, actual %v", expectedKey, actualKey)
-	}
-}
-
-func TestFilterBeforePredicate(t *testing.T) {
-	youngerThan := time.Hour
-	now := metav1.Now()
-	old := metav1.NewTime(now.Time.Add(-1 * youngerThan))
-	items := []*corev1.ReplicationController{}
-	items = append(items, withCreated(mockDeployment("a", "old", nil), old))
-	items = append(items, withCreated(mockDeployment("a", "new", nil), now))
-	filter := &andFilter{
-		filterPredicates: []FilterPredicate{NewFilterBeforePredicate(youngerThan)},
-	}
-	result := filter.Filter(items)
-	if len(result) != 1 {
-		t.Errorf("Unexpected number of results")
-	}
-	if expected, actual := "old", result[0].Name; expected != actual {
-		t.Errorf("expected %v, actual %v", expected, actual)
-	}
-}
-
-func TestEmptyDataSet(t *testing.T) {
-	deployments := []*corev1.ReplicationController{}
-	deploymentConfigs := []*appsv1.DeploymentConfig{}
-	dataSet := NewDataSet(deploymentConfigs, deployments)
-	_, exists, err := dataSet.GetDeploymentConfig(&corev1.ReplicationController{})
-	if exists || err != nil {
-		t.Errorf("Unexpected result %v, %v", exists, err)
-	}
-	deploymentConfigResults, err := dataSet.ListDeploymentConfigs()
-	if err != nil {
-		t.Errorf("Unexpected result %v", err)
-	}
-	if len(deploymentConfigResults) != 0 {
-		t.Errorf("Unexpected result %v", deploymentConfigResults)
-	}
-	deploymentResults, err := dataSet.ListDeployments()
-	if err != nil {
-		t.Errorf("Unexpected result %v", err)
-	}
-	if len(deploymentResults) != 0 {
-		t.Errorf("Unexpected result %v", deploymentResults)
-	}
-	deploymentResults, err = dataSet.ListDeploymentsByDeploymentConfig(&appsv1.DeploymentConfig{})
-	if err != nil {
-		t.Errorf("Unexpected result %v", err)
-	}
-	if len(deploymentResults) != 0 {
-		t.Errorf("Unexpected result %v", deploymentResults)
-	}
-}
-
-func TestPopulatedDataSet(t *testing.T) {
-	deploymentConfigs := []*appsv1.DeploymentConfig{
-		mockDeploymentConfig("a", "deployment-config-1"),
-		mockDeploymentConfig("b", "deployment-config-2"),
-	}
-	deployments := []*corev1.ReplicationController{
-		mockDeployment("a", "deployment-1", deploymentConfigs[0]),
-		mockDeployment("a", "deployment-2", deploymentConfigs[0]),
-		mockDeployment("b", "deployment-3", deploymentConfigs[1]),
-		mockDeployment("c", "deployment-4", nil),
-	}
-	dataSet := NewDataSet(deploymentConfigs, deployments)
-	for _, deployment := range deployments {
-		deploymentConfig, exists, err := dataSet.GetDeploymentConfig(deployment)
-		config, hasConfig := deployment.Annotations[appsv1.DeploymentConfigAnnotation]
-		if hasConfig {
-			if err != nil {
-				t.Errorf("Item %v, unexpected error: %v", deployment, err)
-			}
-			if !exists {
-				t.Errorf("Item %v, unexpected result: %v", deployment, exists)
-			}
-			if expected, actual := config, deploymentConfig.Name; expected != actual {
-				t.Errorf("expected %v, actual %v", expected, actual)
-			}
-			if expected, actual := deployment.Namespace, deploymentConfig.Namespace; expected != actual {
-				t.Errorf("expected %v, actual %v", expected, actual)
-			}
-		} else {
-			if err != nil {
-				t.Errorf("Item %v, unexpected error: %v", deployment, err)
-			}
-			if exists {
-				t.Errorf("Item %v, unexpected result: %v", deployment, exists)
-			}
-		}
-	}
-	expectedNames := sets.NewString("deployment-1", "deployment-2")
-	deploymentResults, err := dataSet.ListDeploymentsByDeploymentConfig(deploymentConfigs[0])
-	if err != nil {
-		t.Errorf("Unexpected result %v", err)
-	}
-	if len(deploymentResults) != len(expectedNames) {
-		t.Errorf("Unexpected result %v", deploymentResults)
-	}
-	for _, deployment := range deploymentResults {
-		if !expectedNames.Has(deployment.Name) {
-			t.Errorf("Unexpected name: %v", deployment.Name)
-		}
-	}
-}
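
The fixtures in this test file are built from small chainable "with*" helpers (withStatus, withSize, withCreated) instead of one struct literal per case. A tiny sketch of that builder style under illustrative names; mockPod and its helpers are not part of the oc codebase:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func mockPod(ns, name string) *corev1.Pod {
	return &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: name, Labels: map[string]string{}}}
}

// Each helper mutates one field and returns the object, so calls can nest.
func withLabel(p *corev1.Pod, key, value string) *corev1.Pod {
	p.Labels[key] = value
	return p
}

func withPhase(p *corev1.Pod, phase corev1.PodPhase) *corev1.Pod {
	p.Status.Phase = phase
	return p
}

func main() {
	p := withPhase(withLabel(mockPod("a", "p1"), "tier", "web"), corev1.PodRunning)
	fmt.Println(p.Name, p.Labels["tier"], p.Status.Phase) // p1 web Running
}
```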
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/deployments.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/deployments.go
deleted file mode 100644
index 5b13d86aa9e5..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/deployments.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package deployments
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"text/tabwriter"
-	"time"
-
-	"github.com/spf13/cobra"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-	appsv1client "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1"
-)
-
-const PruneDeploymentsRecommendedName = "deployments"
-
-var (
-	deploymentsLongDesc = templates.LongDesc(`
-		Prune old completed and failed deployments
-
-		By default, the prune operation performs a dry run, making no changes to the deployments.
-		The --confirm flag is needed for changes to take effect.`)
-
-	deploymentsExample = templates.Examples(`
-		# Dry run deleting all but the last complete deployment for every deployment config
-		%[1]s %[2]s --keep-complete=1
-
-		# To actually perform the prune operation, the confirm flag must be appended
-		%[1]s %[2]s --keep-complete=1 --confirm`)
-)
-
-// PruneDeploymentsOptions holds all the required options for pruning deployments.
-type PruneDeploymentsOptions struct {
-	Confirm         bool
-	Orphans         bool
-	KeepYoungerThan time.Duration
-	KeepComplete    int
-	KeepFailed      int
-	Namespace       string
-
-	AppsClient appsv1client.DeploymentConfigsGetter
-	KubeClient corev1client.CoreV1Interface
-
-	genericclioptions.IOStreams
-}
-
-func NewPruneDeploymentsOptions(streams genericclioptions.IOStreams) *PruneDeploymentsOptions {
-	return &PruneDeploymentsOptions{
-		Confirm:         false,
-		KeepYoungerThan: 60 * time.Minute,
-		KeepComplete:    5,
-		KeepFailed:      1,
-		IOStreams:       streams,
-	}
-}
-
-// NewCmdPruneDeployments implements the OpenShift cli prune deployments command.
-func NewCmdPruneDeployments(f kcmdutil.Factory, parentName, name string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewPruneDeploymentsOptions(streams)
-	cmd := &cobra.Command{
-		Use:     name,
-		Short:   "Remove old completed and failed deployments",
-		Long:    deploymentsLongDesc,
-		Example: fmt.Sprintf(deploymentsExample, parentName, name),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Validate())
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	cmd.Flags().BoolVar(&o.Confirm, "confirm", o.Confirm, "If true, specify that deployment pruning should proceed. Defaults to false, displaying what would be deleted but not actually deleting anything.")
-	cmd.Flags().BoolVar(&o.Orphans, "orphans", o.Orphans, "If true, prune all deployments where the associated DeploymentConfig no longer exists, the status is complete or failed, and the replica size is 0.")
-	cmd.Flags().DurationVar(&o.KeepYoungerThan, "keep-younger-than", o.KeepYoungerThan, "Specify the minimum age of a deployment for it to be considered a candidate for pruning.")
-	cmd.Flags().IntVar(&o.KeepComplete, "keep-complete", o.KeepComplete, "Per DeploymentConfig, specify the number of deployments whose status is complete and whose replica size is 0 that will be preserved.")
-	cmd.Flags().IntVar(&o.KeepFailed, "keep-failed", o.KeepFailed, "Per DeploymentConfig, specify the number of deployments whose status is failed and whose replica size is 0 that will be preserved.")
-
-	return cmd
-}
-
-// Complete turns a partially defined PruneDeploymentsOptions into a fully populated structure
-// which can be validated and used for pruning deployments.
-func (o *PruneDeploymentsOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	if len(args) > 0 {
-		return kcmdutil.UsageErrorf(cmd, "no arguments are allowed with this command")
-	}
-
-	o.Namespace = metav1.NamespaceAll
-	if cmd.Flags().Lookup("namespace").Changed {
-		var err error
-		o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace()
-		if err != nil {
-			return err
-		}
-	}
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.KubeClient, err = corev1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	o.AppsClient, err = appsv1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Validate ensures that a PruneDeploymentsOptions is valid and can be used to execute pruning.
-func (o PruneDeploymentsOptions) Validate() error {
-	if o.KeepYoungerThan < 0 {
-		return fmt.Errorf("--keep-younger-than must be greater than or equal to 0")
-	}
-	if o.KeepComplete < 0 {
-		return fmt.Errorf("--keep-complete must be greater than or equal to 0")
-	}
-	if o.KeepFailed < 0 {
-		return fmt.Errorf("--keep-failed must be greater than or equal to 0")
-	}
-	return nil
-}
-
-// Run contains all the necessary functionality for the OpenShift cli prune deployments command.
-func (o PruneDeploymentsOptions) Run() error {
-	deploymentConfigList, err := o.AppsClient.DeploymentConfigs(o.Namespace).List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	deploymentConfigs := []*appsv1.DeploymentConfig{}
-	for i := range deploymentConfigList.Items {
-		deploymentConfigs = append(deploymentConfigs, &deploymentConfigList.Items[i])
-	}
-
-	deploymentList, err := o.KubeClient.ReplicationControllers(o.Namespace).List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	deployments := []*corev1.ReplicationController{}
-	for i := range deploymentList.Items {
-		deployments = append(deployments, &deploymentList.Items[i])
-	}
-
-	options := PrunerOptions{
-		KeepYoungerThan:   o.KeepYoungerThan,
-		Orphans:           o.Orphans,
-		KeepComplete:      o.KeepComplete,
-		KeepFailed:        o.KeepFailed,
-		DeploymentConfigs: deploymentConfigs,
-		Deployments:       deployments,
-	}
-	pruner := NewPruner(options)
-
-	w := tabwriter.NewWriter(o.Out, 10, 4, 3, ' ', 0)
-	defer w.Flush()
-
-	deploymentDeleter := &describingDeploymentDeleter{w: w}
-
-	if o.Confirm {
-		deploymentDeleter.delegate = NewDeploymentDeleter(o.KubeClient)
-	} else {
-		fmt.Fprintln(os.Stderr, "Dry run enabled - no modifications will be made. Add --confirm to remove deployments")
-	}
-
-	return pruner.Prune(deploymentDeleter)
-}
-
-// describingDeploymentDeleter prints information about each deployment it removes.
-// If a delegate exists, its DeleteDeployment function is invoked prior to returning.
-type describingDeploymentDeleter struct {
-	w             io.Writer
-	delegate      DeploymentDeleter
-	headerPrinted bool
-}
-
-var _ DeploymentDeleter = &describingDeploymentDeleter{}
-
-func (p *describingDeploymentDeleter) DeleteDeployment(deployment *corev1.ReplicationController) error {
-	if !p.headerPrinted {
-		p.headerPrinted = true
-		fmt.Fprintln(p.w, "NAMESPACE\tNAME")
-	}
-
-	fmt.Fprintf(p.w, "%s\t%s\n", deployment.Namespace, deployment.Name)
-
-	if p.delegate == nil {
-		return nil
-	}
-
-	if err := p.delegate.DeleteDeployment(deployment); err != nil {
-		return err
-	}
-
-	return nil
-}
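
Run wires dry-run behavior through describingDeploymentDeleter: the candidate is always printed, and deletion is delegated to a real deleter only when --confirm was given. That is a plain decorator; a standalone sketch follows, with an illustrative deleter interface rather than the real one:

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

type deleter interface {
	delete(namespace, name string) error
}

// describingDeleter prints each candidate and then defers to its delegate.
// A nil delegate means dry run: describe only, remove nothing.
type describingDeleter struct {
	w             *tabwriter.Writer
	delegate      deleter
	headerPrinted bool
}

func (d *describingDeleter) delete(namespace, name string) error {
	if !d.headerPrinted {
		d.headerPrinted = true
		fmt.Fprintln(d.w, "NAMESPACE\tNAME")
	}
	fmt.Fprintf(d.w, "%s\t%s\n", namespace, name)
	if d.delegate == nil {
		return nil
	}
	return d.delegate.delete(namespace, name)
}

func main() {
	w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)
	defer w.Flush()
	d := &describingDeleter{w: w} // no delegate: dry run
	d.delete("a", "deployment-1")
	d.delete("b", "deployment-2")
}
```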
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/deployments_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/deployments_test.go
deleted file mode 100644
index 3460347a3795..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/deployments_test.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package deployments
-
-import (
-	"testing"
-
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	fakecorev1client "k8s.io/client-go/kubernetes/typed/core/v1/fake"
-	clienttesting "k8s.io/client-go/testing"
-
-	fakeappsv1client "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1/fake"
-)
-
-func TestDeploymentPruneNamespaced(t *testing.T) {
-	osFake := &fakeappsv1client.FakeAppsV1{Fake: &clienttesting.Fake{}}
-	coreFake := &fakecorev1client.FakeCoreV1{Fake: &clienttesting.Fake{}}
-	opts := &PruneDeploymentsOptions{
-		Namespace: "foo",
-
-		AppsClient: osFake,
-		KubeClient: coreFake,
-		IOStreams:  genericclioptions.NewTestIOStreamsDiscard(),
-	}
-
-	if err := opts.Run(); err != nil {
-		t.Errorf("Unexpected error: %v", err)
-	}
-
-	if len(osFake.Actions()) == 0 || len(coreFake.Actions()) == 0 {
-		t.Errorf("Missing get deployments actions")
-	}
-	for _, a := range osFake.Actions() {
-		if a.GetNamespace() != "foo" {
-			t.Errorf("Unexpected namespace while pruning %s: %s", a.GetResource(), a.GetNamespace())
-		}
-	}
-	for _, a := range coreFake.Actions() {
-		if a.GetNamespace() != "foo" {
-			t.Errorf("Unexpected namespace while pruning %s: %s", a.GetResource(), a.GetNamespace())
-		}
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/prune.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/prune.go
deleted file mode 100644
index 64bd7efe5bff..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/prune.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package deployments
-
-import (
-	"time"
-
-	"k8s.io/klog"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-)
-
-type Pruner interface {
-	// Prune is responsible for the actual removal of deployments identified as candidates
-	// for pruning based on the pruning algorithm.
-	Prune(deleter DeploymentDeleter) error
-}
-
-// DeploymentDeleter knows how to delete deployments from OpenShift.
-type DeploymentDeleter interface {
-	// DeleteDeployment removes the deployment from OpenShift's storage.
-	DeleteDeployment(deployment *corev1.ReplicationController) error
-}
-
-// pruner is an object that knows how to prune a data set
-type pruner struct {
-	resolver Resolver
-}
-
-var _ Pruner = &pruner{}
-
-// PrunerOptions contains the fields used to initialize a new Pruner.
-type PrunerOptions struct {
-	// KeepYoungerThan filters out of the prune data set all objects that are younger than the specified duration.
-	KeepYoungerThan time.Duration
-	// Orphans, if true, includes inactive orphan deployments in the candidate prune set.
-	Orphans bool
-	// KeepComplete is the number of the most recent complete deployments to preserve, per DeploymentConfig.
-	KeepComplete int
-	// KeepFailed is the number of the most recent failed deployments to preserve, per DeploymentConfig.
-	KeepFailed int
-	// DeploymentConfigs is the entire list of deploymentconfigs across all namespaces in the cluster.
-	DeploymentConfigs []*appsv1.DeploymentConfig
-	// Deployments is the entire list of deployments across all namespaces in the cluster.
-	Deployments []*corev1.ReplicationController
-}
-
-// NewPruner returns a Pruner over the specified data using the specified options.
-func NewPruner(options PrunerOptions) Pruner {
-	klog.V(1).Infof("Creating deployment pruner with keepYoungerThan=%v, orphans=%v, keepComplete=%v, keepFailed=%v",
-		options.KeepYoungerThan, options.Orphans, options.KeepComplete, options.KeepFailed)
-
-	filter := &andFilter{
-		filterPredicates: []FilterPredicate{
-			FilterDeploymentsPredicate,
-			FilterZeroReplicaSize,
-			NewFilterBeforePredicate(options.KeepYoungerThan),
-		},
-	}
-	deployments := filter.Filter(options.Deployments)
-	dataSet := NewDataSet(options.DeploymentConfigs, deployments)
-
-	resolvers := []Resolver{}
-	if options.Orphans {
-		inactiveDeploymentStatus := []appsv1.DeploymentStatus{
-			appsv1.DeploymentStatusComplete,
-			appsv1.DeploymentStatusFailed,
-		}
-		resolvers = append(resolvers, NewOrphanDeploymentResolver(dataSet, inactiveDeploymentStatus))
-	}
-	resolvers = append(resolvers, NewPerDeploymentConfigResolver(dataSet, options.KeepComplete, options.KeepFailed))
-
-	return &pruner{
-		resolver: &mergeResolver{resolvers: resolvers},
-	}
-}
-
-// Prune will visit each item in the prunable set and invoke the associated DeploymentDeleter.
-func (p *pruner) Prune(deleter DeploymentDeleter) error {
-	deployments, err := p.resolver.Resolve()
-	if err != nil {
-		return err
-	}
-	for _, deployment := range deployments {
-		if err := deleter.DeleteDeployment(deployment); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// deploymentDeleter removes a deployment from OpenShift.
-type deploymentDeleter struct {
-	deployments corev1client.ReplicationControllersGetter
-}
-
-var _ DeploymentDeleter = &deploymentDeleter{}
-
-// NewDeploymentDeleter creates a new deploymentDeleter.
-func NewDeploymentDeleter(deployments corev1client.ReplicationControllersGetter) DeploymentDeleter {
-	return &deploymentDeleter{
-		deployments: deployments,
-	}
-}
-
-func (p *deploymentDeleter) DeleteDeployment(deployment *corev1.ReplicationController) error {
-	klog.V(4).Infof("Deleting deployment %q", deployment.Name)
-	policy := metav1.DeletePropagationBackground
-	return p.deployments.ReplicationControllers(deployment.Namespace).Delete(deployment.Name, &metav1.DeleteOptions{PropagationPolicy: &policy})
-}
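
NewPruner first narrows the universe with ANDed predicates (zero replicas, old enough) and only hands the survivors to the resolvers. A minimal sketch of that predicate composition, with stand-ins for the real predicates:

```go
package main

import "fmt"

type item struct {
	name     string
	replicas int
	ageHours int
}

type predicate func(item) bool

// and reports true only when every predicate accepts the item.
func and(preds ...predicate) predicate {
	return func(it item) bool {
		for _, p := range preds {
			if !p(it) {
				return false
			}
		}
		return true
	}
}

func main() {
	isCandidate := and(
		func(it item) bool { return it.replicas == 0 }, // FilterZeroReplicaSize stand-in
		func(it item) bool { return it.ageHours >= 1 }, // NewFilterBeforePredicate stand-in
	)
	items := []item{{"fresh", 0, 0}, {"old-idle", 0, 5}, {"old-busy", 3, 5}}
	for _, it := range items {
		if isCandidate(it) {
			fmt.Println("prune candidate:", it.name) // old-idle
		}
	}
}
```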
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/prune_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/prune_test.go
deleted file mode 100644
index 1ebc202a128d..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/prune_test.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package deployments
-
-import (
-	"sort"
-	"testing"
-	"time"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-)
-
-type mockDeleteRecorder struct {
-	set sets.String
-	err error
-}
-
-var _ DeploymentDeleter = &mockDeleteRecorder{}
-
-func (m *mockDeleteRecorder) DeleteDeployment(deployment *corev1.ReplicationController) error {
-	m.set.Insert(deployment.Name)
-	return m.err
-}
-
-func (m *mockDeleteRecorder) Verify(t *testing.T, expected sets.String) {
-	if len(m.set) != len(expected) || !m.set.HasAll(expected.List()...) {
-		expectedValues := expected.List()
-		actualValues := m.set.List()
-		sort.Strings(expectedValues)
-		sort.Strings(actualValues)
-		t.Errorf("expected \n\t%v\n, actual \n\t%v\n", expectedValues, actualValues)
-	}
-}
-
-func TestPruneTask(t *testing.T) {
-	deploymentStatusOptions := []appsv1.DeploymentStatus{
-		appsv1.DeploymentStatusComplete,
-		appsv1.DeploymentStatusFailed,
-		appsv1.DeploymentStatusNew,
-		appsv1.DeploymentStatusPending,
-		appsv1.DeploymentStatusRunning,
-	}
-	deploymentStatusFilter := []appsv1.DeploymentStatus{
-		appsv1.DeploymentStatusComplete,
-		appsv1.DeploymentStatusFailed,
-	}
-	deploymentStatusFilterSet := sets.String{}
-	for _, deploymentStatus := range deploymentStatusFilter {
-		deploymentStatusFilterSet.Insert(string(deploymentStatus))
-	}
-
-	for _, orphans := range []bool{true, false} {
-		for _, deploymentStatusOption := range deploymentStatusOptions {
-			keepYoungerThan := time.Hour
-
-			now := metav1.Now()
-			old := metav1.NewTime(now.Time.Add(-1 * keepYoungerThan))
-
-			deploymentConfigs := []*appsv1.DeploymentConfig{}
-			deployments := []*corev1.ReplicationController{}
-
-			deploymentConfig := mockDeploymentConfig("a", "deployment-config")
-			deploymentConfigs = append(deploymentConfigs, deploymentConfig)
-
-			deployments = append(deployments, withCreated(withStatus(mockDeployment("a", "deployment-1", deploymentConfig), deploymentStatusOption), now))
-			deployments = append(deployments, withCreated(withStatus(mockDeployment("a", "deployment-2", deploymentConfig), deploymentStatusOption), old))
-			deployments = append(deployments, withSize(withCreated(withStatus(mockDeployment("a", "deployment-3-with-replicas", deploymentConfig), deploymentStatusOption), old), 4))
-			deployments = append(deployments, withCreated(withStatus(mockDeployment("a", "orphan-deployment-1", nil), deploymentStatusOption), now))
-			deployments = append(deployments, withCreated(withStatus(mockDeployment("a", "orphan-deployment-2", nil), deploymentStatusOption), old))
-			deployments = append(deployments, withSize(withCreated(withStatus(mockDeployment("a", "orphan-deployment-3-with-replicas", nil), deploymentStatusOption), old), 4))
-
-			keepComplete := 1
-			keepFailed := 1
-			expectedValues := sets.String{}
-			filter := &andFilter{
-				filterPredicates: []FilterPredicate{
-					FilterDeploymentsPredicate,
-					FilterZeroReplicaSize,
-					NewFilterBeforePredicate(keepYoungerThan),
-				},
-			}
-			dataSet := NewDataSet(deploymentConfigs, filter.Filter(deployments))
-			resolver := NewPerDeploymentConfigResolver(dataSet, keepComplete, keepFailed)
-			if orphans {
-				resolver = &mergeResolver{
-					resolvers: []Resolver{resolver, NewOrphanDeploymentResolver(dataSet, deploymentStatusFilter)},
-				}
-			}
-			expectedDeployments, err := resolver.Resolve()
-			if err != nil {
-				t.Errorf("Unexpected error %v", err)
-			}
-			for _, item := range expectedDeployments {
-				expectedValues.Insert(item.Name)
-			}
-
-			recorder := &mockDeleteRecorder{set: sets.String{}}
-
-			options := PrunerOptions{
-				KeepYoungerThan:   keepYoungerThan,
-				Orphans:           orphans,
-				KeepComplete:      keepComplete,
-				KeepFailed:        keepFailed,
-				DeploymentConfigs: deploymentConfigs,
-				Deployments:       deployments,
-			}
-			pruner := NewPruner(options)
-			if err := pruner.Prune(recorder); err != nil {
-				t.Errorf("Unexpected error %v", err)
-			}
-			recorder.Verify(t, expectedValues)
-		}
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/resolvers.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/resolvers.go
deleted file mode 100644
index 1bb78dd94317..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/resolvers.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package deployments
-
-import (
-	"sort"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-	"github.com/openshift/library-go/pkg/apps/appsutil"
-)
-
-// Resolver knows how to resolve the set of candidate objects to prune
-type Resolver interface {
-	Resolve() ([]*corev1.ReplicationController, error)
-}
-
-// mergeResolver merges the set of results from multiple resolvers
-type mergeResolver struct {
-	resolvers []Resolver
-}
-
-func (m *mergeResolver) Resolve() ([]*corev1.ReplicationController, error) {
-	results := []*corev1.ReplicationController{}
-	for _, resolver := range m.resolvers {
-		items, err := resolver.Resolve()
-		if err != nil {
-			return nil, err
-		}
-		results = append(results, items...)
-	}
-	return results, nil
-}
-
-// NewOrphanDeploymentResolver returns a Resolver that matches deployments that have no associated DeploymentConfig and whose DeploymentStatus is in the provided filter
-func NewOrphanDeploymentResolver(dataSet DataSet, deploymentStatusFilter []appsv1.DeploymentStatus) Resolver {
-	filter := sets.NewString()
-	for _, deploymentStatus := range deploymentStatusFilter {
-		filter.Insert(string(deploymentStatus))
-	}
-	return &orphanDeploymentResolver{
-		dataSet:                dataSet,
-		deploymentStatusFilter: filter,
-	}
-}
-
-// orphanDeploymentResolver resolves orphan deployments that match the specified filter
-type orphanDeploymentResolver struct {
-	dataSet                DataSet
-	deploymentStatusFilter sets.String
-}
-
-// Resolve the matching set of objects
-func (o *orphanDeploymentResolver) Resolve() ([]*corev1.ReplicationController, error) {
-	deployments, err := o.dataSet.ListDeployments()
-	if err != nil {
-		return nil, err
-	}
-
-	results := []*corev1.ReplicationController{}
-	for _, deployment := range deployments {
-		deploymentStatus := appsutil.DeploymentStatusFor(deployment)
-		if !o.deploymentStatusFilter.Has(string(deploymentStatus)) {
-			continue
-		}
-		_, exists, _ := o.dataSet.GetDeploymentConfig(deployment)
-		if !exists {
-			results = append(results, deployment)
-		}
-	}
-	return results, nil
-}
-
-type perDeploymentConfigResolver struct {
-	dataSet      DataSet
-	keepComplete int
-	keepFailed   int
-}
-
-// NewPerDeploymentConfigResolver returns a Resolver that selects items to prune per DeploymentConfig
-func NewPerDeploymentConfigResolver(dataSet DataSet, keepComplete int, keepFailed int) Resolver {
-	return &perDeploymentConfigResolver{
-		dataSet:      dataSet,
-		keepComplete: keepComplete,
-		keepFailed:   keepFailed,
-	}
-}
-
-// ByMostRecent sorts deployments by most recently created.
-type ByMostRecent []*corev1.ReplicationController
-
-func (s ByMostRecent) Len() int      { return len(s) }
-func (s ByMostRecent) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s ByMostRecent) Less(i, j int) bool {
-	return !s[i].CreationTimestamp.Before(&s[j].CreationTimestamp)
-}
-
-func (o *perDeploymentConfigResolver) Resolve() ([]*corev1.ReplicationController, error) {
-	deploymentConfigs, err := o.dataSet.ListDeploymentConfigs()
-	if err != nil {
-		return nil, err
-	}
-
-	completeStates := sets.NewString(string(appsv1.DeploymentStatusComplete))
-	failedStates := sets.NewString(string(appsv1.DeploymentStatusFailed))
-
-	results := []*corev1.ReplicationController{}
-	for _, deploymentConfig := range deploymentConfigs {
-		deployments, err := o.dataSet.ListDeploymentsByDeploymentConfig(deploymentConfig)
-		if err != nil {
-			return nil, err
-		}
-
-		completeDeployments, failedDeployments := []*corev1.ReplicationController{}, []*corev1.ReplicationController{}
-		for _, deployment := range deployments {
-			status := appsutil.DeploymentStatusFor(deployment)
-			if completeStates.Has(string(status)) {
-				completeDeployments = append(completeDeployments, deployment)
-			} else if failedStates.Has(string(status)) {
-				failedDeployments = append(failedDeployments, deployment)
-			}
-		}
-		sort.Sort(ByMostRecent(completeDeployments))
-		sort.Sort(ByMostRecent(failedDeployments))
-
-		if o.keepComplete >= 0 && o.keepComplete < len(completeDeployments) {
-			results = append(results, completeDeployments[o.keepComplete:]...)
-		}
-		if o.keepFailed >= 0 && o.keepFailed < len(failedDeployments) {
-			results = append(results, failedDeployments[o.keepFailed:]...)
-		}
-	}
-	return results, nil
-}
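
ByMostRecent orders newest-first by negating Before, which makes two equal timestamps compare as "less" in both directions; sort.Sort tolerates that, but the same ordering can be written more directly with sort.Slice and After, as in this small sketch:

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

func main() {
	ts := []time.Time{
		time.Date(2019, 8, 1, 0, 0, 0, 0, time.UTC),
		time.Date(2019, 8, 3, 0, 0, 0, 0, time.UTC),
		time.Date(2019, 8, 2, 0, 0, 0, 0, time.UTC),
	}
	// Newest first: a strict comparison, unlike the negated Before.
	sort.Slice(ts, func(i, j int) bool { return ts[i].After(ts[j]) })
	fmt.Println(ts[0].Day(), ts[1].Day(), ts[2].Day()) // 3 2 1
}
```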
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/resolvers_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/resolvers_test.go
deleted file mode 100644
index 09b5496f79b3..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/deployments/resolvers_test.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package deployments
-
-import (
-	"fmt"
-	"sort"
-	"testing"
-	"time"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-)
-
-type mockResolver struct {
-	items []*corev1.ReplicationController
-	err   error
-}
-
-func (m *mockResolver) Resolve() ([]*corev1.ReplicationController, error) {
-	return m.items, m.err
-}
-
-func TestMergeResolver(t *testing.T) {
-	resolverA := &mockResolver{
-		items: []*corev1.ReplicationController{
-			mockDeployment("a", "b", nil),
-		},
-	}
-	resolverB := &mockResolver{
-		items: []*corev1.ReplicationController{
-			mockDeployment("c", "d", nil),
-		},
-	}
-	resolver := &mergeResolver{resolvers: []Resolver{resolverA, resolverB}}
-	results, err := resolver.Resolve()
-	if err != nil {
-		t.Errorf("Unexpected error %v", err)
-	}
-	if len(results) != 2 {
-		t.Errorf("Unexpected results %v", results)
-	}
-	expectedNames := sets.NewString("b", "d")
-	for _, item := range results {
-		if !expectedNames.Has(item.Name) {
-			t.Errorf("Unexpected name %v", item.Name)
-		}
-	}
-}
-
-func TestOrphanDeploymentResolver(t *testing.T) {
-	activeDeploymentConfig := mockDeploymentConfig("a", "active-deployment-config")
-	inactiveDeploymentConfig := mockDeploymentConfig("a", "inactive-deployment-config")
-
-	deploymentConfigs := []*appsv1.DeploymentConfig{activeDeploymentConfig}
-	deployments := []*corev1.ReplicationController{}
-
-	expectedNames := sets.String{}
-	deploymentStatusOptions := []appsv1.DeploymentStatus{
-		appsv1.DeploymentStatusComplete,
-		appsv1.DeploymentStatusFailed,
-		appsv1.DeploymentStatusNew,
-		appsv1.DeploymentStatusPending,
-		appsv1.DeploymentStatusRunning,
-	}
-
-	deploymentStatusFilter := []appsv1.DeploymentStatus{
-		appsv1.DeploymentStatusComplete,
-		appsv1.DeploymentStatusFailed,
-	}
-	deploymentStatusFilterSet := sets.String{}
-	for _, deploymentStatus := range deploymentStatusFilter {
-		deploymentStatusFilterSet.Insert(string(deploymentStatus))
-	}
-
-	for _, deploymentStatusOption := range deploymentStatusOptions {
-		deployments = append(deployments, withStatus(mockDeployment("a", string(deploymentStatusOption)+"-active", activeDeploymentConfig), deploymentStatusOption))
-		deployments = append(deployments, withStatus(mockDeployment("a", string(deploymentStatusOption)+"-inactive", inactiveDeploymentConfig), deploymentStatusOption))
-		deployments = append(deployments, withStatus(mockDeployment("a", string(deploymentStatusOption)+"-orphan", nil), deploymentStatusOption))
-		if deploymentStatusFilterSet.Has(string(deploymentStatusOption)) {
-			expectedNames.Insert(string(deploymentStatusOption) + "-inactive")
-			expectedNames.Insert(string(deploymentStatusOption) + "-orphan")
-		}
-	}
-
-	dataSet := NewDataSet(deploymentConfigs, deployments)
-	resolver := NewOrphanDeploymentResolver(dataSet, deploymentStatusFilter)
-	results, err := resolver.Resolve()
-	if err != nil {
-		t.Errorf("Unexpected error %v", err)
-	}
-	foundNames := sets.String{}
-	for _, result := range results {
-		foundNames.Insert(result.Name)
-	}
-	if len(foundNames) != len(expectedNames) || !expectedNames.HasAll(foundNames.List()...) {
-		t.Errorf("expected %v, actual %v", expectedNames, foundNames)
-	}
-}
-
-func TestPerDeploymentConfigResolver(t *testing.T) {
-	deploymentStatusOptions := []appsv1.DeploymentStatus{
-		appsv1.DeploymentStatusComplete,
-		appsv1.DeploymentStatusFailed,
-		appsv1.DeploymentStatusNew,
-		appsv1.DeploymentStatusPending,
-		appsv1.DeploymentStatusRunning,
-	}
-	deploymentConfigs := []*appsv1.DeploymentConfig{
-		mockDeploymentConfig("a", "deployment-config-1"),
-		mockDeploymentConfig("b", "deployment-config-2"),
-	}
-	deploymentsPerStatus := 100
-	deployments := []*corev1.ReplicationController{}
-	for _, deploymentConfig := range deploymentConfigs {
-		for _, deploymentStatusOption := range deploymentStatusOptions {
-			for i := 0; i < deploymentsPerStatus; i++ {
-				deployment := withStatus(mockDeployment(deploymentConfig.Namespace, fmt.Sprintf("%v-%v-%v", deploymentConfig.Name, deploymentStatusOption, i), deploymentConfig), deploymentStatusOption)
-				deployments = append(deployments, deployment)
-			}
-		}
-	}
-
-	now := metav1.Now()
-	for i := range deployments {
-		creationTimestamp := metav1.NewTime(now.Time.Add(-1 * time.Duration(i) * time.Hour))
-		deployments[i].CreationTimestamp = creationTimestamp
-	}
-
-	// test the number to keep across a range of values
-	for keep := 0; keep < deploymentsPerStatus*2; keep++ {
-		dataSet := NewDataSet(deploymentConfigs, deployments)
-
-		expectedNames := sets.String{}
-		deploymentCompleteStatusFilterSet := sets.NewString(string(appsv1.DeploymentStatusComplete))
-		deploymentFailedStatusFilterSet := sets.NewString(string(appsv1.DeploymentStatusFailed))
-
-		for _, deploymentConfig := range deploymentConfigs {
-			deploymentItems, err := dataSet.ListDeploymentsByDeploymentConfig(deploymentConfig)
-			if err != nil {
-				t.Errorf("Unexpected err %v", err)
-			}
-			completedDeployments, failedDeployments := []*corev1.ReplicationController{}, []*corev1.ReplicationController{}
-			for _, deployment := range deploymentItems {
-				status := deployment.Annotations[appsv1.DeploymentStatusAnnotation]
-				if deploymentCompleteStatusFilterSet.Has(status) {
-					completedDeployments = append(completedDeployments, deployment)
-				} else if deploymentFailedStatusFilterSet.Has(status) {
-					failedDeployments = append(failedDeployments, deployment)
-				}
-			}
-			sort.Sort(ByMostRecent(completedDeployments))
-			sort.Sort(ByMostRecent(failedDeployments))
-			purgeCompleted := []*corev1.ReplicationController{}
-			purgeFailed := []*corev1.ReplicationController{}
-			if keep >= 0 && keep < len(completedDeployments) {
-				purgeCompleted = completedDeployments[keep:]
-			}
-			if keep >= 0 && keep < len(failedDeployments) {
-				purgeFailed = failedDeployments[keep:]
-			}
-			for _, deployment := range purgeCompleted {
-				expectedNames.Insert(deployment.Name)
-			}
-			for _, deployment := range purgeFailed {
-				expectedNames.Insert(deployment.Name)
-			}
-		}
-
-		resolver := NewPerDeploymentConfigResolver(dataSet, keep, keep)
-		results, err := resolver.Resolve()
-		if err != nil {
-			t.Errorf("Unexpected error %v", err)
-		}
-		foundNames := sets.String{}
-		for _, result := range results {
-			foundNames.Insert(result.Name)
-		}
-		if len(foundNames) != len(expectedNames) || !expectedNames.HasAll(foundNames.List()...) {
-			expectedValues := expectedNames.List()
-			actualValues := foundNames.List()
-			sort.Strings(expectedValues)
-			sort.Strings(actualValues)
-			t.Errorf("keep %v\n, expected \n\t%v\n, actual \n\t%v\n", keep, expectedValues, actualValues)
-		}
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/OWNERS b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/OWNERS
deleted file mode 100644
index 4041bad22cf2..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/OWNERS
+++ /dev/null
@@ -1,15 +0,0 @@
-reviewers:
-  - bparees
-  - smarterclayton
-  - soltysh
-  - miminar
-  - mfojtik
-  - adambkaplan
-  - dmage
-approvers:
-  - bparees
-  - smarterclayton
-  - soltysh
-  - mfojtik
-  - adambkaplan
-  - dmage
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/doc.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/doc.go
deleted file mode 100644
index 89dcc68fee9b..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package imageprune contains logic for pruning images and interoperating with the integrated container image registry.
-package imageprune
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/helper.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/helper.go
deleted file mode 100644
index b1be1dc3afc7..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/helper.go
+++ /dev/null
@@ -1,304 +0,0 @@
-package imageprune
-
-import (
-	"fmt"
-	"net/http"
-	"net/url"
-	"sort"
-	"strings"
-
-	"github.com/docker/distribution/registry/api/errcode"
-	"k8s.io/klog"
-
-	corev1 "k8s.io/api/core/v1"
-	kmeta "k8s.io/apimachinery/pkg/api/meta"
-	"k8s.io/apimachinery/pkg/runtime"
-	kerrors "k8s.io/apimachinery/pkg/util/errors"
-	ref "k8s.io/client-go/tools/reference"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-
-	imagev1 "github.com/openshift/api/image/v1"
-	"github.com/openshift/library-go/pkg/image/reference"
-	"github.com/openshift/library-go/pkg/network/networkutils"
-)
-
-// order younger images before older
-type imgByAge []*imagev1.Image
-
-func (ba imgByAge) Len() int      { return len(ba) }
-func (ba imgByAge) Swap(i, j int) { ba[i], ba[j] = ba[j], ba[i] }
-func (ba imgByAge) Less(i, j int) bool {
-	return ba[i].CreationTimestamp.After(ba[j].CreationTimestamp.Time)
-}
-
-// order younger image stream before older
-type isByAge []imagev1.ImageStream
-
-func (ba isByAge) Len() int      { return len(ba) }
-func (ba isByAge) Swap(i, j int) { ba[i], ba[j] = ba[j], ba[i] }
-func (ba isByAge) Less(i, j int) bool {
-	return ba[i].CreationTimestamp.After(ba[j].CreationTimestamp.Time)
-}
-
-// DetermineRegistryHost returns the registry host embedded in the pull-spec of the youngest managed image or,
-// failing that, of an imagestream from the provided lists. If no such pull-spec is found, an error is returned.
-func DetermineRegistryHost(images *imagev1.ImageList, imageStreams *imagev1.ImageStreamList) (string, error) {
-	var pullSpec string
-	var managedImages []*imagev1.Image
-
-	// First, try to determine the registry URL from the pull spec of the youngest managed image
-	for i := range images.Items {
-		image := &images.Items[i]
-		if image.Annotations[imagev1.ManagedByOpenShiftAnnotation] != "true" {
-			continue
-		}
-		managedImages = append(managedImages, image)
-	}
-	// be sure to pick up the newest managed image, which should have up-to-date information
-	sort.Sort(imgByAge(managedImages))
-
-	if len(managedImages) > 0 {
-		pullSpec = managedImages[0].DockerImageReference
-	} else {
-		// Second, try to get the pull spec from any image stream.
-		// Sorting by creation timestamp may not get us up-to-date info. Modification time would be much
-		// better if there were such an attribute.
-		sort.Sort(isByAge(imageStreams.Items))
-		for _, is := range imageStreams.Items {
-			if len(is.Status.DockerImageRepository) == 0 {
-				continue
-			}
-			pullSpec = is.Status.DockerImageRepository
-		}
-	}
-
-	if len(pullSpec) == 0 {
-		return "", fmt.Errorf("no managed image found")
-	}
-
-	ref, err := reference.Parse(pullSpec)
-	if err != nil {
-		return "", fmt.Errorf("unable to parse %q: %v", pullSpec, err)
-	}
-
-	if len(ref.Registry) == 0 {
-		return "", fmt.Errorf("%s does not include a registry", pullSpec)
-	}
-
-	return ref.Registry, nil
-}
-
-// RegistryPinger performs a health check against a registry.
-type RegistryPinger interface {
-	// Ping performs a health check against the registry. It returns the registry URL qualified with a scheme
-	// unless an error occurs.
-	Ping(registry string) (*url.URL, error)
-}
-
-// DefaultRegistryPinger implements RegistryPinger.
-type DefaultRegistryPinger struct {
-	Client   *http.Client
-	Insecure bool
-}
-
-// Ping verifies that the integrated registry is ready, determines its transport protocol and returns its URL
-// or an error.
-func (drp *DefaultRegistryPinger) Ping(registry string) (*url.URL, error) {
-	var (
-		registryURL *url.URL
-		err         error
-	)
-
-pathLoop:
-	// first try the new default "/" path, then fall back to the obsolete /healthz endpoint
-	for _, path := range []string{"/", "/healthz"} {
-		registryURL, err = TryProtocolsWithRegistryURL(registry, drp.Insecure, func(u url.URL) error {
-			u.Path = path
-			healthResponse, err := drp.Client.Get(u.String())
-			if err != nil {
-				return err
-			}
-			defer healthResponse.Body.Close()
-
-			if healthResponse.StatusCode != http.StatusOK {
-				return &retryPath{err: fmt.Errorf("unexpected status: %s", healthResponse.Status)}
-			}
-
-			return nil
-		})
-
-		// determine whether to retry with another endpoint
-		switch t := err.(type) {
-		case *retryPath:
-			// return the nested error if this is the last ping attempt
-			err = t.err
-			continue pathLoop
-		case kerrors.Aggregate:
-			// if any aggregated error indicates a possible retry, do it
-			for _, err := range t.Errors() {
-				if _, ok := err.(*retryPath); ok {
-					continue pathLoop
-				}
-			}
-		}
-
-		break
-	}
-
-	return registryURL, err
-}
-
-// DryRunRegistryPinger implements RegistryPinger.
-type DryRunRegistryPinger struct {
-}
-
-// Ping implements Ping method.
-func (*DryRunRegistryPinger) Ping(registry string) (*url.URL, error) {
-	return url.Parse("https://" + registry)
-}
-
-// TryProtocolsWithRegistryURL runs the given action with different protocols until no error is returned. The
-// https protocol is tried first. If that fails and allowInsecure is true, http is tried next. Any errors
-// encountered are aggregated and returned.
-func TryProtocolsWithRegistryURL(registry string, allowInsecure bool, action func(registryURL url.URL) error) (*url.URL, error) {
-	var errs []error
-
-	if !strings.Contains(registry, "://") {
-		registry = "unset://" + registry
-	}
-	url, err := url.Parse(registry)
-	if err != nil {
-		return nil, err
-	}
-	var protos []string
-	switch {
-	case len(url.Scheme) > 0 && url.Scheme != "unset":
-		protos = []string{url.Scheme}
-	case allowInsecure || networkutils.IsPrivateAddress(registry):
-		protos = []string{"https", "http"}
-	default:
-		protos = []string{"https"}
-	}
-	registry = url.Host
-
-	for _, proto := range protos {
-		klog.V(4).Infof("Trying protocol %s for the registry URL %s", proto, registry)
-		url.Scheme = proto
-		err := action(*url)
-		if err == nil {
-			return url, nil
-		}
-
-		if err != nil {
-			klog.V(4).Infof("Error with %s for %s: %v", proto, registry, err)
-		}
-
-		if _, ok := err.(*errcode.Errors); ok {
-			// we got a response back from the registry, so return it
-			return url, err
-		}
-		errs = append(errs, err)
-		if proto == "https" && strings.Contains(err.Error(), "server gave HTTP response to HTTPS client") && !allowInsecure {
-			errs = append(errs, fmt.Errorf("\n* Append --force-insecure if you really want to prune the registry using insecure connection."))
-		} else if proto == "http" && strings.Contains(err.Error(), "malformed HTTP response") {
-			errs = append(errs, fmt.Errorf("\n* Are you trying to connect to a TLS-enabled registry without TLS?"))
-		}
-	}
-
-	return nil, kerrors.NewAggregate(errs)
-}
-
-// retryPath is an error indicating that another connection attempt may be retried with a different path
-type retryPath struct{ err error }
-
-func (rp *retryPath) Error() string { return rp.err.Error() }
-
-// ErrBadReference denotes an invalid reference to an image, imagestreamtag or imagestreamimage stored in a
-// particular object. The object is identified by kind, namespace and name.
-type ErrBadReference struct {
-	kind       string
-	namespace  string
-	name       string
-	targetKind string
-	reference  string
-	reason     string
-}
-
-func newErrBadReferenceToImage(reference string, obj *corev1.ObjectReference, reason string) error {
-	kind := ""
-	namespace := ""
-	name := ""
-	if obj != nil {
-		kind = obj.Kind
-		namespace = obj.Namespace
-		name = obj.Name
-	}
-
-	return &ErrBadReference{
-		kind:      kind,
-		namespace: namespace,
-		name:      name,
-		reference: reference,
-		reason:    reason,
-	}
-}
-
-func newErrBadReferenceTo(targetKind, reference string, obj *corev1.ObjectReference, reason string) error {
-	return &ErrBadReference{
-		kind:       obj.Kind,
-		namespace:  obj.Namespace,
-		name:       obj.Name,
-		targetKind: targetKind,
-		reference:  reference,
-		reason:     reason,
-	}
-}
-
-func (e *ErrBadReference) Error() string {
-	return e.String()
-}
-
-func (e *ErrBadReference) String() string {
-	name := e.name
-	if len(e.namespace) > 0 {
-		name = e.namespace + "/" + name
-	}
-	targetKind := "container image"
-	if len(e.targetKind) > 0 {
-		targetKind = e.targetKind
-	}
-	return fmt.Sprintf("%s[%s]: invalid %s reference %q: %s", e.kind, name, targetKind, e.reference, e.reason)
-}
-
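For illustration, a hypothetical bad pod reference and the message shape it produces; the pull spec, object reference, and reason string below are all invented values:

	func exampleBadReference() error {
		err := newErrBadReferenceToImage(
			"foo/bar:::baz", // invented, malformed pull spec
			&corev1.ObjectReference{Kind: "Pod", Namespace: "ns", Name: "mypod"},
			"invalid reference format",
		)
		// err.Error() yields:
		//   Pod[ns/mypod]: invalid container image reference "foo/bar:::baz": invalid reference format
		return err
	}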
-func getName(obj runtime.Object) string {
-	accessor, err := kmeta.Accessor(obj)
-	if err != nil {
-		klog.V(4).Infof("Error getting accessor for %#v", obj)
-		return ""
-	}
-	ns := accessor.GetNamespace()
-	if len(ns) == 0 {
-		return accessor.GetName()
-	}
-	return fmt.Sprintf("%s/%s", ns, accessor.GetName())
-}
-
-func getKindName(obj *corev1.ObjectReference) string {
-	if obj == nil {
-		return "unknown object"
-	}
-	name := obj.Name
-	if len(obj.Namespace) > 0 {
-		name = obj.Namespace + "/" + name
-	}
-	return fmt.Sprintf("%s[%s]", obj.Kind, name)
-}
-
-func getRef(obj runtime.Object) *corev1.ObjectReference {
-	ref, err := ref.GetReference(scheme.Scheme, obj)
-	if err != nil {
-		klog.Errorf("failed to get reference to object %T: %v", obj, err)
-		return nil
-	}
-	return ref
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/helper_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/helper_test.go
deleted file mode 100644
index 962509b816be..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/helper_test.go
+++ /dev/null
@@ -1,217 +0,0 @@
-package imageprune
-
-import (
-	"crypto/tls"
-	"net/http"
-	"net/http/httptest"
-	"reflect"
-	"strings"
-	"sync"
-	"testing"
-
-	"k8s.io/apimachinery/pkg/util/diff"
-	knet "k8s.io/apimachinery/pkg/util/net"
-)
-
-type requestStats struct {
-	lock     sync.Mutex
-	requests []string
-}
-
-func (rs *requestStats) addRequest(r *http.Request) {
-	rs.lock.Lock()
-	defer rs.lock.Unlock()
-	rs.requests = append(rs.requests, r.URL.String())
-}
-func (rs *requestStats) clear() {
-	rs.lock.Lock()
-	defer rs.lock.Unlock()
-	rs.requests = rs.requests[:0]
-}
-func (rs *requestStats) getRequests() []string {
-	rs.lock.Lock()
-	defer rs.lock.Unlock()
-	res := make([]string, len(rs.requests))
-	copy(res, rs.requests)
-	return res
-}
-
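A short sketch of how the requestStats helper above is meant to be used: record requests from a handler, then assert on them. The handler and the /healthz request are assumptions for the example:

	func exampleRequestStats() []string {
		rs := requestStats{requests: []string{}}
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			rs.addRequest(r) // safe under concurrent requests thanks to the mutex
			w.WriteHeader(http.StatusOK)
		}))
		defer srv.Close()
		if resp, err := http.Get(srv.URL + "/healthz"); err == nil {
			resp.Body.Close()
		}
		return rs.getRequests() // e.g. []string{"/healthz"}
	}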
-func TestDefaultImagePinger(t *testing.T) {
-	rs := requestStats{requests: []string{}}
-
-	type statusForPath map[string]int
-
-	for _, tc := range []struct {
-		name                   string
-		schemePrefix           string
-		securedRegistry        bool
-		insecure               bool
-		statusForPath          statusForPath
-		expectedErrorSubstring string
-		expectedRequests       []string
-	}{
-		{
-			name:             "tls secured registry with insecure fallback",
-			securedRegistry:  true,
-			insecure:         true,
-			statusForPath:    statusForPath{"/": http.StatusOK},
-			expectedRequests: []string{"/"},
-		},
-
-		{
-			name:             "tls secured registry prefixed by scheme with insecure fallback",
-			schemePrefix:     "https://",
-			securedRegistry:  true,
-			insecure:         true,
-			statusForPath:    statusForPath{"/": http.StatusOK},
-			expectedRequests: []string{"/"},
-		},
-
-		{
-			name:                   "tls secured registry prefixed by http scheme with insecure fallback",
-			schemePrefix:           "http://",
-			securedRegistry:        true,
-			insecure:               true,
-			statusForPath:          statusForPath{"/": http.StatusOK},
-			expectedErrorSubstring: "unexpected status: 400 Bad Request", // https://github.com/golang/go/issues/23689
-		},
-
-		{
-			name:                   "tls secured registry with no fallback",
-			securedRegistry:        true,
-			insecure:               false,
-			statusForPath:          statusForPath{"/": http.StatusOK, "/healthz": http.StatusOK},
-			expectedErrorSubstring: "x509: certificate signed by unknown authority",
-		},
-
-		{
-			name:             "tls secured registry with old healthz endpoint",
-			securedRegistry:  true,
-			insecure:         true,
-			statusForPath:    statusForPath{"/healthz": http.StatusOK},
-			expectedRequests: []string{"/", "/healthz"},
-		},
-
-		{
-			name:             "insecure registry with insecure fallback",
-			securedRegistry:  false,
-			insecure:         true,
-			statusForPath:    statusForPath{"/": http.StatusOK},
-			expectedRequests: []string{"/"},
-		},
-
-		{
-			name:             "insecure registry prefixed by scheme with insecure fallback",
-			schemePrefix:     "http://",
-			securedRegistry:  false,
-			insecure:         true,
-			statusForPath:    statusForPath{"/": http.StatusOK},
-			expectedRequests: []string{"/"},
-		},
-
-		{
-			name:                   "insecure registry prefixed by https scheme with insecure fallback",
-			schemePrefix:           "https://",
-			securedRegistry:        false,
-			insecure:               true,
-			statusForPath:          statusForPath{"/": http.StatusOK},
-			expectedErrorSubstring: "server gave HTTP response to HTTPS client",
-		},
-
-		{
-			name:                   "insecure registry with no fallback",
-			securedRegistry:        false,
-			statusForPath:          statusForPath{"/": http.StatusOK, "/healthz": http.StatusOK},
-			expectedErrorSubstring: "server gave HTTP response to HTTPS client",
-		},
-
-		{
-			name:             "insecure registry with old healthz endpoint",
-			securedRegistry:  false,
-			insecure:         true,
-			statusForPath:    statusForPath{"/healthz": http.StatusOK},
-			expectedRequests: []string{"/", "/healthz"},
-		},
-
-		{
-			name:                   "initializing insecure registry",
-			securedRegistry:        false,
-			insecure:               true,
-			statusForPath:          statusForPath{},
-			expectedErrorSubstring: "server gave HTTP response to HTTPS client, unexpected status: 404 Not Found",
-			expectedRequests:       []string{"/", "/healthz"},
-		},
-	} {
-		t.Run(tc.name, func(t *testing.T) {
-			defer rs.clear()
-
-			rt := knet.SetTransportDefaults(&http.Transport{
-				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
-			})
-			insecureClient := http.Client{Transport: rt}
-			secureClient := http.Client{}
-
-			handler := func(w http.ResponseWriter, r *http.Request) {
-				rs.addRequest(r)
-				if s, ok := tc.statusForPath[r.URL.Path]; ok {
-					w.WriteHeader(s)
-				} else {
-					w.WriteHeader(http.StatusNotFound)
-				}
-			}
-
-			var server *httptest.Server
-			if tc.securedRegistry {
-				server = httptest.NewTLSServer(http.HandlerFunc(handler))
-			} else {
-				server = httptest.NewServer(http.HandlerFunc(handler))
-			}
-			defer server.Close()
-			// TrimPrefix, not TrimLeft: TrimLeft treats its second argument as a character set.
-			serverHost := strings.TrimPrefix(strings.TrimPrefix(server.URL, "https://"), "http://")
-
-			client := &secureClient
-			if tc.insecure {
-				client = &insecureClient
-			}
-
-			pinger := DefaultRegistryPinger{
-				Client:   client,
-				Insecure: tc.insecure,
-			}
-
-			registryURL, err := pinger.Ping(tc.schemePrefix + serverHost)
-			if err != nil {
-				if len(tc.expectedErrorSubstring) == 0 {
-					t.Errorf("[%s] got unexpected ping error of type %T: %v", tc.name, err, err)
-				} else if !strings.Contains(err.Error(), tc.expectedErrorSubstring) {
-					t.Errorf("[%s] expected substring %q not found in error message: %s", tc.name, tc.expectedErrorSubstring, err.Error())
-				}
-			} else if len(tc.expectedErrorSubstring) > 0 {
-				t.Errorf("[%s] unexpected non-error", tc.name)
-			}
-
-			e := server.URL
-			if len(tc.expectedErrorSubstring) > 0 {
-				// in the error case the pinger is not expected to return a usable URL
-				e = ""
-			}
-			a := ""
-			if registryURL != nil {
-				a = registryURL.String()
-			}
-			if a != e {
-				t.Errorf("[%s] unexpected registry url: %q != %q", tc.name, a, e)
-			}
-
-			ers := tc.expectedRequests
-			if ers == nil {
-				ers = []string{}
-			}
-			if a := rs.getRequests(); !reflect.DeepEqual(a, ers) {
-				t.Errorf("[%s] got unexpected requests: %s", tc.name, diff.ObjectDiff(a, ers))
-			}
-		})
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/prune.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/prune.go
deleted file mode 100644
index af04b51ade25..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/prune.go
+++ /dev/null
@@ -1,1755 +0,0 @@
-package imageprune
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"net/http"
-	"net/url"
-	"reflect"
-	"sort"
-	"strings"
-	"time"
-
-	"github.com/docker/distribution/manifest/schema2"
-	"github.com/docker/distribution/registry/api/errcode"
-	gonum "github.com/gonum/graph"
-	"k8s.io/klog"
-
-	kappsv1 "k8s.io/api/apps/v1"
-	corev1 "k8s.io/api/core/v1"
-	kerrapi "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	kerrors "k8s.io/apimachinery/pkg/util/errors"
-	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/client-go/util/retry"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-	buildv1 "github.com/openshift/api/build/v1"
-	dockerv10 "github.com/openshift/api/image/docker10"
-	imagev1 "github.com/openshift/api/image/v1"
-	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
-	"github.com/openshift/library-go/pkg/build/buildutil"
-	"github.com/openshift/library-go/pkg/image/imageutil"
-	"github.com/openshift/library-go/pkg/image/reference"
-	appsgraph "github.com/openshift/oc/pkg/helpers/graph/appsgraph/nodes"
-	buildgraph "github.com/openshift/oc/pkg/helpers/graph/buildgraph/nodes"
-	"github.com/openshift/oc/pkg/helpers/graph/genericgraph"
-	imagegraph "github.com/openshift/oc/pkg/helpers/graph/imagegraph/nodes"
-	kubegraph "github.com/openshift/oc/pkg/helpers/graph/kubegraph/nodes"
-)
-
-// TODO these edges should probably have an `Add***Edges` method in images/graph and be moved there
-const (
-	// ReferencedImageEdgeKind defines a "strong" edge where the tail is an
-	// ImageNode, with strong indicating that the ImageNode tail is not a
-	// candidate for pruning.
-	ReferencedImageEdgeKind = "ReferencedImage"
-	// WeakReferencedImageEdgeKind defines a "weak" edge where the tail is
-	// an ImageNode, with weak indicating that this particular edge does
-	// not keep an ImageNode from being a candidate for pruning.
-	WeakReferencedImageEdgeKind = "WeakReferencedImage"
-
-	// ReferencedImageConfigEdgeKind defines an edge from an ImageStreamNode or an
-	// ImageNode to an ImageComponentNode.
-	ReferencedImageConfigEdgeKind = "ReferencedImageConfig"
-
-	// ReferencedImageLayerEdgeKind defines an edge from an ImageStreamNode or an
-	// ImageNode to an ImageComponentNode.
-	ReferencedImageLayerEdgeKind = "ReferencedImageLayer"
-
-	// ReferencedImageManifestEdgeKind defines an edge from an ImageStreamNode or an
-	// ImageNode to an ImageComponentNode.
-	ReferencedImageManifestEdgeKind = "ReferencedImageManifest"
-
-	defaultPruneImageWorkerCount = 5
-)
-
-// RegistryClientFactoryFunc is a factory function returning a registry client for use in a worker.
-type RegistryClientFactoryFunc func() (*http.Client, error)
-
-// ImagePrunerFactoryFunc is a factory function returning an image deleter for use in a worker.
-type ImagePrunerFactoryFunc func() (ImageDeleter, error)
-
-// FakeRegistryClientFactory is a registry client factory that creates no client at all. Useful for dry runs.
-func FakeRegistryClientFactory() (*http.Client, error) {
-	return nil, nil
-}
-
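For contrast with the fake factory, a hypothetical concrete RegistryClientFactoryFunc. The TLS configuration (and the crypto/tls import it would need) are assumptions for this sketch, not the factory oc itself wires in:

	func newStaticRegistryClientFactory(insecure bool) RegistryClientFactoryFunc {
		client := &http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
			},
		}
		return func() (*http.Client, error) {
			return client, nil // every worker shares the same pre-built client
		}
	}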
-// pruneAlgorithm contains the various settings to use when evaluating images
-// and layers for pruning.
-type pruneAlgorithm struct {
-	keepYoungerThan    time.Time
-	keepTagRevisions   int
-	pruneOverSizeLimit bool
-	namespace          string
-	allImages          bool
-	pruneRegistry      bool
-}
-
-// ImageDeleter knows how to remove images from OpenShift.
-type ImageDeleter interface {
-	// DeleteImage removes the image from OpenShift's storage.
-	DeleteImage(image *imagev1.Image) error
-}
-
-// ImageStreamDeleter knows how to remove an image reference from an image stream.
-type ImageStreamDeleter interface {
-	// GetImageStream returns a fresh copy of an image stream.
-	GetImageStream(stream *imagev1.ImageStream) (*imagev1.ImageStream, error)
-	// UpdateImageStream removes all references to the image from the image
-	// stream's status.tags. The updated image stream is returned.
-	UpdateImageStream(stream *imagev1.ImageStream) (*imagev1.ImageStream, error)
-	// NotifyImageStreamPrune shows notification about updated image stream.
-	NotifyImageStreamPrune(stream *imagev1.ImageStream, updatedTags []string, deletedTags []string)
-}
-
-// BlobDeleter knows how to delete a blob from the container image registry.
-type BlobDeleter interface {
-	// DeleteBlob uses registryClient to ask the registry at registryURL
-	// to remove the blob.
-	DeleteBlob(registryClient *http.Client, registryURL *url.URL, blob string) error
-}
-
-// LayerLinkDeleter knows how to delete a repository layer link from the container image registry.
-type LayerLinkDeleter interface {
-	// DeleteLayerLink uses registryClient to ask the registry at registryURL to
-	// delete the repository layer link.
-	DeleteLayerLink(registryClient *http.Client, registryURL *url.URL, repo, linkName string) error
-}
-
-// ManifestDeleter knows how to delete image manifest data for a repository from
-// the container image registry.
-type ManifestDeleter interface {
-	// DeleteManifest uses registryClient to ask the registry at registryURL to
-	// delete the repository's image manifest data.
-	DeleteManifest(registryClient *http.Client, registryURL *url.URL, repo, manifest string) error
-}
-
-// PrunerOptions contains the fields used to initialize a new Pruner.
-type PrunerOptions struct {
-	// KeepYoungerThan indicates the minimum age an Image must be to be a
-	// candidate for pruning.
-	KeepYoungerThan *time.Duration
-	// KeepTagRevisions is the minimum number of tag revisions to preserve;
-	// revisions older than this value are candidates for pruning.
-	KeepTagRevisions *int
-	// PruneOverSizeLimit indicates that images exceeding defined limits (openshift.io/Image)
-	// will be considered as candidates for pruning.
-	PruneOverSizeLimit *bool
-	// AllImages considers all images for pruning, not just those pushed directly to the registry.
-	AllImages *bool
-	// PruneRegistry controls whether to prune both the API objects in etcd and the corresponding
-	// data in the registry, or to prune only the API objects and leave the registry data untouched.
-	PruneRegistry *bool
-	// Namespace to be pruned; when a namespace is specified, Images themselves are never removed.
-	Namespace string
-	// Images is the entire list of images in OpenShift. An image must be in this
-	// list to be a candidate for pruning.
-	Images *imagev1.ImageList
-	// ImageWatcher watches for image changes.
-	ImageWatcher watch.Interface
-	// Streams is the entire list of image streams across all namespaces in the
-	// cluster.
-	Streams *imagev1.ImageStreamList
-	// StreamWatcher watches for stream changes.
-	StreamWatcher watch.Interface
-	// Pods is the entire list of pods across all namespaces in the cluster.
-	Pods *corev1.PodList
-	// RCs is the entire list of replication controllers across all namespaces in
-	// the cluster.
-	RCs *corev1.ReplicationControllerList
-	// BCs is the entire list of build configs across all namespaces in the
-	// cluster.
-	BCs *buildv1.BuildConfigList
-	// Builds is the entire list of builds across all namespaces in the cluster.
-	Builds *buildv1.BuildList
-	// DSs is the entire list of daemon sets across all namespaces in the cluster.
-	DSs *kappsv1.DaemonSetList
-	// Deployments is the entire list of kube's deployments across all namespaces in the cluster.
-	Deployments *kappsv1.DeploymentList
-	// DCs is the entire list of deployment configs across all namespaces in the cluster.
-	DCs *appsv1.DeploymentConfigList
-	// RSs is the entire list of replica sets across all namespaces in the cluster.
-	RSs *kappsv1.ReplicaSetList
-	// LimitRanges is a map of LimitRanges across namespaces, being keys in this map.
-	LimitRanges map[string][]*corev1.LimitRange
-	// DryRun indicates that no changes will be made to the cluster and nothing
-	// will be removed.
-	DryRun bool
-	// RegistryClient is the http.Client to use when contacting the registry.
-	RegistryClientFactory RegistryClientFactoryFunc
-	// RegistryURL is the URL of the integrated container image registry.
-	RegistryURL *url.URL
-	// IgnoreInvalidRefs indicates that all invalid references should be ignored.
-	IgnoreInvalidRefs bool
-	// NumWorkers is the desired number of workers concurrently handling image prune jobs. If less than 1, the
-	// default number of workers will be spawned.
-	NumWorkers int
-}
-
-// Pruner knows how to prune istags, images, manifests, layers, image configs and blobs.
-type Pruner interface {
-	// Prune uses imagePruner, streamPruner, layerLinkPruner, blobPruner, and
-	// manifestPruner to remove images that have been identified as candidates
-	// for pruning based on the Pruner's internal pruning algorithm.
-	// Please see NewPruner for details on the algorithm.
-	Prune(
-		imagePrunerFactory ImagePrunerFactoryFunc,
-		streamPruner ImageStreamDeleter,
-		layerLinkPruner LayerLinkDeleter,
-		blobPruner BlobDeleter,
-		manifestPruner ManifestDeleter,
-	) (deletions []Deletion, failures []Failure)
-}
-
-// pruner is an object that knows how to prune a data set
-type pruner struct {
-	g                     genericgraph.Graph
-	algorithm             pruneAlgorithm
-	ignoreInvalidRefs     bool
-	registryClientFactory RegistryClientFactoryFunc
-	registryURL           *url.URL
-	imageWatcher          watch.Interface
-	imageStreamWatcher    watch.Interface
-	imageStreamLimits     map[string][]*corev1.LimitRange
-	// sorted queue of images to prune; nil stands for empty queue
-	queue *nodeItem
-	// contains prunable images removed from queue that are currently being processed
-	processedImages map[*imagegraph.ImageNode]*Job
-	numWorkers      int
-}
-
-var _ Pruner = &pruner{}
-
-// NewPruner creates a Pruner.
-//
-// Images younger than keepYoungerThan and images referenced by image streams
-// and/or pods younger than keepYoungerThan are preserved. All other images are
-// candidates for pruning. For example, if keepYoungerThan is 60m, and an
-// ImageStream is only 59 minutes old, none of the images it references are
-// eligible for pruning.
-//
-// keepTagRevisions is the number of revisions per tag in an image stream's
-// status.tags that are preserved and ineligible for pruning. Any revision older
-// than keepTagRevisions is eligible for pruning.
-//
-// pruneOverSizeLimit is a boolean flag specifying that all images exceeding limits
-// defined in their namespace will be considered for pruning. Note that this flag
-// cannot be combined with any of the keep* flags.
-//
-// images, streams, pods, rcs, bcs, builds, daemonsets and dcs are the resources used to run
-// the pruning algorithm. These should be the full list for each type from the
-// cluster; otherwise, the pruning algorithm might result in incorrect
-// calculations and premature pruning.
-//
-// The ImageDeleter performs the following logic:
-//
-// remove any image that was created at least *n* minutes ago and is *not*
-// currently referenced by:
-//
-// - any pod created less than *n* minutes ago
-// - any image stream created less than *n* minutes ago
-// - any running pods
-// - any pending pods
-// - any replication controllers
-// - any daemonsets
-// - any kube deployments
-// - any deployment configs
-// - any replica sets
-// - any build configs
-// - any builds
-// - the n most recent tag revisions in an image stream's status.tags
-//
-// including only images with the annotation openshift.io/image.managed=true
-// unless allImages is true.
-//
-// When removing an image, remove all references to the image from all
-// ImageStreams having a reference to the image in `status.tags`.
-//
-// Also automatically remove any image layer that is no longer referenced by any
-// images.
-func NewPruner(options PrunerOptions) (Pruner, kerrors.Aggregate) {
-	klog.V(1).Infof("Creating image pruner with keepYoungerThan=%v, keepTagRevisions=%s, pruneOverSizeLimit=%s, allImages=%s",
-		options.KeepYoungerThan, getValue(options.KeepTagRevisions), getValue(options.PruneOverSizeLimit), getValue(options.AllImages))
-
-	algorithm := pruneAlgorithm{}
-	if options.KeepYoungerThan != nil {
-		algorithm.keepYoungerThan = metav1.Now().Add(-*options.KeepYoungerThan)
-	}
-	if options.KeepTagRevisions != nil {
-		algorithm.keepTagRevisions = *options.KeepTagRevisions
-	}
-	if options.PruneOverSizeLimit != nil {
-		algorithm.pruneOverSizeLimit = *options.PruneOverSizeLimit
-	}
-	algorithm.allImages = true
-	if options.AllImages != nil {
-		algorithm.allImages = *options.AllImages
-	}
-	algorithm.pruneRegistry = true
-	if options.PruneRegistry != nil {
-		algorithm.pruneRegistry = *options.PruneRegistry
-	}
-	algorithm.namespace = options.Namespace
-
-	p := &pruner{
-		algorithm:             algorithm,
-		ignoreInvalidRefs:     options.IgnoreInvalidRefs,
-		registryClientFactory: options.RegistryClientFactory,
-		registryURL:           options.RegistryURL,
-		processedImages:       make(map[*imagegraph.ImageNode]*Job),
-		imageWatcher:          options.ImageWatcher,
-		imageStreamWatcher:    options.StreamWatcher,
-		imageStreamLimits:     options.LimitRanges,
-		numWorkers:            options.NumWorkers,
-	}
-
-	if p.numWorkers < 1 {
-		p.numWorkers = defaultPruneImageWorkerCount
-	}
-
-	if err := p.buildGraph(options); err != nil {
-		return nil, err
-	}
-
-	return p, nil
-}
-
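A hedged construction sketch for NewPruner: the thresholds are invented defaults, and the caller is assumed to have already populated the list fields of opts (Images, Streams, Pods, ...) from prior API calls:

	func examplePruner(opts PrunerOptions) (Pruner, error) {
		keepYoungerThan := 60 * time.Minute
		keepTagRevisions := 3
		opts.KeepYoungerThan = &keepYoungerThan
		opts.KeepTagRevisions = &keepTagRevisions
		opts.RegistryClientFactory = FakeRegistryClientFactory // dry-run style
		opts.DryRun = true
		p, errs := NewPruner(opts)
		if errs != nil {
			return nil, errs
		}
		return p, nil
	}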
-// buildGraph builds the graph of images and the resources that reference them from the given options.
-func (p *pruner) buildGraph(options PrunerOptions) kerrors.Aggregate {
-	p.g = genericgraph.New()
-
-	var errs []error
-
-	errs = append(errs, p.addImagesToGraph(options.Images)...)
-	errs = append(errs, p.addImageStreamsToGraph(options.Streams, options.LimitRanges)...)
-	errs = append(errs, p.addPodsToGraph(options.Pods)...)
-	errs = append(errs, p.addReplicationControllersToGraph(options.RCs)...)
-	errs = append(errs, p.addBuildConfigsToGraph(options.BCs)...)
-	errs = append(errs, p.addBuildsToGraph(options.Builds)...)
-	errs = append(errs, p.addDaemonSetsToGraph(options.DSs)...)
-	errs = append(errs, p.addDeploymentsToGraph(options.Deployments)...)
-	errs = append(errs, p.addDeploymentConfigsToGraph(options.DCs)...)
-	errs = append(errs, p.addReplicaSetsToGraph(options.RSs)...)
-
-	return kerrors.NewAggregate(errs)
-}
-
-func getValue(option interface{}) string {
-	if v := reflect.ValueOf(option); !v.IsNil() {
-		return fmt.Sprintf("%v", v.Elem())
-	}
-	return ""
-}
-
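Behavior of getValue in a nutshell (hypothetical values):

	// keep := 3
	// getValue(&keep)       // "3"  (pointer dereferenced for logging)
	// getValue((*int)(nil)) // ""   (nil pointers render as the empty string)
	// Passing an untyped nil interface would panic inside reflect, so callers
	// only ever hand it the typed option pointers above.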
-// addImagesToGraph adds all images, their manifests and their layers to the graph.
-func (p *pruner) addImagesToGraph(images *imagev1.ImageList) []error {
-	var errs []error
-	for i := range images.Items {
-		image := &images.Items[i]
-
-		klog.V(4).Infof("Adding image %q to graph", image.Name)
-		imageNode := imagegraph.EnsureImageNode(p.g, image)
-
-		if err := imageutil.ImageWithMetadata(image); err != nil {
-			klog.V(1).Infof("Failed to read image metadata for image %s: %v", image.Name, err)
-			errs = append(errs, err)
-			continue
-		}
-		dockerImage, ok := image.DockerImageMetadata.Object.(*dockerv10.DockerImage)
-		if !ok {
-			klog.V(1).Infof("Failed to read image metadata for image %s", image.Name)
-			errs = append(errs, fmt.Errorf("Failed to read image metadata for image %s", image.Name))
-			continue
-		}
-		if image.DockerImageManifestMediaType == schema2.MediaTypeManifest && len(dockerImage.ID) > 0 {
-			configName := dockerImage.ID
-			klog.V(4).Infof("Adding image config %q to graph", configName)
-			configNode := imagegraph.EnsureImageComponentConfigNode(p.g, configName)
-			p.g.AddEdge(imageNode, configNode, ReferencedImageConfigEdgeKind)
-		}
-
-		for _, layer := range image.DockerImageLayers {
-			klog.V(4).Infof("Adding image layer %q to graph", layer.Name)
-			layerNode := imagegraph.EnsureImageComponentLayerNode(p.g, layer.Name)
-			p.g.AddEdge(imageNode, layerNode, ReferencedImageLayerEdgeKind)
-		}
-
-		klog.V(4).Infof("Adding image manifest %q to graph", image.Name)
-		manifestNode := imagegraph.EnsureImageComponentManifestNode(p.g, image.Name)
-		p.g.AddEdge(imageNode, manifestNode, ReferencedImageManifestEdgeKind)
-	}
-
-	return errs
-}
-
-// addImageStreamsToGraph adds all the streams to the graph. The most recent n
-// image revisions for a tag will be preserved, where n is specified by the
-// algorithm's keepTagRevisions. Image revisions older than n are candidates
-// for pruning if the image stream's age is at least as old as the minimum
-// threshold in algorithm.  Otherwise, if the image stream is younger than the
-// threshold, all image revisions for that stream are ineligible for pruning.
-// If the pruneOverSizeLimit flag is set to true, none of the above matters; instead,
-// every image's size is checked against the LimitRanges defined in that same namespace,
-// and whenever the size exceeds the smallest limit in that namespace, the image
-// becomes a candidate for pruning.
-//
-// addImageStreamsToGraph also adds references from each stream to all the
-// layers it references (via each image a stream references).
-func (p *pruner) addImageStreamsToGraph(streams *imagev1.ImageStreamList, limits map[string][]*corev1.LimitRange) []error {
-	for i := range streams.Items {
-		stream := &streams.Items[i]
-
-		klog.V(4).Infof("Examining ImageStream %s", getName(stream))
-
-		// use a weak reference for old image revisions by default
-		oldImageRevisionReferenceKind := WeakReferencedImageEdgeKind
-
-		if !p.algorithm.pruneOverSizeLimit && stream.CreationTimestamp.Time.After(p.algorithm.keepYoungerThan) {
-			// stream's age is below threshold - use a strong reference for old image revisions instead
-			oldImageRevisionReferenceKind = ReferencedImageEdgeKind
-		}
-
-		klog.V(4).Infof("Adding ImageStream %s to graph", getName(stream))
-		isNode := imagegraph.EnsureImageStreamNode(p.g, stream)
-		imageStreamNode := isNode.(*imagegraph.ImageStreamNode)
-
-		for _, tag := range stream.Status.Tags {
-			istNode := imagegraph.EnsureImageStreamTagNode(p.g, makeISTagWithStream(stream, tag.Tag))
-
-			for i, tagEvent := range tag.Items {
-				imageNode := imagegraph.FindImage(p.g, tag.Items[i].Image)
-				if imageNode == nil {
-					klog.V(2).Infof("Unable to find image %q in graph (from tag=%q, revision=%d, dockerImageReference=%s) - skipping",
-						tag.Items[i].Image, tag.Tag, tagEvent.Generation, tag.Items[i].DockerImageReference)
-					continue
-				}
-
-				kind := oldImageRevisionReferenceKind
-				if p.algorithm.pruneOverSizeLimit {
-					if exceedsLimits(stream, imageNode.Image, limits) {
-						kind = WeakReferencedImageEdgeKind
-					} else {
-						kind = ReferencedImageEdgeKind
-					}
-				} else {
-					if i < p.algorithm.keepTagRevisions {
-						kind = ReferencedImageEdgeKind
-					}
-				}
-
-				if i == 0 {
-					klog.V(4).Infof("Adding edge (kind=%s) from %q to %q", kind, istNode.UniqueName(), imageNode.UniqueName())
-					p.g.AddEdge(istNode, imageNode, kind)
-				}
-
-				klog.V(4).Infof("Checking for existing strong reference from stream %s to image %s", getName(stream), imageNode.Image.Name)
-				if edge := p.g.Edge(imageStreamNode, imageNode); edge != nil && p.g.EdgeKinds(edge).Has(ReferencedImageEdgeKind) {
-					klog.V(4).Infof("Strong reference found")
-					continue
-				}
-
-				klog.V(4).Infof("Adding edge (kind=%s) from %q to %q", kind, imageStreamNode.UniqueName(), imageNode.UniqueName())
-				p.g.AddEdge(imageStreamNode, imageNode, kind)
-
-				klog.V(4).Infof("Adding stream->(layer|config) references")
-				// add stream -> layer references so we can prune them later
-				for _, s := range p.g.From(imageNode) {
-					cn, ok := s.(*imagegraph.ImageComponentNode)
-					if !ok {
-						continue
-					}
-
-					klog.V(4).Infof("Adding reference from stream %s to %s", getName(stream), cn.Describe())
-					switch cn.Type {
-					case imagegraph.ImageComponentTypeConfig:
-						p.g.AddEdge(imageStreamNode, s, ReferencedImageConfigEdgeKind)
-					case imagegraph.ImageComponentTypeLayer:
-						p.g.AddEdge(imageStreamNode, s, ReferencedImageLayerEdgeKind)
-					case imagegraph.ImageComponentTypeManifest:
-						p.g.AddEdge(imageStreamNode, s, ReferencedImageManifestEdgeKind)
-					default:
-						utilruntime.HandleError(fmt.Errorf("internal error: unhandled image component type %q", cn.Type))
-					}
-				}
-			}
-		}
-	}
-
-	return nil
-}
-
-// exceedsLimits checks if given image exceeds LimitRanges defined in ImageStream's namespace.
-func exceedsLimits(is *imagev1.ImageStream, image *imagev1.Image, limits map[string][]*corev1.LimitRange) bool {
-	limitRanges, ok := limits[is.Namespace]
-	if !ok || len(limitRanges) == 0 {
-		return false
-	}
-
-	if err := imageutil.ImageWithMetadata(image); err != nil {
-		return false
-	}
-	dockerImage, ok := image.DockerImageMetadata.Object.(*dockerv10.DockerImage)
-	if !ok {
-		return false
-	}
-	imageSize := resource.NewQuantity(dockerImage.Size, resource.BinarySI)
-	for _, limitRange := range limitRanges {
-		if limitRange == nil {
-			continue
-		}
-		for _, limit := range limitRange.Spec.Limits {
-			if limit.Type != imagev1.LimitTypeImage {
-				continue
-			}
-
-			limitQuantity, ok := limit.Max[corev1.ResourceStorage]
-			if !ok {
-				continue
-			}
-			if limitQuantity.Cmp(*imageSize) < 0 {
-				// image size is larger than the permitted limit range max size
-				klog.V(4).Infof("Image %s in stream %s exceeds limit %s: %v vs %v",
-					image.Name, getName(is), limitRange.Name, *imageSize, limitQuantity)
-				return true
-			}
-		}
-	}
-	return false
-}
-
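A sketch of the limits map that exceedsLimits consumes; the "myproject" namespace and the 1Gi cap are invented values:

	func exampleImageLimits() map[string][]*corev1.LimitRange {
		maxSize := resource.MustParse("1Gi")
		return map[string][]*corev1.LimitRange{
			"myproject": {{
				Spec: corev1.LimitRangeSpec{
					Limits: []corev1.LimitRangeItem{{
						Type: imagev1.LimitTypeImage, // only this limit type is inspected
						Max:  corev1.ResourceList{corev1.ResourceStorage: maxSize},
					}},
				},
			}},
		}
	}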
-// addPodsToGraph adds pods to the graph.
-//
-// Edges are added to the graph from each pod to the images specified by that
-// pod's list of containers, as long as the image is managed by OpenShift.
-func (p *pruner) addPodsToGraph(pods *corev1.PodList) []error {
-	var errs []error
-
-	for i := range pods.Items {
-		pod := &pods.Items[i]
-
-		desc := fmt.Sprintf("Pod %s", getName(pod))
-		klog.V(4).Infof("Examining %s", desc)
-
-		// A pod is only *excluded* from being added to the graph if its phase is not
-		// pending or running. Additionally, it has to be at least as old as the minimum
-		// age threshold defined by the algorithm.
-		if pod.Status.Phase != corev1.PodRunning && pod.Status.Phase != corev1.PodPending {
-			if !pod.CreationTimestamp.Time.After(p.algorithm.keepYoungerThan) {
-				klog.V(4).Infof("Ignoring %s for image reference counting because it's not running/pending and is too old", desc)
-				continue
-			}
-		}
-
-		klog.V(4).Infof("Adding %s to graph", desc)
-		podNode := kubegraph.EnsurePodNode(p.g, pod)
-
-		errs = append(errs, p.addPodSpecToGraph(getRef(pod), &pod.Spec, podNode)...)
-	}
-
-	return errs
-}
-
-// addPodSpecToGraph adds edges to the graph from each predecessor (pod or replication
-// controller) to the images specified by the pod spec's list of containers, as
-// long as the image is managed by OpenShift.
-func (p *pruner) addPodSpecToGraph(referrer *corev1.ObjectReference, spec *corev1.PodSpec, predecessor gonum.Node) []error {
-	var errs []error
-
-	for j := range spec.Containers {
-		container := spec.Containers[j]
-
-		if len(strings.TrimSpace(container.Image)) == 0 {
-			klog.V(4).Infof("Ignoring edge from %s because container has no reference to image", getKindName(referrer))
-			continue
-		}
-
-		klog.V(4).Infof("Examining container image %q", container.Image)
-
-		ref, err := reference.Parse(container.Image)
-		if err != nil {
-			klog.Warningf("Unable to parse DockerImageReference %q of %s: %v - skipping", container.Image, getKindName(referrer), err)
-			if !p.ignoreInvalidRefs {
-				errs = append(errs, newErrBadReferenceToImage(container.Image, referrer, err.Error()))
-			}
-			continue
-		}
-
-		if len(ref.ID) == 0 {
-			// Attempt to dereference the istag. Since we cannot be sure whether the reference refers to the
-			// integrated registry or not, we ignore the host part completely. As a consequence, we may keep
-			// an image otherwise slated for removal just because its pull spec accidentally matches one of
-			// our imagestreamtags.
-
-			// set the tag if empty
-			ref = ref.DockerClientDefaults()
-			klog.V(4).Infof("%q has no image ID", container.Image)
-			node := p.g.Find(imagegraph.ImageStreamTagNodeName(makeISTag(ref.Namespace, ref.Name, ref.Tag)))
-			if node == nil {
-				klog.V(4).Infof("No image stream tag found for %q - skipping", container.Image)
-				continue
-			}
-			for _, n := range p.g.From(node) {
-				imgNode, ok := n.(*imagegraph.ImageNode)
-				if !ok {
-					continue
-				}
-				klog.V(4).Infof("Adding edge from pod to image %q referenced by %s:%s", imgNode.Image.Name, ref.RepositoryName(), ref.Tag)
-				p.g.AddEdge(predecessor, imgNode, ReferencedImageEdgeKind)
-			}
-			continue
-		}
-
-		imageNode := imagegraph.FindImage(p.g, ref.ID)
-		if imageNode == nil {
-			klog.V(2).Infof("Unable to find image %q referenced by %s in the graph - skipping", ref.ID, getKindName(referrer))
-			continue
-		}
-
-		klog.V(4).Infof("Adding edge from %s to image %v", getKindName(referrer), imageNode)
-		p.g.AddEdge(predecessor, imageNode, ReferencedImageEdgeKind)
-	}
-
-	return errs
-}
-
-// addReplicationControllersToGraph adds replication controllers to the graph.
-//
-// Edges are added to the graph from each replication controller to the images
-// specified by its pod spec's list of containers, as long as the image is
-// managed by OpenShift.
-func (p *pruner) addReplicationControllersToGraph(rcs *corev1.ReplicationControllerList) []error {
-	var errs []error
-
-	for i := range rcs.Items {
-		rc := &rcs.Items[i]
-		desc := fmt.Sprintf("ReplicationController %s", getName(rc))
-		klog.V(4).Infof("Examining %s", desc)
-		rcNode := kubegraph.EnsureReplicationControllerNode(p.g, rc)
-		errs = append(errs, p.addPodSpecToGraph(getRef(rc), &rc.Spec.Template.Spec, rcNode)...)
-	}
-
-	return errs
-}
-
-// addDaemonSetsToGraph adds daemon sets to the graph.
-//
-// Edges are added to the graph from each daemon set to the images specified by its pod spec's list of
-// containers, as long as the image is managed by OpenShift.
-func (p *pruner) addDaemonSetsToGraph(dss *kappsv1.DaemonSetList) []error {
-	var errs []error
-
-	for i := range dss.Items {
-		ds := &dss.Items[i]
-		desc := fmt.Sprintf("DaemonSet %s", getName(ds))
-		klog.V(4).Infof("Examining %s", desc)
-		dsNode := kubegraph.EnsureDaemonSetNode(p.g, ds)
-		errs = append(errs, p.addPodSpecToGraph(getRef(ds), &ds.Spec.Template.Spec, dsNode)...)
-	}
-
-	return errs
-}
-
-// addDeploymentsToGraph adds kube's deployments to the graph.
-//
-// Edges are added to the graph from each deployment to the images specified by its pod spec's list of
-// containers, as long as the image is managed by OpenShift.
-func (p *pruner) addDeploymentsToGraph(dmnts *kappsv1.DeploymentList) []error {
-	var errs []error
-
-	for i := range dmnts.Items {
-		d := &dmnts.Items[i]
-		ref := getRef(d)
-		klog.V(4).Infof("Examining %s", getKindName(ref))
-		dNode := kubegraph.EnsureDeploymentNode(p.g, d)
-		errs = append(errs, p.addPodSpecToGraph(ref, &d.Spec.Template.Spec, dNode)...)
-	}
-
-	return errs
-}
-
-// addDeploymentConfigsToGraph adds deployment configs to the graph.
-//
-// Edges are added to the graph from each deployment config to the images
-// specified by its pod spec's list of containers, as long as the image is
-// managed by OpenShift.
-func (p *pruner) addDeploymentConfigsToGraph(dcs *appsv1.DeploymentConfigList) []error {
-	var errs []error
-
-	for i := range dcs.Items {
-		dc := &dcs.Items[i]
-		ref := getRef(dc)
-		klog.V(4).Infof("Examining %s", getKindName(ref))
-		dcNode := appsgraph.EnsureDeploymentConfigNode(p.g, dc)
-		errs = append(errs, p.addPodSpecToGraph(getRef(dc), &dc.Spec.Template.Spec, dcNode)...)
-	}
-
-	return errs
-}
-
-// addReplicaSetsToGraph adds replica sets to the graph.
-//
-// Edges are added to the graph from each replica set to the images specified by its pod spec's list of
-// containers, as long as the image is managed by OpenShift.
-func (p *pruner) addReplicaSetsToGraph(rss *kappsv1.ReplicaSetList) []error {
-	var errs []error
-
-	for i := range rss.Items {
-		rs := &rss.Items[i]
-		ref := getRef(rs)
-		klog.V(4).Infof("Examining %s", getKindName(ref))
-		rsNode := kubegraph.EnsureReplicaSetNode(p.g, rs)
-		errs = append(errs, p.addPodSpecToGraph(ref, &rs.Spec.Template.Spec, rsNode)...)
-	}
-
-	return errs
-}
-
-// addBuildConfigsToGraph adds build configs to the graph.
-//
-// Edges are added to the graph from each build config to the image specified by its strategy.from.
-func (p *pruner) addBuildConfigsToGraph(bcs *buildv1.BuildConfigList) []error {
-	var errs []error
-
-	for i := range bcs.Items {
-		bc := &bcs.Items[i]
-		ref := getRef(bc)
-		klog.V(4).Infof("Examining %s", getKindName(ref))
-		bcNode := buildgraph.EnsureBuildConfigNode(p.g, bc)
-		errs = append(errs, p.addBuildStrategyImageReferencesToGraph(ref, bc.Spec.Strategy, bcNode)...)
-	}
-
-	return errs
-}
-
-// addBuildsToGraph adds builds to the graph.
-//
-// Edges are added to the graph from each build to the image specified by its strategy.from.
-func (p *pruner) addBuildsToGraph(builds *buildv1.BuildList) []error {
-	var errs []error
-
-	for i := range builds.Items {
-		build := &builds.Items[i]
-		ref := getRef(build)
-		klog.V(4).Infof("Examining %s", getKindName(ref))
-		buildNode := buildgraph.EnsureBuildNode(p.g, build)
-		errs = append(errs, p.addBuildStrategyImageReferencesToGraph(ref, build.Spec.Strategy, buildNode)...)
-	}
-
-	return errs
-}
-
-// resolveISTagName parses the given image stream tag name and tries to find it in the graph. If the
-// parsing fails, an error is returned. If the istag cannot be found, nil is returned.
-func (p *pruner) resolveISTagName(g genericgraph.Graph, referrer *corev1.ObjectReference, istagName string) (*imagegraph.ImageStreamTagNode, error) {
-	name, tag, err := imageutil.ParseImageStreamTagName(istagName)
-	if err != nil {
-		if p.ignoreInvalidRefs {
-			klog.Warningf("Failed to parse ImageStreamTag name %q: %v", istagName, err)
-			return nil, nil
-		}
-		return nil, newErrBadReferenceTo("ImageStreamTag", istagName, referrer, err.Error())
-	}
-	node := g.Find(imagegraph.ImageStreamTagNodeName(makeISTag(referrer.Namespace, name, tag)))
-	if istNode, ok := node.(*imagegraph.ImageStreamTagNode); ok {
-		return istNode, nil
-	}
-
-	return nil, nil
-}
-
-// addBuildStrategyImageReferencesToGraph adds references from the build strategy's parent node to the image
-// the build strategy references.
-//
-// Edges are added to the graph from each predecessor (build or build config)
-// to the image specified by strategy.from, as long as the image is managed by
-// OpenShift.
-func (p *pruner) addBuildStrategyImageReferencesToGraph(referrer *corev1.ObjectReference, strategy buildv1.BuildStrategy, predecessor gonum.Node) []error {
-	from := buildutil.GetInputReference(strategy)
-	if from == nil {
-		klog.V(4).Infof("Unable to determine 'from' reference - skipping")
-		return nil
-	}
-
-	klog.V(4).Infof("Examining build strategy with from: %#v", from)
-
-	var imageID string
-
-	switch from.Kind {
-	case "DockerImage":
-		if len(strings.TrimSpace(from.Name)) == 0 {
-			klog.V(4).Infof("Ignoring edge from %s because build strategy has no reference to image", getKindName(referrer))
-			return nil
-		}
-		ref, err := reference.Parse(from.Name)
-		if err != nil {
-			klog.Warningf("Failed to parse DockerImage name %q of %s: %v", from.Name, getKindName(referrer), err)
-			if !p.ignoreInvalidRefs {
-				return []error{newErrBadReferenceToImage(from.Name, referrer, err.Error())}
-			}
-			return nil
-		}
-		imageID = ref.ID
-
-	case "ImageStreamImage":
-		_, id, err := imageutil.ParseImageStreamImageName(from.Name)
-		if err != nil {
-			klog.Warningf("Failed to parse ImageStreamImage name %q of %s: %v", from.Name, getKindName(referrer), err)
-			if !p.ignoreInvalidRefs {
-				return []error{newErrBadReferenceTo("ImageStreamImage", from.Name, referrer, err.Error())}
-			}
-			return nil
-		}
-		imageID = id
-
-	case "ImageStreamTag":
-		istNode, err := p.resolveISTagName(p.g, referrer, from.Name)
-		if err != nil {
-			klog.V(4).Info(err.Error())
-			return []error{err}
-		}
-		if istNode == nil {
-			klog.V(2).Infof("%s referenced by %s could not be found", getKindName(from), getKindName(referrer))
-			return nil
-		}
-		for _, n := range p.g.From(istNode) {
-			imgNode, ok := n.(*imagegraph.ImageNode)
-			if !ok {
-				continue
-			}
-			imageID = imgNode.Image.Name
-			break
-		}
-		if len(imageID) == 0 {
-			klog.V(4).Infof("No image referenced by %s found", getKindName(from))
-			return nil
-		}
-
-	default:
-		klog.V(4).Infof("Ignoring unrecognized source location %q in %s", getKindName(from), getKindName(referrer))
-		return nil
-	}
-
-	klog.V(4).Infof("Looking for image %q in graph", imageID)
-	imageNode := imagegraph.FindImage(p.g, imageID)
-	if imageNode == nil {
-		klog.V(2).Infof("Unable to find image %q in graph referenced by %s - skipping", imageID, getKindName(referrer))
-		return nil
-	}
-
-	klog.V(4).Infof("Adding edge from %s to image %s", predecessor, imageNode.Image.Name)
-	p.g.AddEdge(predecessor, imageNode, ReferencedImageEdgeKind)
-
-	return nil
-}
-
-func (p *pruner) handleImageStreamEvent(event watch.Event) {
-	getIsNode := func() (*imagev1.ImageStream, *imagegraph.ImageStreamNode) {
-		is, ok := event.Object.(*imagev1.ImageStream)
-		if !ok {
-			utilruntime.HandleError(fmt.Errorf("internal error: expected ImageStream object in %s event, not %T", event.Type, event.Object))
-			return nil, nil
-		}
-		n := p.g.Find(imagegraph.ImageStreamNodeName(is))
-		if isNode, ok := n.(*imagegraph.ImageStreamNode); ok {
-			return is, isNode
-		}
-		return is, nil
-	}
-
-	// NOTE: an addition of an imagestream previously deleted from the graph is a noop due to a limitation of
-	// the current gonum/graph package
-	switch event.Type {
-	case watch.Added:
-		is, isNode := getIsNode()
-		if is == nil {
-			return
-		}
-		if isNode != nil {
-			klog.V(4).Infof("Ignoring added ImageStream %s that is already present in the graph", getName(is))
-			return
-		}
-		klog.V(4).Infof("Adding ImageStream %s to the graph", getName(is))
-		p.addImageStreamsToGraph(&imagev1.ImageStreamList{Items: []imagev1.ImageStream{*is}}, p.imageStreamLimits)
-
-	case watch.Modified:
-		is, isNode := getIsNode()
-		if is == nil {
-			return
-		}
-
-		if isNode != nil {
-			klog.V(4).Infof("Removing updated ImageStream %s from the graph", getName(is))
-			// first remove the current node if present
-			p.g.RemoveNode(isNode)
-		}
-
-		klog.V(4).Infof("Adding updated ImageStream %s back to the graph", getName(is))
-		p.addImageStreamsToGraph(&imagev1.ImageStreamList{Items: []imagev1.ImageStream{*is}}, p.imageStreamLimits)
-	}
-}
-
-func (p *pruner) handleImageEvent(event watch.Event) {
-	getImageNode := func() (*imagev1.Image, *imagegraph.ImageNode) {
-		img, ok := event.Object.(*imagev1.Image)
-		if !ok {
-			utilruntime.HandleError(fmt.Errorf("internal error: expected Image object in %s event, not %T", event.Type, event.Object))
-			return nil, nil
-		}
-		return img, imagegraph.FindImage(p.g, img.Name)
-	}
-
-	switch event.Type {
-	// NOTE: an addition of an image previously deleted from the graph is a noop due to a limitation of the
-	// current gonum/graph package
-	case watch.Added:
-		img, imgNode := getImageNode()
-		if img == nil {
-			return
-		}
-		if imgNode != nil {
-			klog.V(4).Infof("Ignoring added Image %s that is already present in the graph", img.Name)
-			return
-		}
-		klog.V(4).Infof("Adding new Image %s to the graph", img.Name)
-		p.addImagesToGraph(&imagev1.ImageList{Items: []imagev1.Image{*img}})
-
-	case watch.Deleted:
-		img, imgNode := getImageNode()
-		if imgNode == nil {
-			klog.V(4).Infof("Ignoring event for deleted Image %s that is not present in the graph", img.Name)
-			return
-		}
-		klog.V(4).Infof("Removing deleted image %s from the graph", img.Name)
-		p.g.RemoveNode(imgNode)
-	}
-}
-
-// getImageNodes returns only nodes of type ImageNode.
-func getImageNodes(nodes []gonum.Node) map[string]*imagegraph.ImageNode {
-	ret := make(map[string]*imagegraph.ImageNode)
-	for i := range nodes {
-		if node, ok := nodes[i].(*imagegraph.ImageNode); ok {
-			ret[node.Image.Name] = node
-		}
-	}
-	return ret
-}
-
-// edgeKind returns true if the edge from "from" to "to" is of the desired kind.
-func edgeKind(g genericgraph.Graph, from, to gonum.Node, desiredKind string) bool {
-	edge := g.Edge(from, to)
-	kinds := g.EdgeKinds(edge)
-	return kinds.Has(desiredKind)
-}
-
-// imageIsPrunable returns true if the image node only has weak references
-// from its predecessors to it. A weak reference to an image is a reference
-// from an image stream to an image where the image is not the current image
-// for a tag and the image stream is at least as old as the minimum pruning
-// age.
-func imageIsPrunable(g genericgraph.Graph, imageNode *imagegraph.ImageNode, algorithm pruneAlgorithm) bool {
-	if !algorithm.allImages {
-		if imageNode.Image.Annotations[imagev1.ManagedByOpenShiftAnnotation] != "true" {
-			klog.V(4).Infof("Image %q with DockerImageReference %q belongs to an external registry - skipping",
-				imageNode.Image.Name, imageNode.Image.DockerImageReference)
-			return false
-		}
-	}
-
-	if !algorithm.pruneOverSizeLimit && imageNode.Image.CreationTimestamp.Time.After(algorithm.keepYoungerThan) {
-		klog.V(4).Infof("Image %q is younger than minimum pruning age", imageNode.Image.Name)
-		return false
-	}
-
-	for _, n := range g.To(imageNode) {
-		klog.V(4).Infof("Examining predecessor %#v", n)
-		if edgeKind(g, n, imageNode, ReferencedImageEdgeKind) {
-			klog.V(4).Infof("Strong reference detected")
-			return false
-		}
-	}
-
-	return true
-}
-
-func calculatePrunableImages(
-	g genericgraph.Graph,
-	imageNodes map[string]*imagegraph.ImageNode,
-	algorithm pruneAlgorithm,
-) []*imagegraph.ImageNode {
-	prunable := []*imagegraph.ImageNode{}
-
-	for _, imageNode := range imageNodes {
-		klog.V(4).Infof("Examining image %q", imageNode.Image.Name)
-
-		if imageIsPrunable(g, imageNode, algorithm) {
-			klog.V(4).Infof("Image %q is prunable", imageNode.Image.Name)
-			prunable = append(prunable, imageNode)
-		}
-	}
-
-	return prunable
-}
-
-// pruneStreams removes references from all image streams' status.tags entries to prunable images, invoking
-// streamPruner.UpdateImageStream for each updated stream.
-func pruneStreams(
-	g genericgraph.Graph,
-	prunableImageNodes []*imagegraph.ImageNode,
-	streamPruner ImageStreamDeleter,
-	keepYoungerThan time.Time,
-) (deletions []Deletion, failures []Failure) {
-	imageNameToNode := map[string]*imagegraph.ImageNode{}
-	for _, node := range prunableImageNodes {
-		imageNameToNode[node.Image.Name] = node
-	}
-
-	noChangeErr := errors.New("nothing changed")
-
-	klog.V(4).Infof("Removing pruned image references from streams")
-	for _, node := range g.Nodes() {
-		streamNode, ok := node.(*imagegraph.ImageStreamNode)
-		if !ok {
-			continue
-		}
-		streamName := getName(streamNode.ImageStream)
-		err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
-			stream, err := streamPruner.GetImageStream(streamNode.ImageStream)
-			if err != nil {
-				if kerrapi.IsNotFound(err) {
-					klog.V(4).Infof("Unable to get image stream %s: removed during prune", streamName)
-					return noChangeErr
-				}
-				return err
-			}
-
-			updatedTags := sets.NewString()
-			deletedTags := sets.NewString()
-
-			for _, tag := range stream.Status.Tags {
-				if updated, deleted := pruneISTagHistory(g, imageNameToNode, keepYoungerThan, streamName, stream, tag.Tag); deleted {
-					deletedTags.Insert(tag.Tag)
-				} else if updated {
-					updatedTags.Insert(tag.Tag)
-				}
-			}
-
-			if updatedTags.Len() == 0 && deletedTags.Len() == 0 {
-				return noChangeErr
-			}
-
-			updatedStream, err := streamPruner.UpdateImageStream(stream)
-			if err == nil {
-				streamPruner.NotifyImageStreamPrune(stream, updatedTags.List(), deletedTags.List())
-				streamNode.ImageStream = updatedStream
-			}
-
-			if kerrapi.IsNotFound(err) {
-				klog.V(4).Infof("Unable to update image stream %s: removed during prune", streamName)
-				return nil
-			}
-
-			return err
-		})
-
-		if err == noChangeErr {
-			continue
-		}
-		if err != nil {
-			failures = append(failures, Failure{Node: streamNode, Err: err})
-		} else {
-			deletions = append(deletions, Deletion{Node: streamNode})
-		}
-	}
-
-	klog.V(4).Infof("Done removing pruned image references from streams")
-	return
-}
-
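The sentinel-error pattern used in pruneStreams, reduced to a standalone sketch: returning a private error from inside RetryOnConflict both stops the retry loop and lets the caller distinguish "nothing to do" from a real failure. The update callback is hypothetical:

	func retryIgnoringNoChange(update func() (changed bool, err error)) error {
		noChange := errors.New("nothing changed")
		err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
			changed, err := update()
			if err != nil {
				return err // conflict errors are retried; others bubble up
			}
			if !changed {
				return noChange // not a conflict, so the retry loop stops at once
			}
			return nil
		})
		if err == noChange {
			return nil // "nothing changed" is treated as success
		}
		return err
	}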
-// strengthenReferencesFromFailedImageStreams turns weak references between image streams and images to
-// strong. This must be called right after the image stream pruning to prevent images that failed to be
-// untagged from being pruned.
-func strengthenReferencesFromFailedImageStreams(g genericgraph.Graph, failures []Failure) {
-	for _, f := range failures {
-		for _, n := range g.From(f.Node) {
-			imageNode, ok := n.(*imagegraph.ImageNode)
-			if !ok {
-				continue
-			}
-			edge := g.Edge(f.Node, imageNode)
-			if edge == nil {
-				continue
-			}
-			kinds := g.EdgeKinds(edge)
-			if kinds.Has(ReferencedImageEdgeKind) {
-				continue
-			}
-			g.RemoveEdge(edge)
-			g.AddEdge(f.Node, imageNode, ReferencedImageEdgeKind)
-		}
-	}
-}
-
-// pruneISTagHistory processes the tag event list of the given image stream tag. It removes references to
-// images that are going to be removed or are missing from the graph.
-func pruneISTagHistory(
-	g genericgraph.Graph,
-	prunableImageNodes map[string]*imagegraph.ImageNode,
-	keepYoungerThan time.Time,
-	streamName string,
-	imageStream *imagev1.ImageStream,
-	tag string,
-) (tagUpdated, tagDeleted bool) {
-	history, _ := imageutil.StatusHasTag(imageStream, tag)
-	newHistory := imagev1.NamedTagEventList{Tag: tag}
-
-	for _, tagEvent := range history.Items {
-		klog.V(4).Infof("Checking image stream tag %s:%s generation %d with image %q", streamName, tag, tagEvent.Generation, tagEvent.Image)
-
-		if ok, reason := tagEventIsPrunable(tagEvent, g, prunableImageNodes, keepYoungerThan); ok {
-			klog.V(4).Infof("Image stream tag %s:%s generation %d - removing because %s", streamName, tag, tagEvent.Generation, reason)
-			tagUpdated = true
-		} else {
-			klog.V(4).Infof("Image stream tag %s:%s generation %d - keeping because %s", streamName, tag, tagEvent.Generation, reason)
-			newHistory.Items = append(newHistory.Items, tagEvent)
-		}
-	}
-
-	if len(newHistory.Items) == 0 {
-		klog.V(4).Infof("Image stream tag %s:%s - removing empty tag", streamName, tag)
-		tags := []imagev1.NamedTagEventList{}
-		for i := range imageStream.Status.Tags {
-			t := imageStream.Status.Tags[i]
-			if t.Tag != tag {
-				tags = append(tags, t)
-			}
-		}
-		imageStream.Status.Tags = tags
-		tagDeleted = true
-		tagUpdated = false
-	} else if tagUpdated {
-		for i := range imageStream.Status.Tags {
-			t := imageStream.Status.Tags[i]
-			if t.Tag == tag {
-				imageStream.Status.Tags[i] = newHistory
-				break
-			}
-		}
-	}
-
-	return
-}
-
-func tagEventIsPrunable(
-	tagEvent imagev1.TagEvent,
-	g genericgraph.Graph,
-	prunableImageNodes map[string]*imagegraph.ImageNode,
-	keepYoungerThan time.Time,
-) (ok bool, reason string) {
-	if _, ok := prunableImageNodes[tagEvent.Image]; ok {
-		return true, fmt.Sprintf("image %q matches deleted image", tagEvent.Image)
-	}
-
-	if n := imagegraph.FindImage(g, tagEvent.Image); n != nil {
-		return false, fmt.Sprintf("image %q is not deleted", tagEvent.Image)
-	}
-
-	if !tagEvent.Created.After(keepYoungerThan) {
-		return true, fmt.Sprintf("image %q is absent", tagEvent.Image)
-	}
-
-	return false, "the tag event is younger than threshold"
-}
-
-// byLayerCountAndAge sorts a list of image nodes from the largest (by the number of image layers) to the
-// smallest. Images with the same number of layers are ordered from the oldest to the youngest.
-type byLayerCountAndAge []*imagegraph.ImageNode
-
-func (b byLayerCountAndAge) Len() int      { return len(b) }
-func (b byLayerCountAndAge) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
-func (b byLayerCountAndAge) Less(i, j int) bool {
-	fst, snd := b[i].Image, b[j].Image
-	if len(fst.DockerImageLayers) > len(snd.DockerImageLayers) {
-		return true
-	}
-	if len(fst.DockerImageLayers) < len(snd.DockerImageLayers) {
-		return false
-	}
-
-	return fst.CreationTimestamp.Before(&snd.CreationTimestamp) ||
-		(!snd.CreationTimestamp.Before(&fst.CreationTimestamp) && fst.Name < snd.Name)
-}
-
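The resulting order, sketched with hypothetical nodes:

	// nodes := []*imagegraph.ImageNode{imgA, imgB, imgC} // hypothetical
	// sort.Sort(byLayerCountAndAge(nodes))
	// // nodes[0] now has the most DockerImageLayers; among images with equal
	// // layer counts, the oldest CreationTimestamp (then the lexically smaller
	// // Name) comes first.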
-// nodeItem is an item of a doubly-linked list of image nodes.
-type nodeItem struct {
-	node       *imagegraph.ImageNode
-	prev, next *nodeItem
-}
-
-// pop removes the item from a doubly-linked list and returns the image node it holds and its former next
-// neighbour.
-func (i *nodeItem) pop() (node *imagegraph.ImageNode, next *nodeItem) {
-	n, p := i.next, i.prev
-	if p != nil {
-		p.next = n
-	}
-	if n != nil {
-		n.prev = p
-	}
-	return i.node, n
-}
-
-// insertAfter makes a new list item from the given node and inserts it into the list right after the given
-// item. The newly created item is returned.
-func insertAfter(item *nodeItem, node *imagegraph.ImageNode) *nodeItem {
-	newItem := &nodeItem{
-		node: node,
-		prev: item,
-	}
-	if item != nil {
-		if item.next != nil {
-			item.next.prev = newItem
-			newItem.next = item.next
-		}
-		item.next = newItem
-	}
-	return newItem
-}
-
-// makeQueue makes a doubly-linked list of items out of the given array of image nodes.
-func makeQueue(nodes []*imagegraph.ImageNode) *nodeItem {
-	var head, tail *nodeItem
-	for i, n := range nodes {
-		tail = insertAfter(tail, n)
-		if i == 0 {
-			head = tail
-		}
-	}
-	return head
-}
-
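The queue mechanics in isolation; drainQueue is a hypothetical helper that mirrors how getNextJob walks the list:

	func drainQueue(nodes []*imagegraph.ImageNode) []*imagegraph.ImageNode {
		var out []*imagegraph.ImageNode
		for item := makeQueue(nodes); item != nil; {
			var node *imagegraph.ImageNode
			node, item = item.pop() // pop unlinks the head and returns its successor
			out = append(out, node)
		}
		return out // same order as the input slice
	}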
-// Prune prunes the objects as follows:
-//  1. it calculates the prunable images and builds a queue
-//     - the queue never grows; it only shrinks (newly created images are not added)
-//  2. it untags the prunable images from image streams
-//  3. it spawns workers
-//  4. it turns each prunable image into a job for the workers and makes sure they are busy
-//  5. it terminates the workers once the queue is empty and reports results
-func (p *pruner) Prune(
-	imagePrunerFactory ImagePrunerFactoryFunc,
-	streamPruner ImageStreamDeleter,
-	layerLinkPruner LayerLinkDeleter,
-	blobPruner BlobDeleter,
-	manifestPruner ManifestDeleter,
-) (deletions []Deletion, failures []Failure) {
-	allNodes := p.g.Nodes()
-
-	imageNodes := getImageNodes(allNodes)
-	prunable := calculatePrunableImages(p.g, imageNodes, p.algorithm)
-
-	/* Instead of deleting streams in a per-image job, prune them all at once. Otherwise each image stream
-	 * would have to be modified for each prunable image it contains. */
-	deletions, failures = pruneStreams(p.g, prunable, streamPruner, p.algorithm.keepYoungerThan)
-	/* If a namespace is specified, prune only ImageStreams and nothing more. If we have any errors after
-	 * ImageStream pruning, that may mean we still have references to images. */
-	if len(p.algorithm.namespace) > 0 || len(prunable) == 0 {
-		return deletions, failures
-	}
-
-	strengthenReferencesFromFailedImageStreams(p.g, failures)
-
-	// Sorting images from the largest (by number of layers) to the smallest is meant to distribute the
-	// blob deletion workload evenly across the whole queue.
-	// If processed in random order, the jobs handled early would most likely delete no blobs (because too
-	// many referrers remain), unlike the jobs processed at the end.
-	// This rests on the further assumption that images with many layers have a low probability of
-	// sharing their components with other images.
-	sort.Sort(byLayerCountAndAge(prunable))
-	p.queue = makeQueue(prunable)
-
-	var (
-		jobChan    = make(chan *Job)
-		resultChan = make(chan JobResult)
-	)
-
-	defer close(jobChan)
-
-	for i := 0; i < p.numWorkers; i++ {
-		worker, err := NewWorker(
-			p.algorithm,
-			p.registryClientFactory,
-			p.registryURL,
-			imagePrunerFactory,
-			streamPruner,
-			layerLinkPruner,
-			blobPruner,
-			manifestPruner,
-		)
-		if err != nil {
-			failures = append(failures, Failure{
-				Err: fmt.Errorf("failed to initialize worker: %v", err),
-			})
-			return
-		}
-		go worker.Run(jobChan, resultChan)
-	}
-
-	ds, fs := p.runLoop(jobChan, resultChan)
-	deletions = append(deletions, ds...)
-	failures = append(failures, fs...)
-
-	return
-}
-
-// runLoop processes the queue of prunable images until empty. It makes the workers busy and updates the graph
-// with each change.
-func (p *pruner) runLoop(
-	jobChan chan<- *Job,
-	resultChan <-chan JobResult,
-) (deletions []Deletion, failures []Failure) {
-	imgUpdateChan := p.imageWatcher.ResultChan()
-	isUpdateChan := p.imageStreamWatcher.ResultChan()
-	for {
-		// make workers busy
-		for len(p.processedImages) < p.numWorkers {
-			job, blocked := p.getNextJob()
-			if blocked {
-				break
-			}
-			if job == nil {
-				if len(p.processedImages) == 0 {
-					return
-				}
-				break
-			}
-			jobChan <- job
-			p.processedImages[job.Image] = job
-		}
-
-		select {
-		case res := <-resultChan:
-			p.updateGraphWithResult(&res)
-			deletions = append(deletions, res.Deletions...)
-			failures = append(failures, res.Failures...)
-			delete(p.processedImages, res.Job.Image)
-		case <-isUpdateChan:
-			// TODO: fix gonum/graph to not reuse IDs of deleted nodes and reenable event handling
-			//p.handleImageStreamEvent(event)
-		case <-imgUpdateChan:
-			// TODO: fix gonum/graph to not reuse IDs of deleted nodes and reenable event handling
-			//p.handleImageEvent(event)
-		}
-	}
-}
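-
-// Note that the loop above terminates only through getNextJob's bookkeeping: once the queue is
-// exhausted and no jobs remain in flight (p.processedImages is empty), runLoop returns its
-// accumulated deletions and failures.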
-
-// getNextJob removes a prunable image from the queue, makes a job out of it and returns it.
-// An image may be removed from the queue without being processed if it is no longer prunable
-// (e.g. because a new image stream started referring to it). An image may also be skipped and
-// processed later when it is currently blocked.
-//
-// An image is blocked when at least one of its components is currently being processed in a
-// running job and the component has either:
-//   - only one remaining strong reference from the blocked image (the other references are
-//     currently being removed)
-//   - only one remaining reference in an image stream, where the component is tagged (via image)
-//     (the other references are currently being removed)
-//
-// The concept of blocked images attempts to preserve image components until the very last image
-// referencing them is deleted. Otherwise, an image that was considered prunable but later became
-// non-prunable could end up unusable because its components had already been removed.
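-//
-// For example, if prunable images A and B share a layer and A is already part of a running job,
-// B is blocked until A's job completes and the graph is updated; only then can B's job decide
-// whether the shared layer can be deleted.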
-func (p *pruner) getNextJob() (job *Job, blocked bool) {
-	if p.queue == nil {
-		return
-	}
-
-	pop := func(item *nodeItem) (*imagegraph.ImageNode, *nodeItem) {
-		node, next := item.pop()
-		if item == p.queue {
-			p.queue = next
-		}
-		return node, next
-	}
-
-	for item := p.queue; item != nil; {
-		// something could have changed
-		if !imageIsPrunable(p.g, item.node, p.algorithm) {
-			_, item = pop(item)
-			continue
-		}
-
-		if components, blocked := getImageComponents(p.g, p.processedImages, item.node); !blocked {
-			job = &Job{
-				Image:      item.node,
-				Components: components,
-			}
-			_, item = pop(item)
-			break
-		}
-		item = item.next
-	}
-
-	blocked = job == nil && p.queue != nil
-
-	return
-}
-
-// updateGraphWithResult updates the graph with the result from a completed job. Image nodes are
-// deleted for each deleted image. Image component nodes are deleted if the components were removed
-// from the global blob store. An unlinked image component (layer/config/manifest link) causes the
-// edge between the image stream and the component to be deleted.
-func (p *pruner) updateGraphWithResult(res *JobResult) {
-	imageDeleted := false
-	for _, d := range res.Deletions {
-		switch d.Node.(type) {
-		case *imagegraph.ImageNode:
-			imageDeleted = true
-			p.g.RemoveNode(d.Node)
-		case *imagegraph.ImageComponentNode:
-			// blob -> delete the node with all the edges
-			if d.Parent == nil {
-				p.g.RemoveNode(d.Node)
-				continue
-			}
-
-			// link in a repository -> delete just edges
-			isn, ok := d.Parent.(*imagegraph.ImageStreamNode)
-			if !ok {
-				continue
-			}
-			edge := p.g.Edge(isn, d.Node)
-			if edge == nil {
-				continue
-			}
-			p.g.RemoveEdge(edge)
-		case *imagegraph.ImageStreamNode:
-			// ignore
-		default:
-			utilruntime.HandleError(fmt.Errorf("internal error: unhandled graph node %T", d.Node))
-		}
-	}
-
-	if imageDeleted {
-		return
-	}
-}
-
-// getImageComponents gathers image components together with the locations from which they can be
-// removed at this time. Each component can be prunable in several image streams and in the global
-// blob store.
-func getImageComponents(
-	g genericgraph.Graph,
-	processedImages map[*imagegraph.ImageNode]*Job,
-	image *imagegraph.ImageNode,
-) (components ComponentRetentions, blocked bool) {
-	components = make(ComponentRetentions)
-
-	for _, node := range g.From(image) {
-		kinds := g.EdgeKinds(g.Edge(image, node))
-		if len(kinds.Intersection(sets.NewString(
-			ReferencedImageLayerEdgeKind,
-			ReferencedImageConfigEdgeKind,
-			ReferencedImageManifestEdgeKind,
-		))) == 0 {
-			continue
-		}
-
-		imageStrongRefCounter := 0
-		imageMarkedForDeletionCounter := 0
-		referencingStreams := map[*imagegraph.ImageStreamNode]struct{}{}
-		referencingImages := map[*imagegraph.ImageNode]struct{}{}
-
-		comp, ok := node.(*imagegraph.ImageComponentNode)
-		if !ok {
-			continue
-		}
-
-		for _, ref := range g.To(comp) {
-			switch t := ref.(type) {
-			case (*imagegraph.ImageNode):
-				imageStrongRefCounter++
-				if _, processed := processedImages[t]; processed {
-					imageMarkedForDeletionCounter++
-				}
-				referencingImages[t] = struct{}{}
-
-			case *imagegraph.ImageStreamNode:
-				referencingStreams[t] = struct{}{}
-
-			default:
-				continue
-			}
-		}
-
-		switch {
-		// the component is referenced only by the given image -> prunable globally
-		case imageStrongRefCounter < 2:
-			components.Add(comp, true)
-		// the component can be pruned once the other referencing image that is being deleted is finished;
-		// don't touch it until then
-		case imageStrongRefCounter-imageMarkedForDeletionCounter < 2:
-			return nil, true
-		// not prunable component
-		default:
-			components.Add(comp, false)
-		}
-
-		if addComponentReferencingStreams(
-			g,
-			components,
-			referencingImages,
-			referencingStreams,
-			processedImages,
-			comp,
-		) {
-			return nil, true
-		}
-	}
-
-	return
-}
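-
-// To illustrate the thresholds above: with fewer than two strong image references the component is
-// prunable globally; with two or more references but fewer than two that are not already being
-// deleted, the image is reported as blocked; with two or more live references the component is
-// kept.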
-
-// addComponentReferencingStreams records whether the given component is prunable in each of the
-// streams referencing it (via a tagged image). It updates the given components argument.
-func addComponentReferencingStreams(
-	g genericgraph.Graph,
-	components ComponentRetentions,
-	referencingImages map[*imagegraph.ImageNode]struct{},
-	referencingStreams map[*imagegraph.ImageStreamNode]struct{},
-	processedImages map[*imagegraph.ImageNode]*Job,
-	comp *imagegraph.ImageComponentNode,
-) (blocked bool) {
-streamLoop:
-	for stream := range referencingStreams {
-		refCounter := 0
-		markedForDeletionCounter := 0
-
-		for image := range referencingImages {
-			edge := g.Edge(stream, image)
-			if edge == nil {
-				continue
-			}
-			kinds := g.EdgeKinds(edge)
-			// tagged not prunable image -> keep the component in the stream
-			if kinds.Has(ReferencedImageEdgeKind) {
-				components.AddReferencingStreams(comp, false, stream)
-				continue streamLoop
-			}
-			if !kinds.Has(WeakReferencedImageEdgeKind) {
-				continue
-			}
-
-			refCounter++
-			if _, processed := processedImages[image]; processed {
-				markedForDeletionCounter++
-			}
-
-			if refCounter-markedForDeletionCounter > 1 {
-				components.AddReferencingStreams(comp, false, stream)
-				continue streamLoop
-			}
-		}
-
-		switch {
-		// there's just one remaining strong reference from the stream -> unlink
-		case refCounter < 2:
-			components.AddReferencingStreams(comp, true, stream)
-		// there's just one remaining strong reference and at least one other reference currently
-		// being dereferenced in a running job -> wait until it completes
-		case refCounter-markedForDeletionCounter < 2:
-			return true
-		// not yet prunable
-		default:
-			components.AddReferencingStreams(comp, false, stream)
-		}
-	}
-
-	return false
-}
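-
-// The per-stream bookkeeping mirrors the image-level counters: a stream keeps the component while
-// any tagged non-prunable image still needs it, unlinks it when at most one weak reference
-// remains, and reports the image as blocked while the deciding references are still being removed
-// by running jobs.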
-
-// imageComponentIsPrunable returns true if the image component is not referenced by any images.
-func imageComponentIsPrunable(g genericgraph.Graph, cn *imagegraph.ImageComponentNode) bool {
-	for _, predecessor := range g.To(cn) {
-		klog.V(4).Infof("Examining predecessor %#v of image config %v", predecessor, cn)
-		if g.Kind(predecessor) == imagegraph.ImageNodeKind {
-			klog.V(4).Infof("Config %v has an image predecessor", cn)
-			return false
-		}
-	}
-
-	return true
-}
-
-// streamsReferencingImageComponent returns a list of ImageStreamNodes that reference a
-// given ImageComponentNode.
-func streamsReferencingImageComponent(g genericgraph.Graph, cn *imagegraph.ImageComponentNode) []*imagegraph.ImageStreamNode {
-	ret := []*imagegraph.ImageStreamNode{}
-	for _, predecessor := range g.To(cn) {
-		if g.Kind(predecessor) != imagegraph.ImageStreamNodeKind {
-			continue
-		}
-		ret = append(ret, predecessor.(*imagegraph.ImageStreamNode))
-	}
-
-	return ret
-}
-
-// imageDeleter removes an image from OpenShift.
-type imageDeleter struct {
-	images imagev1client.ImagesGetter
-}
-
-var _ ImageDeleter = &imageDeleter{}
-
-// NewImageDeleter creates a new imageDeleter.
-func NewImageDeleter(images imagev1client.ImagesGetter) ImageDeleter {
-	return &imageDeleter{
-		images: images,
-	}
-}
-
-func (p *imageDeleter) DeleteImage(image *imagev1.Image) error {
-	klog.V(4).Infof("Deleting image %q", image.Name)
-	return p.images.Images().Delete(image.Name, metav1.NewDeleteOptions(0))
-}
-
-// imageStreamDeleter updates an image stream in OpenShift.
-type imageStreamDeleter struct {
-	streams imagev1client.ImageStreamsGetter
-}
-
-var _ ImageStreamDeleter = &imageStreamDeleter{}
-
-// NewImageStreamDeleter creates a new imageStreamDeleter.
-func NewImageStreamDeleter(streams imagev1client.ImageStreamsGetter) ImageStreamDeleter {
-	return &imageStreamDeleter{
-		streams: streams,
-	}
-}
-
-func (p *imageStreamDeleter) GetImageStream(stream *imagev1.ImageStream) (*imagev1.ImageStream, error) {
-	return p.streams.ImageStreams(stream.Namespace).Get(stream.Name, metav1.GetOptions{})
-}
-
-func (p *imageStreamDeleter) UpdateImageStream(stream *imagev1.ImageStream) (*imagev1.ImageStream, error) {
-	klog.V(4).Infof("Updating ImageStream %s", getName(stream))
-	is, err := p.streams.ImageStreams(stream.Namespace).UpdateStatus(stream)
-	if err == nil {
-		klog.V(5).Infof("Updated ImageStream: %#v", is)
-	}
-	return is, err
-}
-
-// NotifyImageStreamPrune shows a notification about an updated image stream. This implementation
-// is a no-op.
-func (p *imageStreamDeleter) NotifyImageStreamPrune(stream *imagev1.ImageStream, updatedTags []string, deletedTags []string) {
-	return
-}
-
-// deleteFromRegistry uses registryClient to send a DELETE request to the
-// provided URL. It attempts an https request first; if that fails, it falls
-// back to http.
-func deleteFromRegistry(registryClient *http.Client, url string) error {
-	req, err := http.NewRequest(http.MethodDelete, url, nil)
-	if err != nil {
-		return err
-	}
-
-	klog.V(5).Infof(`Sending request "%s %s" to the registry`, req.Method, req.URL.String())
-	resp, err := registryClient.Do(req)
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-
-	// TODO: investigate why we're getting non-existent layers; for now we log
-	// them and continue working
-	if resp.StatusCode == http.StatusNotFound {
-		klog.Warningf("Unable to prune layer %s, returned %v", url, resp.Status)
-		return nil
-	}
-
-	// a non-2xx/3xx response doesn't cause an error, so we need to check for it
-	// manually and return it to the caller
-	if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest {
-		return fmt.Errorf(resp.Status)
-	}
-
-	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusAccepted {
-		klog.V(1).Infof("Unexpected status code in response: %d", resp.StatusCode)
-		var response errcode.Errors
-		decoder := json.NewDecoder(resp.Body)
-		if err := decoder.Decode(&response); err != nil {
-			return err
-		}
-		klog.V(1).Infof("Response: %#v", response)
-		return &response
-	}
-
-	return err
-}
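-
-// A sketch of a call through this helper (the URL is illustrative only):
-//
-//	err := deleteFromRegistry(registryClient,
-//		"https://registry.io/v2/foo/bar/manifests/sha256:0000000000000000000000000000000000000000000000000000000000000000")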
-
-// layerLinkDeleter removes a repository layer link from the registry.
-type layerLinkDeleter struct{}
-
-var _ LayerLinkDeleter = &layerLinkDeleter{}
-
-// NewLayerLinkDeleter creates a new layerLinkDeleter.
-func NewLayerLinkDeleter() LayerLinkDeleter {
-	return &layerLinkDeleter{}
-}
-
-func (p *layerLinkDeleter) DeleteLayerLink(registryClient *http.Client, registryURL *url.URL, repoName, linkName string) error {
-	klog.V(4).Infof("Deleting layer link %s from repository %s/%s", linkName, registryURL.Host, repoName)
-	return deleteFromRegistry(registryClient, fmt.Sprintf("%s/v2/%s/blobs/%s", registryURL.String(), repoName, linkName))
-}
-
-// blobDeleter removes a blob from the registry.
-type blobDeleter struct{}
-
-var _ BlobDeleter = &blobDeleter{}
-
-// NewBlobDeleter creates a new blobDeleter.
-func NewBlobDeleter() BlobDeleter {
-	return &blobDeleter{}
-}
-
-func (p *blobDeleter) DeleteBlob(registryClient *http.Client, registryURL *url.URL, blob string) error {
-	klog.V(4).Infof("Deleting blob %s from registry %s", blob, registryURL.Host)
-	return deleteFromRegistry(registryClient, fmt.Sprintf("%s/admin/blobs/%s", registryURL.String(), blob))
-}
-
-// manifestDeleter deletes repository manifest data from the registry.
-type manifestDeleter struct{}
-
-var _ ManifestDeleter = &manifestDeleter{}
-
-// NewManifestDeleter creates a new manifestDeleter.
-func NewManifestDeleter() ManifestDeleter {
-	return &manifestDeleter{}
-}
-
-func (p *manifestDeleter) DeleteManifest(registryClient *http.Client, registryURL *url.URL, repoName, manifest string) error {
-	klog.V(4).Infof("Deleting manifest %s from repository %s/%s", manifest, registryURL.Host, repoName)
-	return deleteFromRegistry(registryClient, fmt.Sprintf("%s/v2/%s/manifests/%s", registryURL.String(), repoName, manifest))
-}
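-
-// Taken together, the three deleters above issue DELETE requests against the following registry
-// endpoints (repo and digest values are placeholders):
-//
-//	layer link: {registry}/v2/{repo}/blobs/{digest}
-//	blob:       {registry}/admin/blobs/{digest}
-//	manifest:   {registry}/v2/{repo}/manifests/{digest}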
-
-func makeISTag(namespace, name, tag string) *imagev1.ImageStreamTag {
-	return &imagev1.ImageStreamTag{
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: namespace,
-			Name:      imageutil.JoinImageStreamTag(name, tag),
-		},
-	}
-}
-
-func makeISTagWithStream(is *imagev1.ImageStream, tag string) *imagev1.ImageStreamTag {
-	return makeISTag(is.Namespace, is.Name, tag)
-}
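-
-// For example, makeISTag("foo", "bar", "latest") produces an ImageStreamTag named "bar:latest" in
-// namespace "foo" (assuming imageutil.JoinImageStreamTag joins the stream name and tag with a
-// colon).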
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/prune_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/prune_test.go
deleted file mode 100644
index 6d025159ad74..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/prune_test.go
+++ /dev/null
@@ -1,2263 +0,0 @@
-package imageprune
-
-import (
-	"bytes"
-	"errors"
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"reflect"
-	"regexp"
-	"sort"
-	"sync"
-	"testing"
-	"time"
-
-	kappsv1 "k8s.io/api/apps/v1"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/diff"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/client-go/rest/fake"
-	clienttesting "k8s.io/client-go/testing"
-	"k8s.io/klog"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-
-	"github.com/openshift/api"
-	appsv1 "github.com/openshift/api/apps/v1"
-	buildv1 "github.com/openshift/api/build/v1"
-	imagev1 "github.com/openshift/api/image/v1"
-	fakeimageclient "github.com/openshift/client-go/image/clientset/versioned/fake"
-	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
-	fakeimagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake"
-	"github.com/openshift/oc/pkg/helpers/graph/genericgraph"
-	imagegraph "github.com/openshift/oc/pkg/helpers/graph/imagegraph/nodes"
-	imagetest "github.com/openshift/oc/pkg/helpers/image/test"
-)
-
-var logLevel = flag.Int("loglevel", 0, "")
-
-func TestImagePruning(t *testing.T) {
-	var level klog.Level
-	level.Set(fmt.Sprint(*logLevel))
-
-	registryHost := "registry.io"
-	registryURL := "https://" + registryHost
-
-	tests := []struct {
-		name                          string
-		pruneOverSizeLimit            *bool
-		allImages                     *bool
-		pruneRegistry                 *bool
-		ignoreInvalidRefs             *bool
-		keepTagRevisions              *int
-		namespace                     string
-		images                        imagev1.ImageList
-		pods                          corev1.PodList
-		streams                       imagev1.ImageStreamList
-		rcs                           corev1.ReplicationControllerList
-		bcs                           buildv1.BuildConfigList
-		builds                        buildv1.BuildList
-		dss                           kappsv1.DaemonSetList
-		deployments                   kappsv1.DeploymentList
-		dcs                           appsv1.DeploymentConfigList
-		rss                           kappsv1.ReplicaSetList
-		limits                        map[string][]*corev1.LimitRange
-		imageDeleterErr               error
-		imageStreamDeleterErr         error
-		layerDeleterErr               error
-		manifestDeleterErr            error
-		blobDeleterErrorGetter        errorForSHA
-		expectedImageDeletions        []string
-		expectedStreamUpdates         []string
-		expectedLayerLinkDeletions    []string
-		expectedManifestLinkDeletions []string
-		expectedBlobDeletions         []string
-		expectedFailures              []string
-		expectedErrorString           string
-	}{
-		{
-			name:                   "1 pod - phase pending - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods:                   imagetest.PodList(imagetest.Pod("foo", "pod1", corev1.PodPending, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:   "3 pods - last phase pending - don't prune",
-			images: imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods: imagetest.PodList(
-				imagetest.Pod("foo", "pod1", corev1.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				imagetest.Pod("foo", "pod2", corev1.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				imagetest.Pod("foo", "pod3", corev1.PodPending, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-			),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:                   "1 pod - phase running - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods:                   imagetest.PodList(imagetest.Pod("foo", "pod1", corev1.PodRunning, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:   "3 pods - last phase running - don't prune",
-			images: imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods: imagetest.PodList(
-				imagetest.Pod("foo", "pod1", corev1.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				imagetest.Pod("foo", "pod2", corev1.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				imagetest.Pod("foo", "pod3", corev1.PodRunning, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-			),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:                   "pod phase succeeded - prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods:                   imagetest.PodList(imagetest.Pod("foo", "pod1", corev1.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
-			expectedBlobDeletions: []string{
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000000",
-				registryURL + "|" + imagetest.Layer1,
-				registryURL + "|" + imagetest.Layer2,
-				registryURL + "|" + imagetest.Layer3,
-				registryURL + "|" + imagetest.Layer4,
-				registryURL + "|" + imagetest.Layer5,
-			},
-		},
-
-		{
-			name:                   "pod phase succeeded - prune leave registry alone",
-			pruneRegistry:          newBool(false),
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods:                   imagetest.PodList(imagetest.Pod("foo", "pod1", corev1.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
-			expectedBlobDeletions:  []string{},
-		},
-
-		{
-			name:                   "pod phase succeeded, pod less than min pruning age - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods:                   imagetest.PodList(imagetest.AgedPod("foo", "pod1", corev1.PodSucceeded, 5, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:                   "pod phase succeeded, image less than min pruning age - don't prune",
-			images:                 imagetest.ImageList(imagetest.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", 5)),
-			pods:                   imagetest.PodList(imagetest.Pod("foo", "pod1", corev1.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:   "pod phase failed - prune",
-			images: imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods: imagetest.PodList(
-				imagetest.Pod("foo", "pod1", corev1.PodFailed, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				imagetest.Pod("foo", "pod2", corev1.PodFailed, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				imagetest.Pod("foo", "pod3", corev1.PodFailed, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-			),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
-			expectedBlobDeletions: []string{
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000000",
-				registryURL + "|" + imagetest.Layer1,
-				registryURL + "|" + imagetest.Layer2,
-				registryURL + "|" + imagetest.Layer3,
-				registryURL + "|" + imagetest.Layer4,
-				registryURL + "|" + imagetest.Layer5,
-			},
-		},
-
-		{
-			name:   "pod phase unknown - prune",
-			images: imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods: imagetest.PodList(
-				imagetest.Pod("foo", "pod1", corev1.PodUnknown, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				imagetest.Pod("foo", "pod2", corev1.PodUnknown, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				imagetest.Pod("foo", "pod3", corev1.PodUnknown, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-			),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
-			expectedBlobDeletions: []string{
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000000",
-				registryURL + "|" + imagetest.Layer1,
-				registryURL + "|" + imagetest.Layer2,
-				registryURL + "|" + imagetest.Layer3,
-				registryURL + "|" + imagetest.Layer4,
-				registryURL + "|" + imagetest.Layer5,
-			},
-		},
-
-		{
-			name:   "pod container image not parsable",
-			images: imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods: imagetest.PodList(
-				imagetest.Pod("foo", "pod1", corev1.PodRunning, "a/b/c/d/e"),
-			),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
-			expectedBlobDeletions: []string{
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000000",
-				registryURL + "|" + imagetest.Layer1,
-				registryURL + "|" + imagetest.Layer2,
-				registryURL + "|" + imagetest.Layer3,
-				registryURL + "|" + imagetest.Layer4,
-				registryURL + "|" + imagetest.Layer5,
-			},
-		},
-
-		{
-			name:   "pod container image doesn't have an id",
-			images: imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods: imagetest.PodList(
-				imagetest.Pod("foo", "pod1", corev1.PodRunning, "foo/bar:latest"),
-			),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
-			expectedBlobDeletions: []string{
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000000",
-				registryURL + "|" + imagetest.Layer1,
-				registryURL + "|" + imagetest.Layer2,
-				registryURL + "|" + imagetest.Layer3,
-				registryURL + "|" + imagetest.Layer4,
-				registryURL + "|" + imagetest.Layer5,
-			},
-		},
-
-		{
-			name:   "pod refers to image not in graph",
-			images: imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods: imagetest.PodList(
-				imagetest.Pod("foo", "pod1", corev1.PodRunning, registryHost+"/foo/bar@sha256:ABC0000000000000000000000000000000000000000000000000000000000002"),
-			),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
-			expectedBlobDeletions: []string{
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000000",
-				registryURL + "|" + imagetest.Layer1,
-				registryURL + "|" + imagetest.Layer2,
-				registryURL + "|" + imagetest.Layer3,
-				registryURL + "|" + imagetest.Layer4,
-				registryURL + "|" + imagetest.Layer5,
-			},
-		},
-
-		{
-			name:                   "referenced by rc - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			rcs:                    imagetest.RCList(imagetest.RC("foo", "rc1", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:                   "referenced by dc - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			dcs:                    imagetest.DCList(imagetest.DC("foo", "rc1", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name: "referenced by daemonset - don't prune",
-			images: imagetest.ImageList(
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-			),
-			dss:                    imagetest.DSList(imagetest.DS("foo", "rc1", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000001"},
-			expectedBlobDeletions:  []string{registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000001"},
-		},
-
-		{
-			name: "referenced by replicaset - don't prune",
-			images: imagetest.ImageList(
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-			),
-			rss:                    imagetest.RSList(imagetest.RS("foo", "rc1", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000001"},
-			expectedBlobDeletions:  []string{registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000001"},
-		},
-
-		{
-			name: "referenced by upstream deployment - don't prune",
-			images: imagetest.ImageList(
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-			),
-			deployments:            imagetest.DeploymentList(imagetest.Deployment("foo", "rc1", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000001"},
-			expectedBlobDeletions:  []string{registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000001"},
-		},
-
-		{
-			name:                   "referenced by bc - sti - ImageStreamImage - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			bcs:                    imagetest.BCList(imagetest.BC("foo", "bc1", "source", "ImageStreamImage", "foo", "bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:                   "referenced by bc - docker - ImageStreamImage - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			bcs:                    imagetest.BCList(imagetest.BC("foo", "bc1", "docker", "ImageStreamImage", "foo", "bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:                   "referenced by bc - custom - ImageStreamImage - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			bcs:                    imagetest.BCList(imagetest.BC("foo", "bc1", "custom", "ImageStreamImage", "foo", "bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:                   "referenced by bc - sti - DockerImage - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			bcs:                    imagetest.BCList(imagetest.BC("foo", "bc1", "source", "DockerImage", "foo", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:                   "referenced by bc - docker - DockerImage - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			bcs:                    imagetest.BCList(imagetest.BC("foo", "bc1", "docker", "DockerImage", "foo", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:                   "referenced by bc - custom - DockerImage - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			bcs:                    imagetest.BCList(imagetest.BC("foo", "bc1", "custom", "DockerImage", "foo", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:                   "referenced by build - sti - ImageStreamImage - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			builds:                 imagetest.BuildList(imagetest.Build("foo", "build1", "source", "ImageStreamImage", "foo", "bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:                   "referenced by build - docker - ImageStreamImage - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			builds:                 imagetest.BuildList(imagetest.Build("foo", "build1", "docker", "ImageStreamImage", "foo", "bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:                   "referenced by build - custom - ImageStreamImage - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			builds:                 imagetest.BuildList(imagetest.Build("foo", "build1", "custom", "ImageStreamImage", "foo", "bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:                   "referenced by build - sti - DockerImage - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			builds:                 imagetest.BuildList(imagetest.Build("foo", "build1", "source", "DockerImage", "foo", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:                   "referenced by build - docker - DockerImage - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			builds:                 imagetest.BuildList(imagetest.Build("foo", "build1", "docker", "DockerImage", "foo", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name:                   "referenced by build - custom - DockerImage - don't prune",
-			images:                 imagetest.ImageList(imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			builds:                 imagetest.BuildList(imagetest.Build("foo", "build1", "custom", "DockerImage", "foo", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-		},
-
-		{
-			name: "image stream - keep most recent n images",
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", false, "", ""),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-					),
-				}),
-			),
-			expectedImageDeletions:        []string{"sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedStreamUpdates:         []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedManifestLinkDeletions: []string{registryURL + "|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedBlobDeletions:         []string{registryURL + "|" + "sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-		},
-
-		{
-			name: "continue on blob deletion failure",
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", false, "", ""),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004", nil, "layer1", "layer2"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-					),
-				}),
-			),
-			blobDeleterErrorGetter: func(dgst string) error {
-				if dgst == "layer1" {
-					return errors.New("err")
-				}
-				return nil
-			},
-			expectedImageDeletions:        []string{"sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedStreamUpdates:         []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedManifestLinkDeletions: []string{registryURL + "|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedLayerLinkDeletions:    []string{registryURL + "|foo/bar|layer1", registryURL + "|foo/bar|layer2"},
-			expectedBlobDeletions: []string{
-				registryURL + "|" + "layer1",
-				registryURL + "|" + "layer2",
-				registryURL + "|" + "sha256:0000000000000000000000000000000000000000000000000000000000000004",
-			},
-			expectedFailures: []string{registryURL + "|" + "layer1|err"},
-		},
-
-		{
-			name: "keep image when all blob deletions fail",
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", false, "", ""),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004", nil, "layer1", "layer2"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-					),
-				}),
-			),
-			blobDeleterErrorGetter:        func(dgst string) error { return errors.New("err") },
-			expectedImageDeletions:        []string{},
-			expectedStreamUpdates:         []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedManifestLinkDeletions: []string{registryURL + "|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedLayerLinkDeletions:    []string{registryURL + "|foo/bar|layer1", registryURL + "|foo/bar|layer2"},
-			expectedBlobDeletions:         []string{registryURL + "|layer1", registryURL + "|layer2", registryURL + "|" + "sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedFailures:              []string{registryURL + "|" + "layer1|err", registryURL + "|" + "layer2|err", registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000004|err"},
-		},
-
-		{
-			name: "continue on manifest link deletion failure",
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", false, "", ""),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-					),
-				}),
-			),
-			manifestDeleterErr:            fmt.Errorf("err"),
-			expectedImageDeletions:        []string{"sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedStreamUpdates:         []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedManifestLinkDeletions: []string{registryURL + "|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedBlobDeletions:         []string{registryURL + "|" + "sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedFailures:              []string{registryURL + "|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004|err"},
-		},
-
-		{
-			name: "stop on image stream update failure",
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", false, "", ""),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-					),
-				}),
-			),
-			imageStreamDeleterErr: fmt.Errorf("err"),
-			expectedFailures:      []string{"foo/bar|err"},
-		},
-
-		{
-			name: "image stream - same manifest listed multiple times in tag history",
-			images: imagetest.ImageList(
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-					),
-				}),
-			),
-		},
-
-		{
-			name: "image stream age less than min pruning age - don't prune",
-			images: imagetest.ImageList(
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.AgedStream(registryHost, "foo", "bar", 5, []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-					),
-				}),
-			),
-			expectedImageDeletions: []string{},
-			expectedStreamUpdates:  []string{},
-		},
-
-		{
-			name: "image stream - unreference absent image",
-			images: imagetest.ImageList(
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-					),
-				}),
-			),
-			expectedStreamUpdates: []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000000"},
-		},
-
-		{
-			name: "image stream with dangling references - delete tags",
-			images: imagetest.ImageList(
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", nil, "layer1"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-					),
-					imagetest.Tag("tag",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-					),
-				}),
-			),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000001"},
-			expectedStreamUpdates: []string{
-				"foo/bar:latest",
-				"foo/bar:tag",
-				"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000000",
-				"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000002",
-			},
-			expectedBlobDeletions: []string{registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000001", registryURL + "|layer1"},
-		},
-
-		{
-			name: "image stream - keep reference to a young absent image",
-			images: imagetest.ImageList(
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", nil),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.YoungTagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", metav1.Now()),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-					),
-				}),
-			),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000002"},
-			expectedBlobDeletions:  []string{registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000002"},
-		},
-
-		{
-			name:             "images referenced by istag - keep",
-			keepTagRevisions: keepTagRevisions(0),
-			images: imagetest.ImageList(
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000005", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000005"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000006", registryHost+"/foo/baz@sha256:0000000000000000000000000000000000000000000000000000000000000006"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000005", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000005"),
-					),
-					imagetest.Tag("dummy", // removed because no object references the image (the nm/dcfoo deployment config references a mismatched repository name)
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000005", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000005"),
-					),
-				}),
-				imagetest.Stream(registryHost, "foo", "baz", []imagev1.NamedTagEventList{
-					imagetest.Tag("late", // kept because a replica set references the tagged image
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-					),
-					imagetest.Tag("keepme", // kept because a deployment references the tagged image
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000006", registryHost+"/foo/baz@sha256:0000000000000000000000000000000000000000000000000000000000000006"),
-					),
-				}),
-			),
-			dss: imagetest.DSList(imagetest.DS("nm", "dsfoo", fmt.Sprintf("%s/%s/%s:%s", registryHost, "foo", "bar", "latest"))),
-			dcs: imagetest.DCList(imagetest.DC("nm", "dcfoo", fmt.Sprintf("%s/%s/%s:%s", registryHost, "foo", "repo", "dummy"))),
-			rss: imagetest.RSList(imagetest.RS("nm", "rsfoo", fmt.Sprintf("%s/%s/%s:%s", registryHost, "foo", "baz", "late"))),
-			// the mismatched registry hostname is ignored when matching the deployment's image reference
-			deployments: imagetest.DeploymentList(imagetest.Deployment("nm", "depfoo", fmt.Sprintf("%s/%s/%s:%s", "external.registry:5000", "foo", "baz", "keepme"))),
-			expectedImageDeletions: []string{
-				"sha256:0000000000000000000000000000000000000000000000000000000000000001",
-				"sha256:0000000000000000000000000000000000000000000000000000000000000003",
-				"sha256:0000000000000000000000000000000000000000000000000000000000000004",
-				"sha256:0000000000000000000000000000000000000000000000000000000000000005",
-			},
-			expectedStreamUpdates: []string{
-				"foo/bar:dummy",
-				"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000000",
-				"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000001",
-				"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000003",
-				"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004",
-				"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000005",
-			},
-			expectedManifestLinkDeletions: []string{
-				registryURL + "|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000001",
-				registryURL + "|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000003",
-				registryURL + "|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004",
-				registryURL + "|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000005",
-			},
-			expectedBlobDeletions: []string{
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000001",
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000003",
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000004",
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000005",
-			},
-		},
-
-		{
-			name: "multiple resources pointing to image - don't prune",
-			images: imagetest.ImageList(
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				imagetest.Image("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-					),
-				}),
-			),
-			rcs:                    imagetest.RCList(imagetest.RC("foo", "rc1", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002")),
-			pods:                   imagetest.PodList(imagetest.Pod("foo", "pod1", corev1.PodRunning, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002")),
-			dcs:                    imagetest.DCList(imagetest.DC("foo", "rc1", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			bcs:                    imagetest.BCList(imagetest.BC("foo", "bc1", "source", "DockerImage", "foo", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			builds:                 imagetest.BuildList(imagetest.Build("foo", "build1", "custom", "ImageStreamImage", "foo", "bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			expectedImageDeletions: []string{},
-			expectedStreamUpdates:  []string{},
-		},
-
-		{
-			name: "image with nil annotations",
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", false, "", ""),
-			),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
-			expectedStreamUpdates:  []string{},
-			expectedBlobDeletions:  []string{registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000000"},
-		},
-
-		{
-			name:      "prune all-images=true image with nil annotations",
-			allImages: newBool(true),
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", false, "", ""),
-			),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
-			expectedStreamUpdates:  []string{},
-			expectedBlobDeletions:  []string{registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000000"},
-		},
-
-		{
-			name:      "prune all-images=false image with nil annotations",
-			allImages: newBool(false),
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", false, "", ""),
-			),
-			expectedImageDeletions: []string{},
-			expectedStreamUpdates:  []string{},
-		},
-
-		{
-			name: "image missing managed annotation",
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, "foo", "bar"),
-			),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
-			expectedStreamUpdates:  []string{},
-			expectedBlobDeletions:  []string{registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000000"},
-		},
-
-		{
-			name: "image with managed annotation != true",
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "false"),
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000001", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "0"),
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000002", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "1"),
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000003", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "True"),
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000004", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "yes"),
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000005", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "Yes"),
-			),
-			expectedImageDeletions: []string{
-				"sha256:0000000000000000000000000000000000000000000000000000000000000000",
-				"sha256:0000000000000000000000000000000000000000000000000000000000000001",
-				"sha256:0000000000000000000000000000000000000000000000000000000000000002",
-				"sha256:0000000000000000000000000000000000000000000000000000000000000003",
-				"sha256:0000000000000000000000000000000000000000000000000000000000000004",
-				"sha256:0000000000000000000000000000000000000000000000000000000000000005",
-			},
-			expectedStreamUpdates: []string{},
-			expectedBlobDeletions: []string{
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000000",
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000001",
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000002",
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000003",
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000004",
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000005",
-			},
-		},
-
-		{
-			name:      "prune all-images=true with image missing managed annotation",
-			allImages: newBool(true),
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, "foo", "bar"),
-			),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
-			expectedStreamUpdates:  []string{},
-			expectedBlobDeletions:  []string{registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000000"},
-		},
-
-		{
-			name:      "prune all-images=true with image with managed annotation != true",
-			allImages: newBool(true),
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "false"),
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000001", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "0"),
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000002", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "1"),
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000003", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "True"),
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000004", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "yes"),
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000005", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "Yes"),
-			),
-			expectedImageDeletions: []string{
-				"sha256:0000000000000000000000000000000000000000000000000000000000000000",
-				"sha256:0000000000000000000000000000000000000000000000000000000000000001",
-				"sha256:0000000000000000000000000000000000000000000000000000000000000002",
-				"sha256:0000000000000000000000000000000000000000000000000000000000000003",
-				"sha256:0000000000000000000000000000000000000000000000000000000000000004",
-				"sha256:0000000000000000000000000000000000000000000000000000000000000005",
-			},
-			expectedStreamUpdates: []string{},
-			expectedBlobDeletions: []string{
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000000",
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000001",
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000002",
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000003",
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000004",
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000005",
-			},
-		},
-
-		{
-			name:      "prune all-images=false with image missing managed annotation",
-			allImages: newBool(false),
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, "foo", "bar"),
-			),
-			expectedImageDeletions: []string{},
-			expectedStreamUpdates:  []string{},
-		},
-
-		{
-			name:      "prune all-images=false with image with managed annotation != true",
-			allImages: newBool(false),
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "false"),
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000001", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "0"),
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000002", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "1"),
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000003", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "True"),
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000004", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "yes"),
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000005", "someregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", true, imagev1.ManagedByOpenShiftAnnotation, "Yes"),
-			),
-			expectedImageDeletions: []string{},
-			expectedStreamUpdates:  []string{},
-		},
-
-		{
-			name: "image with layers",
-			images: imagetest.ImageList(
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", &imagetest.Config1, "layer1", "layer2", "layer3", "layer4"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", &imagetest.Config2, "layer1", "layer2", "layer3", "layer4"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", nil, "layer1", "layer2", "layer3", "layer4"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004", nil, "layer5", "layer6", "layer7", "layer8"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-					),
-				}),
-			),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedStreamUpdates:  []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedLayerLinkDeletions: []string{
-				registryURL + "|foo/bar|layer5",
-				registryURL + "|foo/bar|layer6",
-				registryURL + "|foo/bar|layer7",
-				registryURL + "|foo/bar|layer8",
-			},
-			expectedManifestLinkDeletions: []string{registryURL + "|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedBlobDeletions: []string{
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000004",
-				registryURL + "|layer5",
-				registryURL + "|layer6",
-				registryURL + "|layer7",
-				registryURL + "|layer8",
-			},
-		},
-
-		{
-			name: "continue on layer link error",
-			images: imagetest.ImageList(
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", &imagetest.Config1, "layer1", "layer2", "layer3", "layer4"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", &imagetest.Config2, "layer1", "layer2", "layer3", "layer4"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", nil, "layer1", "layer2", "layer3", "layer4"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004", nil, "layer5", "layer6", "layer7", "layer8"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-					),
-				}),
-			),
-			layerDeleterErr:               fmt.Errorf("err"),
-			expectedImageDeletions:        []string{"sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedStreamUpdates:         []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedManifestLinkDeletions: []string{registryURL + "|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedBlobDeletions: []string{
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000004",
-				registryURL + "|layer5",
-				registryURL + "|layer6",
-				registryURL + "|layer7",
-				registryURL + "|layer8",
-			},
-			expectedLayerLinkDeletions: []string{
-				registryURL + "|foo/bar|layer5",
-				registryURL + "|foo/bar|layer6",
-				registryURL + "|foo/bar|layer7",
-				registryURL + "|foo/bar|layer8",
-			},
-			expectedFailures: []string{
-				registryURL + "|foo/bar|layer5|err",
-				registryURL + "|foo/bar|layer6|err",
-				registryURL + "|foo/bar|layer7|err",
-				registryURL + "|foo/bar|layer8|err",
-			},
-		},
-
-		{
-			name: "images with duplicate layers and configs",
-			images: imagetest.ImageList(
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", &imagetest.Config1, "layer1", "layer2", "layer3", "layer4"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", &imagetest.Config1, "layer1", "layer2", "layer3", "layer4"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", &imagetest.Config1, "layer1", "layer2", "layer3", "layer4"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004", &imagetest.Config2, "layer5", "layer6", "layer7", "layer8"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000005", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000005", &imagetest.Config2, "layer5", "layer6", "layer9", "layerX"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-					),
-				}),
-			),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000004", "sha256:0000000000000000000000000000000000000000000000000000000000000005"},
-			expectedStreamUpdates:  []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedLayerLinkDeletions: []string{
-				registryURL + "|foo/bar|" + imagetest.Config2,
-				registryURL + "|foo/bar|layer5",
-				registryURL + "|foo/bar|layer6",
-				registryURL + "|foo/bar|layer7",
-				registryURL + "|foo/bar|layer8",
-			},
-			expectedManifestLinkDeletions: []string{registryURL + "|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedBlobDeletions: []string{
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000004",
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000005",
-				registryURL + "|" + imagetest.Config2,
-				registryURL + "|layer5",
-				registryURL + "|layer6",
-				registryURL + "|layer7",
-				registryURL + "|layer8",
-				registryURL + "|layer9",
-				registryURL + "|layerX",
-			},
-		},
-
-		{
-			name: "continue on image deletion failure",
-			images: imagetest.ImageList(
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", &imagetest.Config1, "layer1", "layer2", "layer3", "layer4"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", &imagetest.Config1, "layer1", "layer2", "layer3", "layer4"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", &imagetest.Config1, "layer1", "layer2", "layer3", "layer4"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004", &imagetest.Config2, "layer5", "layer6", "layer7", "layer8"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000005", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000005", &imagetest.Config2, "layer5", "layer6", "layer9", "layerX"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-					),
-				}),
-			),
-			imageDeleterErr:        fmt.Errorf("err"),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000004", "sha256:0000000000000000000000000000000000000000000000000000000000000005"},
-			expectedStreamUpdates:  []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedLayerLinkDeletions: []string{
-				registryURL + "|foo/bar|" + imagetest.Config2,
-				registryURL + "|foo/bar|layer5",
-				registryURL + "|foo/bar|layer6",
-				registryURL + "|foo/bar|layer7",
-				registryURL + "|foo/bar|layer8",
-			},
-			expectedManifestLinkDeletions: []string{registryURL + "|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedBlobDeletions: []string{
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000004",
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000005",
-				registryURL + "|layer7",
-				registryURL + "|layer8",
-				registryURL + "|layer9",
-				registryURL + "|layerX",
-			},
-			expectedFailures: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000004|err", "sha256:0000000000000000000000000000000000000000000000000000000000000005|err"},
-		},
-
-		{
-			name: "layers shared with young images are not pruned",
-			images: imagetest.ImageList(
-				imagetest.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", 43200),
-				imagetest.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", 5),
-			),
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000001"},
-			expectedBlobDeletions:  []string{registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000001"},
-		},
-
-		{
-			name:               "image exceeding limits",
-			pruneOverSizeLimit: newBool(true),
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", false, "", ""),
-				imagetest.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", 100, nil),
-				imagetest.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", 200, nil),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-					),
-				}),
-			),
-			limits: map[string][]*corev1.LimitRange{
-				"foo": imagetest.LimitList(100, 200),
-			},
-			expectedImageDeletions:        []string{"sha256:0000000000000000000000000000000000000000000000000000000000000003"},
-			expectedStreamUpdates:         []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000003"},
-			expectedManifestLinkDeletions: []string{registryURL + "|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000003"},
-			expectedBlobDeletions:         []string{registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000003"},
-		},
-
-		{
-			name:               "multiple images in different namespaces exceeding different limits",
-			pruneOverSizeLimit: newBool(true),
-			images: imagetest.ImageList(
-				imagetest.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", 100, nil),
-				imagetest.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", 200, nil),
-				imagetest.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/bar/foo@sha256:0000000000000000000000000000000000000000000000000000000000000003", 500, nil),
-				imagetest.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/bar/foo@sha256:0000000000000000000000000000000000000000000000000000000000000004", 600, nil),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-					),
-				}),
-				imagetest.Stream(registryHost, "bar", "foo", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/bar/foo@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/bar/foo@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-					),
-				}),
-			),
-			limits: map[string][]*corev1.LimitRange{
-				"foo": imagetest.LimitList(150),
-				"bar": imagetest.LimitList(550),
-			},
-			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000002", "sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedStreamUpdates:  []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000002", "bar/foo|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
-			expectedManifestLinkDeletions: []string{
-				registryURL + "|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000002",
-				registryURL + "|bar/foo|sha256:0000000000000000000000000000000000000000000000000000000000000004",
-			},
-			expectedBlobDeletions: []string{
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000002",
-				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000004",
-			},
-		},
-
-		{
-			name:               "image within allowed limits",
-			pruneOverSizeLimit: newBool(true),
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", false, "", ""),
-				imagetest.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", 100, nil),
-				imagetest.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", 200, nil),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-					),
-				}),
-			),
-			limits: map[string][]*corev1.LimitRange{
-				"foo": imagetest.LimitList(300),
-			},
-			expectedImageDeletions: []string{},
-			expectedStreamUpdates:  []string{},
-		},
-
-		{
-			name:               "image exceeding limits with namespace specified",
-			pruneOverSizeLimit: newBool(true),
-			namespace:          "foo",
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", false, "", ""),
-				imagetest.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", 100, nil),
-				imagetest.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", 200, nil),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-					),
-				}),
-			),
-			limits: map[string][]*corev1.LimitRange{
-				"foo": imagetest.LimitList(100, 200),
-			},
-			expectedStreamUpdates: []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000003"},
-		},
-
-		{
-			name:               "build with ignored bad image reference",
-			pruneOverSizeLimit: newBool(true),
-			ignoreInvalidRefs:  newBool(true),
-			images: imagetest.ImageList(
-				imagetest.UnmanagedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", false, "", ""),
-				imagetest.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", 100, nil),
-				imagetest.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", 200, nil),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-					),
-				}),
-			),
-			builds: imagetest.BuildList(
-				imagetest.Build("foo", "build1", "source", "DockerImage", "foo", registryHost+"/foo/bar@sha256:many-zeros-and-3"),
-			),
-			limits: map[string][]*corev1.LimitRange{
-				"foo": imagetest.LimitList(100, 200),
-			},
-			expectedImageDeletions:        []string{"sha256:0000000000000000000000000000000000000000000000000000000000000003"},
-			expectedManifestLinkDeletions: []string{registryURL + "|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000003"},
-			expectedBlobDeletions:         []string{registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000003"},
-			expectedStreamUpdates:         []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000003"},
-		},
-
-		{
-			name:                "build with bad image reference",
-			builds:              imagetest.BuildList(imagetest.Build("foo", "build1", "source", "DockerImage", "foo", registryHost+"/foo/bar@invalid-digest")),
-			expectedErrorString: fmt.Sprintf(`Build[foo/build1]: invalid container image reference "%s/foo/bar@invalid-digest": invalid reference format`, registryHost),
-		},
-
-		{
-			name: "buildconfig with bad imagestreamtag",
-			bcs:  imagetest.BCList(imagetest.BC("foo", "bc1", "source", "ImageStreamTag", "ns", "bad/tag@name")),
-			expectedErrorString: `BuildConfig[foo/bc1]: invalid ImageStreamTag reference "bad/tag@name":` +
-				` "bad/tag@name" is an image stream image, not an image stream tag`,
-		},
-
-		{
-			name:        "more parsing errors",
-			bcs:         imagetest.BCList(imagetest.BC("foo", "bc1", "source", "ImageStreamImage", "ns", "bad:isi")),
-			deployments: imagetest.DeploymentList(imagetest.Deployment("nm", "dep1", "garbage")),
-			rss:         imagetest.RSList(imagetest.RS("nm", "rs1", "I am certainly a valid reference")),
-			expectedErrorString: `[BuildConfig[foo/bc1]: invalid ImageStreamImage reference "bad:isi":` +
-				` expected exactly one @ in the isimage name "bad:isi",` +
-				` ReplicaSet[nm/rs1]: invalid container image reference "I am certainly a valid reference":` +
-				` invalid reference format]`,
-		},
-	}
-
-	// we need to install OpenShift API types to kubectl's scheme for GetReference to work
-	api.Install(scheme.Scheme)
-
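-	// each case feeds the pruner fake deleters that record their invocations; the recorded sets are compared
-	// against the expected* fixtures, which use pipe-delimited keys such as "<registryURL>|<repo>|<blob>"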
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			options := PrunerOptions{
-				Namespace:             test.namespace,
-				AllImages:             test.allImages,
-				Images:                &test.images,
-				ImageWatcher:          watch.NewFake(),
-				Streams:               &test.streams,
-				StreamWatcher:         watch.NewFake(),
-				Pods:                  &test.pods,
-				RCs:                   &test.rcs,
-				BCs:                   &test.bcs,
-				Builds:                &test.builds,
-				DSs:                   &test.dss,
-				Deployments:           &test.deployments,
-				DCs:                   &test.dcs,
-				RSs:                   &test.rss,
-				LimitRanges:           test.limits,
-				RegistryClientFactory: FakeRegistryClientFactory,
-				RegistryURL:           &url.URL{Scheme: "https", Host: registryHost},
-			}
-			if test.pruneOverSizeLimit != nil {
-				options.PruneOverSizeLimit = test.pruneOverSizeLimit
-			} else {
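-				// default policy: keep images younger than an hour and the three most recent tag revisions,
-				// unless the case overrides keepTagRevisions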
-				youngerThan := time.Hour
-				tagRevisions := 3
-				if test.keepTagRevisions != nil {
-					tagRevisions = *test.keepTagRevisions
-				}
-				options.KeepYoungerThan = &youngerThan
-				options.KeepTagRevisions = &tagRevisions
-			}
-			if test.pruneRegistry != nil {
-				options.PruneRegistry = test.pruneRegistry
-			}
-			if test.ignoreInvalidRefs != nil {
-				options.IgnoreInvalidRefs = *test.ignoreInvalidRefs
-			}
-			p, err := NewPruner(options)
-			if err != nil {
-				if len(test.expectedErrorString) > 0 {
-					if a, e := err.Error(), test.expectedErrorString; a != e {
-						t.Fatalf("got unexpected error: %q != %q", a, e)
-					}
-				} else {
-					t.Fatalf("got unexpected error: %v", err)
-				}
-				return
-			} else if len(test.expectedErrorString) > 0 {
-				t.Fatalf("got no error while expecting: %s", test.expectedErrorString)
-				return
-			}
-
-			imageDeleter, imageDeleterFactory := newFakeImageDeleter(test.imageDeleterErr)
-			streamDeleter := &fakeImageStreamDeleter{err: test.imageStreamDeleterErr, invocations: sets.NewString()}
-			layerLinkDeleter := &fakeLayerLinkDeleter{err: test.layerDeleterErr, invocations: sets.NewString()}
-			blobDeleter := &fakeBlobDeleter{getError: test.blobDeleterErrorGetter, invocations: sets.NewString()}
-			manifestDeleter := &fakeManifestDeleter{err: test.manifestDeleterErr, invocations: sets.NewString()}
-
-			deletions, failures := p.Prune(imageDeleterFactory, streamDeleter, layerLinkDeleter, blobDeleter, manifestDeleter)
-
-			expectedFailures := sets.NewString(test.expectedFailures...)
-			renderedFailures := sets.NewString()
-			for _, f := range failures {
-				rendered := renderFailure(registryURL, &f)
-				if renderedFailures.Has(rendered) {
-					t.Errorf("got the following failure more than once: %v", rendered)
-					continue
-				}
-				renderedFailures.Insert(rendered)
-			}
-			for f := range renderedFailures {
-				if expectedFailures.Has(f) {
-					expectedFailures.Delete(f)
-					continue
-				}
-				t.Errorf("got unexpected failure: %v", f)
-			}
-			for f := range expectedFailures {
-				t.Errorf("the following expected failure was not returned: %v", f)
-			}
-
-			expectedImageDeletions := sets.NewString(test.expectedImageDeletions...)
-			if a, e := imageDeleter.invocations, expectedImageDeletions; !reflect.DeepEqual(a, e) {
-				t.Errorf("unexpected image deletions: %s", diff.ObjectDiff(a, e))
-			}
-
-			expectedStreamUpdates := sets.NewString(test.expectedStreamUpdates...)
-			if a, e := streamDeleter.invocations, expectedStreamUpdates; !reflect.DeepEqual(a, e) {
-				t.Errorf("unexpected stream updates: %s", diff.ObjectDiff(a, e))
-			}
-
-			expectedLayerLinkDeletions := sets.NewString(test.expectedLayerLinkDeletions...)
-			if a, e := layerLinkDeleter.invocations, expectedLayerLinkDeletions; !reflect.DeepEqual(a, e) {
-				t.Errorf("unexpected layer link deletions: %s", diff.ObjectDiff(a, e))
-			}
-
-			expectedManifestLinkDeletions := sets.NewString(test.expectedManifestLinkDeletions...)
-			if a, e := manifestDeleter.invocations, expectedManifestLinkDeletions; !reflect.DeepEqual(a, e) {
-				t.Errorf("unexpected manifest link deletions: %s", diff.ObjectDiff(a, e))
-			}
-
-			expectedBlobDeletions := sets.NewString(test.expectedBlobDeletions...)
-			if a, e := blobDeleter.invocations, expectedBlobDeletions; !reflect.DeepEqual(a, e) {
-				t.Errorf("unexpected blob deletions: %s", diff.ObjectDiff(a, e))
-			}
-
-			// TODO: should we return a deletion for each layer link unlinked from the image stream?
-			imageStreamUpdates := sets.NewString()
-			expectedAllDeletions := sets.NewString()
-			for _, s := range []sets.String{expectedImageDeletions, expectedLayerLinkDeletions, expectedBlobDeletions} {
-				expectedAllDeletions.Insert(s.List()...)
-			}
-			for _, d := range deletions {
-				rendered, isImageStreamUpdate, isManifestLinkDeletion := renderDeletion(registryURL, &d)
-				if isManifestLinkDeletion {
-					continue
-				}
-				if isImageStreamUpdate {
-					imageStreamUpdates.Insert(rendered)
-					continue
-				}
-				if expectedAllDeletions.Has(rendered) {
-					expectedAllDeletions.Delete(rendered)
-				} else {
-					t.Errorf("got unexpected deletion: %#+v (rendered: %q)", d, rendered)
-				}
-			}
-			for _, f := range failures {
-				rendered, _, _ := renderDeletion(registryURL, &Deletion{Node: f.Node, Parent: f.Parent})
-				expectedAllDeletions.Delete(rendered)
-			}
-			for del, ok := expectedAllDeletions.PopAny(); ok; del, ok = expectedAllDeletions.PopAny() {
-				t.Errorf("expected deletion %q did not happen", del)
-			}
-
-			expectedStreamUpdateNames := sets.NewString()
-			for u := range expectedStreamUpdates {
-				expectedStreamUpdateNames.Insert(regexp.MustCompile(`[@|:]`).Split(u, 2)[0])
-			}
-			if a, e := imageStreamUpdates, expectedStreamUpdateNames; !reflect.DeepEqual(a, e) {
-				t.Errorf("unexpected image stream updates in deletions: %s", diff.ObjectDiff(a, e))
-			}
-		})
-	}
-}
-
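-// renderDeletion renders a Deletion as the pipe-delimited string used by the expected fixtures and reports
-// whether it is an image stream update or a manifest link deletion.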
-func renderDeletion(registryURL string, deletion *Deletion) (rendered string, isImageStreamUpdate, isManifestLinkDeletion bool) {
-	switch t := deletion.Node.(type) {
-	case *imagegraph.ImageNode:
-		return t.Image.Name, false, false
-	case *imagegraph.ImageComponentNode:
-		// a nil parent means the blob itself is being deleted; otherwise the component is a link within a stream's repository
-		if deletion.Parent == nil {
-			return fmt.Sprintf("%s|%s", registryURL, t.Component), false, false
-		}
-		streamName := "unknown"
-		if sn, ok := deletion.Parent.(*imagegraph.ImageStreamNode); ok {
-			streamName = getName(sn.ImageStream)
-		}
-		return fmt.Sprintf("%s|%s|%s", registryURL, streamName, t.Component), false, t.Type == imagegraph.ImageComponentTypeManifest
-	case *imagegraph.ImageStreamNode:
-		return getName(t.ImageStream), true, false
-	}
-	return "unknown", false, false
-}
-
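-// renderFailure renders a Failure as its rendered deletion plus the error message, matching the
-// expectedFailures fixtures.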
-func renderFailure(registryURL string, failure *Failure) string {
-	rendered, _, _ := renderDeletion(registryURL, &Deletion{Node: failure.Node, Parent: failure.Parent})
-	return rendered + "|" + failure.Err.Error()
-}
-
-func TestImageDeleter(t *testing.T) {
-	var level klog.Level
-	level.Set(fmt.Sprint(*logLevel))
-
-	tests := map[string]struct {
-		imageDeletionError error
-	}{
-		"no error": {},
-		"delete error": {
-			imageDeletionError: fmt.Errorf("foo"),
-		},
-	}
-
-	for name, test := range tests {
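-		// fake client whose delete reactor fails with the injected error (nil for the happy path)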
-		imageClient := &fakeimagev1client.FakeImageV1{Fake: &clienttesting.Fake{}}
-		imageClient.AddReactor("delete", "images", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
-			return true, nil, test.imageDeletionError
-		})
-		imageDeleter := NewImageDeleter(imageClient)
-		err := imageDeleter.DeleteImage(&imagev1.Image{ObjectMeta: metav1.ObjectMeta{Name: "sha256:0000000000000000000000000000000000000000000000000000000000000002"}})
-		if test.imageDeletionError != nil {
-			if e, a := test.imageDeletionError, err; e != a {
-				t.Errorf("%s: err: expected %v, got %v", name, e, a)
-			}
-			continue
-		}
-
-		if e, a := 1, len(imageClient.Actions()); e != a {
-			t.Errorf("%s: expected %d actions, got %d: %#v", name, e, a, imageClient.Actions())
-			continue
-		}
-
-		if !imageClient.Actions()[0].Matches("delete", "images") {
-			t.Errorf("%s: expected action %s, got %v", name, "delete-images", imageClient.Actions()[0])
-		}
-	}
-}
-
-func TestLayerDeleter(t *testing.T) {
-	var level klog.Level
-	level.Set(fmt.Sprint(*logLevel))
-
-	var actions []string
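-	// stub HTTP client that records each request and always answers 503; the test verifies only that the
-	// expected DELETE against the blob endpoint is issued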
-	client := fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
-		actions = append(actions, req.Method+":"+req.URL.String())
-		return &http.Response{StatusCode: http.StatusServiceUnavailable, Body: ioutil.NopCloser(bytes.NewReader([]byte{}))}, nil
-	})
-	layerLinkDeleter := NewLayerLinkDeleter()
-	layerLinkDeleter.DeleteLayerLink(client, &url.URL{Scheme: "http", Host: "registry1"}, "repo", "layer1")
-
-	if e := []string{"DELETE:http://registry1/v2/repo/blobs/layer1"}; !reflect.DeepEqual(actions, e) {
-		t.Errorf("unexpected actions: %s", diff.ObjectDiff(actions, e))
-	}
-}
-
-func TestNotFoundLayerDeleter(t *testing.T) {
-	var level klog.Level
-	level.Set(fmt.Sprint(*logLevel))
-
-	var actions []string
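-	// as above, but the stub answers 404 Not Found; the DELETE must still be issued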
-	client := fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
-		actions = append(actions, req.Method+":"+req.URL.String())
-		return &http.Response{StatusCode: http.StatusNotFound, Body: ioutil.NopCloser(bytes.NewReader([]byte{}))}, nil
-	})
-	layerLinkDeleter := NewLayerLinkDeleter()
-	layerLinkDeleter.DeleteLayerLink(client, &url.URL{Scheme: "https", Host: "registry1"}, "repo", "layer1")
-
-	if e := []string{"DELETE:https://registry1/v2/repo/blobs/layer1"}; !reflect.DeepEqual(actions, e) {
-		t.Errorf("unexpected actions: %s", diff.ObjectDiff(actions, e))
-	}
-}
-
-func TestRegistryPruning(t *testing.T) {
-	var level klog.Level
-	level.Set(fmt.Sprint(*logLevel))
-
-	tests := []struct {
-		name                       string
-		images                     imagev1.ImageList
-		streams                    imagev1.ImageStreamList
-		expectedLayerLinkDeletions sets.String
-		expectedBlobDeletions      sets.String
-		expectedManifestDeletions  sets.String
-		pruneRegistry              bool
-		pingErr                    error
-	}{
-		{
-			name:          "layers unique to id1 pruned",
-			pruneRegistry: true,
-			images: imagetest.ImageList(
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", &imagetest.Config1, "layer1", "layer2", "layer3", "layer4"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", &imagetest.Config2, "layer3", "layer4", "layer5", "layer6"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream("registry1.io", "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-					),
-				}),
-				imagetest.Stream("registry1.io", "foo", "other", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/other@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-					),
-				}),
-			),
-			expectedLayerLinkDeletions: sets.NewString(
-				"https://registry1.io|foo/bar|"+imagetest.Config1,
-				"https://registry1.io|foo/bar|layer1",
-				"https://registry1.io|foo/bar|layer2",
-			),
-			expectedBlobDeletions: sets.NewString(
-				"https://registry1.io|sha256:0000000000000000000000000000000000000000000000000000000000000001",
-				"https://registry1.io|"+imagetest.Config1,
-				"https://registry1.io|layer1",
-				"https://registry1.io|layer2",
-			),
-			expectedManifestDeletions: sets.NewString(
-				"https://registry1.io|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000001",
-			),
-		},
-
-		{
-			name:          "no pruning when no images are pruned",
-			pruneRegistry: true,
-			images: imagetest.ImageList(
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", &imagetest.Config1, "layer1", "layer2", "layer3", "layer4"),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream("registry1.io", "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-					),
-				}),
-			),
-			expectedLayerLinkDeletions: sets.NewString(),
-			expectedBlobDeletions:      sets.NewString(),
-			expectedManifestDeletions:  sets.NewString(),
-		},
-
-		{
-			name:          "blobs pruned when streams have already been deleted",
-			pruneRegistry: true,
-			images: imagetest.ImageList(
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", &imagetest.Config1, "layer1", "layer2", "layer3", "layer4"),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", &imagetest.Config2, "layer3", "layer4", "layer5", "layer6"),
-			),
-			expectedLayerLinkDeletions: sets.NewString(),
-			expectedBlobDeletions: sets.NewString(
-				"https://registry1.io|sha256:0000000000000000000000000000000000000000000000000000000000000001",
-				"https://registry1.io|sha256:0000000000000000000000000000000000000000000000000000000000000002",
-				"https://registry1.io|"+imagetest.Config1,
-				"https://registry1.io|"+imagetest.Config2,
-				"https://registry1.io|layer1",
-				"https://registry1.io|layer2",
-				"https://registry1.io|layer3",
-				"https://registry1.io|layer4",
-				"https://registry1.io|layer5",
-				"https://registry1.io|layer6",
-			),
-			expectedManifestDeletions: sets.NewString(),
-		},
-
-		{
-			name:          "config used as a layer",
-			pruneRegistry: true,
-			images: imagetest.ImageList(
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", &imagetest.Config1, "layer1", "layer2", "layer3", imagetest.Config1),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", &imagetest.Config2, "layer3", "layer4", "layer5", imagetest.Config1),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000003", "registry1.io/foo/other@sha256:0000000000000000000000000000000000000000000000000000000000000003", nil, "layer3", "layer4", "layer6", imagetest.Config1),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream("registry1.io", "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-					),
-				}),
-				imagetest.Stream("registry1.io", "foo", "other", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", "registry1.io/foo/other@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-					),
-				}),
-			),
-			expectedLayerLinkDeletions: sets.NewString(
-				"https://registry1.io|foo/bar|layer1",
-				"https://registry1.io|foo/bar|layer2",
-			),
-			expectedBlobDeletions: sets.NewString(
-				"https://registry1.io|sha256:0000000000000000000000000000000000000000000000000000000000000001",
-				"https://registry1.io|layer1",
-				"https://registry1.io|layer2",
-			),
-			expectedManifestDeletions: sets.NewString(
-				"https://registry1.io|foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000001",
-			),
-		},
-
-		{
-			name:          "config used as a layer, but leave registry alone",
-			pruneRegistry: false,
-			images: imagetest.ImageList(
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", &imagetest.Config1, "layer1", "layer2", "layer3", imagetest.Config1),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", &imagetest.Config2, "layer3", "layer4", "layer5", imagetest.Config1),
-				imagetest.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000003", "registry1.io/foo/other@sha256:0000000000000000000000000000000000000000000000000000000000000003", nil, "layer3", "layer4", "layer6", imagetest.Config1),
-			),
-			streams: imagetest.StreamList(
-				imagetest.Stream("registry1.io", "foo", "bar", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-					),
-				}),
-				imagetest.Stream("registry1.io", "foo", "other", []imagev1.NamedTagEventList{
-					imagetest.Tag("latest",
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", "registry1.io/foo/other@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-						imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-					),
-				}),
-			),
-			expectedLayerLinkDeletions: sets.NewString(),
-			expectedBlobDeletions:      sets.NewString(),
-			expectedManifestDeletions:  sets.NewString(),
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			keepYoungerThan := 60 * time.Minute
-			keepTagRevisions := 1
-			options := PrunerOptions{
-				KeepYoungerThan:       &keepYoungerThan,
-				KeepTagRevisions:      &keepTagRevisions,
-				PruneRegistry:         &test.pruneRegistry,
-				Images:                &test.images,
-				ImageWatcher:          watch.NewFake(),
-				Streams:               &test.streams,
-				StreamWatcher:         watch.NewFake(),
-				Pods:                  &corev1.PodList{},
-				RCs:                   &corev1.ReplicationControllerList{},
-				BCs:                   &buildv1.BuildConfigList{},
-				Builds:                &buildv1.BuildList{},
-				DSs:                   &kappsv1.DaemonSetList{},
-				Deployments:           &kappsv1.DeploymentList{},
-				DCs:                   &appsv1.DeploymentConfigList{},
-				RSs:                   &kappsv1.ReplicaSetList{},
-				RegistryClientFactory: FakeRegistryClientFactory,
-				RegistryURL:           &url.URL{Scheme: "https", Host: "registry1.io"},
-			}
-			p, err := NewPruner(options)
-			if err != nil {
-				t.Fatalf("unexpected error: %v", err)
-			}
-
-			_, imageDeleterFactory := newFakeImageDeleter(nil)
-			streamDeleter := &fakeImageStreamDeleter{invocations: sets.NewString()}
-			layerLinkDeleter := &fakeLayerLinkDeleter{invocations: sets.NewString()}
-			blobDeleter := &fakeBlobDeleter{invocations: sets.NewString()}
-			manifestDeleter := &fakeManifestDeleter{invocations: sets.NewString()}
-
-			p.Prune(imageDeleterFactory, streamDeleter, layerLinkDeleter, blobDeleter, manifestDeleter)
-
-			if a, e := layerLinkDeleter.invocations, test.expectedLayerLinkDeletions; !reflect.DeepEqual(a, e) {
-				t.Errorf("unexpected layer link deletions: %s", diff.ObjectDiff(a, e))
-			}
-			if a, e := blobDeleter.invocations, test.expectedBlobDeletions; !reflect.DeepEqual(a, e) {
-				t.Errorf("unexpected blob deletions: %s", diff.ObjectDiff(a, e))
-			}
-			if a, e := manifestDeleter.invocations, test.expectedManifestDeletions; !reflect.DeepEqual(a, e) {
-				t.Errorf("unexpected manifest deletions: %s", diff.ObjectDiff(a, e))
-			}
-		})
-	}
-}
-
-func newBool(a bool) *bool {
-	r := new(bool)
-	*r = a
-	return r
-}
-
-func TestImageWithStrongAndWeakRefsIsNotPruned(t *testing.T) {
-	var level klog.Level
-	level.Set(fmt.Sprint(*logLevel))
-
-	images := imagetest.ImageList(
-		imagetest.AgedImage("0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", 1540),
-		imagetest.AgedImage("0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", 1540),
-		imagetest.AgedImage("0000000000000000000000000000000000000000000000000000000000000003", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", 1540),
-	)
-	streams := imagetest.StreamList(
-		imagetest.Stream("registry1", "foo", "bar", []imagev1.NamedTagEventList{
-			imagetest.Tag("latest",
-				imagetest.TagEvent("0000000000000000000000000000000000000000000000000000000000000003", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
-				imagetest.TagEvent("0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-				imagetest.TagEvent("0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-			),
-			imagetest.Tag("strong",
-				imagetest.TagEvent("0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
-			),
-		}),
-	)
-	pods := imagetest.PodList()
-	rcs := imagetest.RCList()
-	bcs := imagetest.BCList()
-	builds := imagetest.BuildList()
-	dss := imagetest.DSList()
-	deployments := imagetest.DeploymentList()
-	dcs := imagetest.DCList()
-	rss := imagetest.RSList()
-
-	options := PrunerOptions{
-		Images:        &images,
-		ImageWatcher:  watch.NewFake(),
-		Streams:       &streams,
-		StreamWatcher: watch.NewFake(),
-		Pods:          &pods,
-		RCs:           &rcs,
-		BCs:           &bcs,
-		Builds:        &builds,
-		DSs:           &dss,
-		Deployments:   &deployments,
-		DCs:           &dcs,
-		RSs:           &rss,
-	}
-	keepYoungerThan := 24 * time.Hour
-	keepTagRevisions := 2
-	options.KeepYoungerThan = &keepYoungerThan
-	options.KeepTagRevisions = &keepTagRevisions
-	p, err := NewPruner(options)
-	if err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-
-	imageDeleter, imageDeleterFactory := newFakeImageDeleter(nil)
-	streamDeleter := &fakeImageStreamDeleter{invocations: sets.NewString()}
-	layerLinkDeleter := &fakeLayerLinkDeleter{invocations: sets.NewString()}
-	blobDeleter := &fakeBlobDeleter{invocations: sets.NewString()}
-	manifestDeleter := &fakeManifestDeleter{invocations: sets.NewString()}
-
-	deletions, failures := p.Prune(imageDeleterFactory, streamDeleter, layerLinkDeleter, blobDeleter, manifestDeleter)
-	if len(failures) != 0 {
-		t.Errorf("got unexpected failures: %#+v", failures)
-	}
-
-	if len(deletions) > 0 {
-		t.Fatalf("got unexpected deletions: %#+v", deletions)
-	}
-
-	if imageDeleter.invocations.Len() > 0 {
-		t.Fatalf("unexpected imageDeleter invocations: %v", imageDeleter.invocations)
-	}
-	if streamDeleter.invocations.Len() > 0 {
-		t.Fatalf("unexpected streamDeleter invocations: %v", streamDeleter.invocations)
-	}
-	if layerLinkDeleter.invocations.Len() > 0 {
-		t.Fatalf("unexpected layerLinkDeleter invocations: %v", layerLinkDeleter.invocations)
-	}
-	if blobDeleter.invocations.Len() > 0 {
-		t.Fatalf("unexpected blobDeleter invocations: %v", blobDeleter.invocations)
-	}
-	if manifestDeleter.invocations.Len() > 0 {
-		t.Fatalf("unexpected manifestDeleter invocations: %v", manifestDeleter.invocations)
-	}
-}
-
-func TestImageIsPrunable(t *testing.T) {
-	g := genericgraph.New()
-	imageNode := imagegraph.EnsureImageNode(g, &imagev1.Image{ObjectMeta: metav1.ObjectMeta{Name: "myImage"}})
-	streamNode := imagegraph.EnsureImageStreamNode(g, &imagev1.ImageStream{ObjectMeta: metav1.ObjectMeta{Name: "myStream"}})
-	g.AddEdge(streamNode, imageNode, ReferencedImageEdgeKind)
-	g.AddEdge(streamNode, imageNode, WeakReferencedImageEdgeKind)
-
-	if imageIsPrunable(g, imageNode.(*imagegraph.ImageNode), pruneAlgorithm{}) {
-		t.Fatalf("Image is prunable although it should not")
-	}
-}
-
-func TestPrunerGetNextJob(t *testing.T) {
-	var level klog.Level
-	level.Set(fmt.Sprint(*logLevel))
-
-	klog.V(2).Infof("debug")
-	algo := pruneAlgorithm{
-		keepYoungerThan: time.Now(),
-	}
-	p := &pruner{algorithm: algo, processedImages: make(map[*imagegraph.ImageNode]*Job)}
-	images := imagetest.ImageList(
-		imagetest.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000003", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", 1, "layer1"),
-		imagetest.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", 2, "layer1", "layer2"),
-		imagetest.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", 3, "Layer1", "Layer2", "Layer3"),
-		imagetest.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000013", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000013", 4, "Layer1", "LayeR2", "LayeR3"),
-		imagetest.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000012", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000012", 5, "LayeR1", "LayeR2"),
-		imagetest.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000011", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000011", 6, "layer1", "Layer2", "LAYER3", "LAYER4"),
-		imagetest.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000010", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000010", 7, "layer1", "layer2", "layer3", "layer4"),
-	)
-	p.g = genericgraph.New()
-	err := p.addImagesToGraph(&images)
-	if err != nil {
-		t.Fatalf("failed to add images: %v", err)
-	}
-
-	is := images.Items
-	imageStreams := imagetest.StreamList(
-		imagetest.Stream("example.com", "foo", "bar", []imagev1.NamedTagEventList{
-			imagetest.Tag("latest",
-				imagetest.TagEvent(is[3].Name, is[3].DockerImageReference),
-				imagetest.TagEvent(is[4].Name, is[4].DockerImageReference),
-				imagetest.TagEvent(is[5].Name, is[5].DockerImageReference))}),
-		imagetest.Stream("example.com", "foo", "baz", []imagev1.NamedTagEventList{
-			imagetest.Tag("devel",
-				imagetest.TagEvent(is[3].Name, is[3].DockerImageReference),
-				imagetest.TagEvent(is[2].Name, is[2].DockerImageReference),
-				imagetest.TagEvent(is[1].Name, is[1].DockerImageReference)),
-			imagetest.Tag("prod",
-				imagetest.TagEvent(is[2].Name, is[2].DockerImageReference))}))
-	if err := p.addImageStreamsToGraph(&imageStreams, nil); err != nil {
-		t.Fatalf("failed to add image streams: %v", err)
-	}
-
-	imageNodes := getImageNodes(p.g.Nodes())
-	if len(imageNodes) == 0 {
-		t.Fatalf("not images nodes")
-	}
-	prunable := calculatePrunableImages(p.g, imageNodes, algo)
-	sort.Sort(byLayerCountAndAge(prunable))
-	p.queue = makeQueue(prunable)
-
-	checkQueue := func(desc string, expected ...*imagev1.Image) {
-		for i, item := 0, p.queue; i < len(expected) || item != nil; i++ {
-			if i >= len(expected) {
-				t.Errorf("[%s] unexpected image at #%d: %s", desc, i, item.node.Image.Name)
-			} else if item == nil {
-				t.Errorf("[%s] expected image %q not found at #%d", desc, expected[i].Name, i)
-			} else if item.node.Image.Name != expected[i].Name {
-				t.Errorf("[%s] unexpected image at #%d: %s != %s", desc, i, item.node.Image.Name, expected[i].Name)
-			}
-			if item != nil {
-				item = item.next
-			}
-		}
-		if t.Failed() {
-			t.FailNow()
-		}
-	}
-
-	/* layerrefs: layer1:4, Layer1:2, LayeR1:1, layer2:2, Layer2:2, LayeR2:2,
-	 * layer3:1, Layer3:1, LayeR3:1, LAYER3:1, layer4:1, LAYER4:1 */
-	checkQueue("initial state", &is[6], &is[5], &is[3], &is[2], &is[4], &is[1], &is[0])
-	job := expectBlockedOrJob(t, p, "pop first", false, &is[6], []string{"layer4", "layer3"})(p.getNextJob())
-	p.processedImages[job.Image] = job
-	imgnd6 := job.Image
-
-	/* layerrefs: layer1:3, Layer1:2, LayeR1:1, layer2:1, Layer2:2, LayeR2:2,
-	 * layer3:0, Layer3:1, LayeR3:1, LAYER3:1, layer4:0, LAYER4:1 */
-	checkQueue("1 removed", &is[5], &is[3], &is[2], &is[4], &is[1], &is[0])
-	job = expectBlockedOrJob(t, p, "pop second", false, &is[5], []string{"LAYER3", "LAYER4"})(p.getNextJob())
-	p.processedImages[job.Image] = job
-	imgnd5 := job.Image
-
-	/* layerrefs: layer1:2, Layer1:2, LayeR1:1, layer2:1, Layer2:1, LayeR2:2,
-	 * Layer3:1, LayeR3:1, LAYER3:0, LAYER4:0 */
-	checkQueue("2 removed", &is[3], &is[2], &is[4], &is[1], &is[0])
-	job = expectBlockedOrJob(t, p, "pop third", false, &is[3], []string{"LayeR3"})(p.getNextJob())
-	p.processedImages[job.Image] = job
-	imgnd3 := job.Image
-
-	// layerrefs: layer1:2, Layer1:1, LayeR1:1, layer2:1, Layer2:1, LayeR2:1, Layer3:1, LayeR3:0
-	checkQueue("3 removed", &is[2], &is[4], &is[1], &is[0])
-	// all the remaining images are blocked now except for is[0]
-	job = expectBlockedOrJob(t, p, "pop fourth", false, &is[0], nil)(p.getNextJob())
-	p.processedImages[job.Image] = job
-	imgnd0 := job.Image
-
-	// layerrefs: layer1:1, Layer1:1, LayeR1:1, layer2:1, Layer2:1, LayeR2:1, Layer3:1
-	checkQueue("4 removed and blocked", &is[2], &is[4], &is[1])
-	// all the remaining images are blocked now
-	expectBlockedOrJob(t, p, "blocked", true, nil, nil)(p.getNextJob())
-
-	// layerrefs: layer1:1, Layer1:2, LayeR1:1, layer2:1, Layer2:1, LayeR2:1, Layer3:1
-	checkQueue("3 to go", &is[2], &is[4], &is[1])
-	// unblock one of the images
-	p.g.RemoveNode(imgnd3)
-	job = expectBlockedOrJob(t, p, "pop fifth", false, &is[4],
-		[]string{"LayeR1", "LayeR2"})(p.getNextJob())
-	p.processedImages[job.Image] = job
-	imgnd4 := job.Image
-
-	// layerrefs: layer1:1, Layer1:2, LayeR1:0, layer2:1, Layer2:1, LayeR2:0, Layer3:1
-	checkQueue("2 to go", &is[2], &is[1])
-	expectBlockedOrJob(t, p, "blocked with two items#1", true, nil, nil)(p.getNextJob())
-	checkQueue("still 2 to go", &is[2], &is[1])
-
-	p.g.RemoveNode(imgnd0)
-	delete(p.processedImages, imgnd0)
-	expectBlockedOrJob(t, p, "blocked with two items#2", true, nil, nil)(p.getNextJob())
-	p.g.RemoveNode(imgnd6)
-	delete(p.processedImages, imgnd6)
-	expectBlockedOrJob(t, p, "blocked with two items#3", true, nil, nil)(p.getNextJob())
-	p.g.RemoveNode(imgnd4)
-	delete(p.processedImages, imgnd4)
-	expectBlockedOrJob(t, p, "blocked with two items#4", true, nil, nil)(p.getNextJob())
-	p.g.RemoveNode(imgnd5)
-	delete(p.processedImages, imgnd5)
-
-	job = expectBlockedOrJob(t, p, "pop sixth", false, &is[2],
-		[]string{"Layer1", "Layer2", "Layer3"})(p.getNextJob())
-	p.processedImages[job.Image] = job
-
-	// layerrefs: layer1:1, Layer1:0, layer2:1, Layer2:0, Layer3:0
-	checkQueue("1 to go", &is[1])
-	job = expectBlockedOrJob(t, p, "pop last", false, &is[1],
-		[]string{"layer1", "layer2"})(p.getNextJob())
-	p.processedImages[job.Image] = job
-
-	// layerrefs: layer1:0, layer2:0
-	checkQueue("queue empty")
-	expectBlockedOrJob(t, p, "empty", false, nil, nil)(p.getNextJob())
-}
-
-func expectBlockedOrJob(
-	t *testing.T,
-	p *pruner,
-	desc string,
-	blocked bool,
-	image *imagev1.Image,
-	layers []string,
-) func(job *Job, blocked bool) *Job {
-	return func(job *Job, b bool) *Job {
-		if b != blocked {
-			t.Fatalf("[%s] unexpected blocked: %t != %t", desc, b, blocked)
-		}
-
-		if blocked {
-			return job
-		}
-
-		if image == nil && job != nil {
-			t.Fatalf("[%s] got unexpected job %#+v", desc, job)
-		}
-		if image != nil && job == nil {
-			t.Fatalf("[%s] got nil instead of job", desc)
-		}
-		if job == nil {
-			return nil
-		}
-
-		if a, e := job.Image.Image.Name, image.Name; a != e {
-			t.Errorf("[%s] unexpected image in job: %s != %s", desc, a, e)
-		}
-
-		expLayers := sets.NewString(imagegraph.EnsureImageComponentManifestNode(
-			p.g, job.Image.Image.Name).(*imagegraph.ImageComponentNode).String())
-		for _, l := range layers {
-			expLayers.Insert(imagegraph.EnsureImageComponentLayerNode(
-				p.g, l).(*imagegraph.ImageComponentNode).String())
-		}
-		actLayers := sets.NewString()
-		for c, ret := range job.Components {
-			if ret.PrunableGlobally {
-				actLayers.Insert(c.String())
-			}
-		}
-		if a, e := actLayers, expLayers; !reflect.DeepEqual(a, e) {
-			t.Errorf("[%s] unexpected image components: %s", desc, diff.ObjectDiff(a.List(), e.List()))
-		}
-
-		if t.Failed() {
-			t.FailNow()
-		}
-
-		return job
-	}
-}
-
-func TestChangeImageStreamsWhilePruning(t *testing.T) {
-	t.Skip("failed after commenting out")
-	var level klog.Level
-	level.Set(fmt.Sprint(*logLevel))
-
-	images := imagetest.ImageList(
-		imagetest.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", 5),
-		imagetest.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", 4),
-		imagetest.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000003", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", 3),
-		imagetest.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000004", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", 2),
-		imagetest.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000005", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", 1),
-	)
-
-	streams := imagetest.StreamList(imagetest.Stream("registry1", "foo", "bar", []imagev1.NamedTagEventList{}))
-	streamWatcher := watch.NewFake()
-	pods := imagetest.PodList()
-	rcs := imagetest.RCList()
-	bcs := imagetest.BCList()
-	builds := imagetest.BuildList()
-	dss := imagetest.DSList()
-	deployments := imagetest.DeploymentList()
-	dcs := imagetest.DCList()
-	rss := imagetest.RSList()
-
-	options := PrunerOptions{
-		Images:                &images,
-		ImageWatcher:          watch.NewFake(),
-		Streams:               &streams,
-		StreamWatcher:         streamWatcher,
-		Pods:                  &pods,
-		RCs:                   &rcs,
-		BCs:                   &bcs,
-		Builds:                &builds,
-		DSs:                   &dss,
-		Deployments:           &deployments,
-		DCs:                   &dcs,
-		RSs:                   &rss,
-		RegistryClientFactory: FakeRegistryClientFactory,
-		RegistryURL:           &url.URL{Scheme: "https", Host: "registry1.io"},
-		NumWorkers:            1,
-	}
-	keepYoungerThan := 30 * time.Second
-	keepTagRevisions := 2
-	options.KeepYoungerThan = &keepYoungerThan
-	options.KeepTagRevisions = &keepTagRevisions
-	p, err := NewPruner(options)
-	if err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-
-	pruneFinished := make(chan struct{})
-	deletions, failures := []Deletion{}, []Failure{}
-	imageDeleter, imageDeleterFactory := newBlockingImageDeleter(t)
-
-	// run the pruning loop in a goroutine
-	go func() {
-		deletions, failures = p.Prune(
-			imageDeleterFactory,
-			&fakeImageStreamDeleter{invocations: sets.NewString()},
-			&fakeLayerLinkDeleter{invocations: sets.NewString()},
-			&fakeBlobDeleter{invocations: sets.NewString()},
-			&fakeManifestDeleter{invocations: sets.NewString()},
-		)
-		if len(failures) != 0 {
-			t.Errorf("got unexpected failures: %#+v", failures)
-		}
-		close(pruneFinished)
-	}()
-
-	expectedImageDeletions := sets.NewString()
-	expectedBlobDeletions := sets.NewString()
-
-	img := imageDeleter.waitForRequest()
-	if a, e := img.Name, images.Items[0].Name; a != e {
-		t.Fatalf("got unexpected image deletion request: %s != %s", a, e)
-	}
-	expectedImageDeletions.Insert(images.Items[0].Name)
-	expectedBlobDeletions.Insert("registry1|" + images.Items[0].Name)
-
-	// let the pruner wait for a reply and meanwhile reference an image with a new image stream
-	stream := imagetest.Stream("registry1", "foo", "new", []imagev1.NamedTagEventList{
-		imagetest.Tag("latest",
-			imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1/foo/new@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-		)})
-	streamWatcher.Add(&stream)
-	imageDeleter.unblock()
-
-	// the pruner shall skip the newly referenced image
-	img = imageDeleter.waitForRequest()
-	if a, e := img.Name, images.Items[2].Name; a != e {
-		t.Fatalf("got unexpected image deletion request: %s != %s", a, e)
-	}
-	expectedImageDeletions.Insert(images.Items[2].Name)
-	expectedBlobDeletions.Insert("registry1|" + images.Items[2].Name)
-
-	// now let's modify the existing image stream to reference some more images
-	stream = imagetest.Stream("registry1", "foo", "bar", []imagev1.NamedTagEventList{
-		imagetest.Tag("latest",
-			imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "registry1/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-			imagetest.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", "registry1/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-		)})
-	streamWatcher.Modify(&stream)
-	imageDeleter.unblock()
-
-	// the pruner shall skip the newly referenced image
-	img = imageDeleter.waitForRequest()
-	if a, e := img.Name, images.Items[4].Name; a != e {
-		t.Fatalf("got unexpected image deletion request: %s != %s", a, e)
-	}
-	expectedImageDeletions.Insert(images.Items[4].Name)
-	expectedBlobDeletions.Insert("registry1|" + images.Items[4].Name)
-	imageDeleter.unblock()
-
-	// no more images - wait for the pruner to finish
-	select {
-	case <-pruneFinished:
-	case <-time.After(time.Second):
-		t.Errorf("tester: timeout while waiting for pruner to finish")
-	}
-
-	if a, e := imageDeleter.d.invocations, expectedImageDeletions; !reflect.DeepEqual(a, e) {
-		t.Errorf("unexpected image deletions: %s", diff.ObjectDiff(a, e))
-	}
-
-	expectedAllDeletions := sets.NewString(
-		append(expectedImageDeletions.List(), expectedBlobDeletions.List()...)...)
-	for _, d := range deletions {
-		rendered, _, isManifestLinkDeletion := renderDeletion("registry1", &d)
-		if isManifestLinkDeletion {
-			// TODO: update tests to count and verify the number of manifest link deletions
-			continue
-		}
-		if expectedAllDeletions.Has(rendered) {
-			expectedAllDeletions.Delete(rendered)
-		} else {
-			t.Errorf("got unexpected deletion: %#+v (rendered: %q)", d, rendered)
-		}
-	}
-	for del, ok := expectedAllDeletions.PopAny(); ok; del, ok = expectedAllDeletions.PopAny() {
-		t.Errorf("expected deletion %q did not happen", del)
-	}
-}
-
-func streamListToClient(list *imagev1.ImageStreamList) imagev1client.ImageStreamsGetter {
-	streams := make([]runtime.Object, 0, len(list.Items))
-	for i := range list.Items {
-		streams = append(streams, &list.Items[i])
-	}
-
-	return &fakeimagev1client.FakeImageV1{Fake: &(fakeimageclient.NewSimpleClientset(streams...).Fake)}
-}
-
-func keepTagRevisions(n int) *int {
-	return &n
-}
-
-type fakeImageDeleter struct {
-	mutex       sync.Mutex
-	invocations sets.String
-	err         error
-}
-
-var _ ImageDeleter = &fakeImageDeleter{}
-
-func (p *fakeImageDeleter) DeleteImage(image *imagev1.Image) error {
-	p.mutex.Lock()
-	defer p.mutex.Unlock()
-	p.invocations.Insert(image.Name)
-	return p.err
-}
-
-func newFakeImageDeleter(err error) (*fakeImageDeleter, ImagePrunerFactoryFunc) {
-	deleter := &fakeImageDeleter{
-		err:         err,
-		invocations: sets.NewString(),
-	}
-	return deleter, func() (ImageDeleter, error) {
-		return deleter, nil
-	}
-}
-
-type blockingImageDeleter struct {
-	t        *testing.T
-	d        *fakeImageDeleter
-	requests chan *imagev1.Image
-	reply    chan struct{}
-}
-
-func (bid *blockingImageDeleter) DeleteImage(img *imagev1.Image) error {
-	bid.requests <- img
-	select {
-	case <-bid.reply:
-	case <-time.After(time.Second):
-		bid.t.Fatalf("worker: timeout while waiting for image deletion confirmation")
-	}
-	return bid.d.DeleteImage(img)
-}
-
-func (bid *blockingImageDeleter) waitForRequest() *imagev1.Image {
-	select {
-	case img := <-bid.requests:
-		return img
-	case <-time.After(time.Second):
-		bid.t.Fatalf("tester: timeout while waiting on worker's request")
-		return nil
-	}
-}
-
-func (bid *blockingImageDeleter) unblock() {
-	bid.reply <- struct{}{}
-}
-
-func newBlockingImageDeleter(t *testing.T) (*blockingImageDeleter, ImagePrunerFactoryFunc) {
-	deleter, _ := newFakeImageDeleter(nil)
-	blocking := blockingImageDeleter{
-		t:        t,
-		d:        deleter,
-		requests: make(chan *imagev1.Image),
-		reply:    make(chan struct{}),
-	}
-	return &blocking, func() (ImageDeleter, error) {
-		return &blocking, nil
-	}
-}
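
blockingImageDeleter turns each DeleteImage call into a two-step rendezvous: the worker publishes the image on requests, then blocks until the test acknowledges via reply. A self-contained sketch of the same handshake (hypothetical types, timeouts elided):

package main

import "fmt"

type rendezvous struct {
	requests chan string
	reply    chan struct{}
}

// do publishes a request and blocks until the controller acknowledges it.
func (r *rendezvous) do(name string) {
	r.requests <- name
	<-r.reply
	fmt.Println("worker: finished", name)
}

func main() {
	r := &rendezvous{requests: make(chan string), reply: make(chan struct{})}
	done := make(chan struct{})

	go func() { // worker side
		r.do("image1")
		close(done)
	}()

	name := <-r.requests // controller observes the in-flight request...
	fmt.Println("controller: saw", name)
	r.reply <- struct{}{} // ...and unblocks the worker
	<-done
}
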
-
-type fakeImageStreamDeleter struct {
-	mutex        sync.Mutex
-	invocations  sets.String
-	err          error
-	streamImages map[string][]string
-	streamTags   map[string][]string
-}
-
-var _ ImageStreamDeleter = &fakeImageStreamDeleter{}
-
-func (p *fakeImageStreamDeleter) GetImageStream(stream *imagev1.ImageStream) (*imagev1.ImageStream, error) {
-	p.mutex.Lock()
-	defer p.mutex.Unlock()
-	if p.streamImages == nil {
-		p.streamImages = make(map[string][]string)
-	}
-	if p.streamTags == nil {
-		p.streamTags = make(map[string][]string)
-	}
-	for _, tag := range stream.Status.Tags {
-		streamName := fmt.Sprintf("%s/%s", stream.Namespace, stream.Name)
-		p.streamTags[streamName] = append(p.streamTags[streamName], tag.Tag)
-
-		for _, tagEvent := range tag.Items {
-			p.streamImages[streamName] = append(p.streamImages[streamName], tagEvent.Image)
-		}
-	}
-	return stream, p.err
-}
-
-func (p *fakeImageStreamDeleter) UpdateImageStream(stream *imagev1.ImageStream) (*imagev1.ImageStream, error) {
-	streamImages := make(map[string]struct{})
-	streamTags := make(map[string]struct{})
-
-	for _, tag := range stream.Status.Tags {
-		streamTags[tag.Tag] = struct{}{}
-		for _, tagEvent := range tag.Items {
-			streamImages[tagEvent.Image] = struct{}{}
-		}
-	}
-
-	streamName := fmt.Sprintf("%s/%s", stream.Namespace, stream.Name)
-
-	for _, tag := range p.streamTags[streamName] {
-		if _, ok := streamTags[tag]; !ok {
-			p.invocations.Insert(fmt.Sprintf("%s:%s", streamName, tag))
-		}
-	}
-
-	for _, imageName := range p.streamImages[streamName] {
-		if _, ok := streamImages[imageName]; !ok {
-			p.invocations.Insert(fmt.Sprintf("%s|%s", streamName, imageName))
-		}
-	}
-
-	return stream, p.err
-}
-
-func (p *fakeImageStreamDeleter) NotifyImageStreamPrune(stream *imagev1.ImageStream, updatedTags []string, deletedTags []string) {
-}
-
-type errorForSHA func(dgst string) error
-
-type fakeBlobDeleter struct {
-	mutex       sync.Mutex
-	invocations sets.String
-	getError    errorForSHA
-}
-
-var _ BlobDeleter = &fakeBlobDeleter{}
-
-func (p *fakeBlobDeleter) DeleteBlob(registryClient *http.Client, registryURL *url.URL, blob string) error {
-	p.mutex.Lock()
-	defer p.mutex.Unlock()
-	p.invocations.Insert(fmt.Sprintf("%s|%s", registryURL.String(), blob))
-	if p.getError == nil {
-		return nil
-	}
-	return p.getError(blob)
-}
-
-type fakeLayerLinkDeleter struct {
-	mutex       sync.Mutex
-	invocations sets.String
-	err         error
-}
-
-var _ LayerLinkDeleter = &fakeLayerLinkDeleter{}
-
-func (p *fakeLayerLinkDeleter) DeleteLayerLink(registryClient *http.Client, registryURL *url.URL, repo, layer string) error {
-	p.mutex.Lock()
-	defer p.mutex.Unlock()
-	p.invocations.Insert(fmt.Sprintf("%s|%s|%s", registryURL.String(), repo, layer))
-	return p.err
-}
-
-type fakeManifestDeleter struct {
-	mutex       sync.Mutex
-	invocations sets.String
-	err         error
-}
-
-var _ ManifestDeleter = &fakeManifestDeleter{}
-
-func (p *fakeManifestDeleter) DeleteManifest(registryClient *http.Client, registryURL *url.URL, repo, manifest string) error {
-	p.mutex.Lock()
-	defer p.mutex.Unlock()
-	p.invocations.Insert(fmt.Sprintf("%s|%s|%s", registryURL.String(), repo, manifest))
-	return p.err
-}
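
One pattern shared by the fake deleters above: each invocation is flattened into a pipe-delimited key and recorded into a set under a mutex, because the pruner's workers may call the deleters concurrently. A stripped-down sketch of that recording pattern (hypothetical, with a plain map in place of sets.String):

package main

import (
	"fmt"
	"sync"
)

// recorder stands in for the fakes' mutex plus sets.String pair.
type recorder struct {
	mu          sync.Mutex
	invocations map[string]struct{}
}

func (r *recorder) record(key string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.invocations[key] = struct{}{}
}

func main() {
	r := &recorder{invocations: map[string]struct{}{}}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			r.record(fmt.Sprintf("https://registry1.io|layer%d", i))
		}(i)
	}
	wg.Wait()
	fmt.Println(len(r.invocations), "invocations recorded")
}
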
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/worker.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/worker.go
deleted file mode 100644
index aeffd5c45797..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/imageprune/worker.go
+++ /dev/null
@@ -1,359 +0,0 @@
-package imageprune
-
-import (
-	"fmt"
-	"net/http"
-	"net/url"
-
-	gonum "github.com/gonum/graph"
-	"k8s.io/klog"
-
-	kerrapi "k8s.io/apimachinery/pkg/api/errors"
-
-	imagegraph "github.com/openshift/oc/pkg/helpers/graph/imagegraph/nodes"
-)
-
-// ComponentRetention knows all the places where an image component needs to be pruned (e.g. the global
-// blob store and repositories).
-type ComponentRetention struct {
-	ReferencingStreams map[*imagegraph.ImageStreamNode]bool
-	PrunableGlobally   bool
-}
-
-// ComponentRetentions contains prunable locations for all the components of an image.
-type ComponentRetentions map[*imagegraph.ImageComponentNode]*ComponentRetention
-
-func (cr ComponentRetentions) add(comp *imagegraph.ImageComponentNode) *ComponentRetention {
-	if _, ok := cr[comp]; ok {
-		return cr[comp]
-	}
-	cr[comp] = &ComponentRetention{
-		ReferencingStreams: make(map[*imagegraph.ImageStreamNode]bool),
-	}
-	return cr[comp]
-}
-
-// Add adds a component marked as (not) prunable in the blob store.
-func (cr ComponentRetentions) Add(
-	comp *imagegraph.ImageComponentNode,
-	globallyPrunable bool,
-) *ComponentRetention {
-	r := cr.add(comp)
-	r.PrunableGlobally = globallyPrunable
-	return r
-}
-
-// AddReferencingStreams adds a repository location as (not) prunable to the given component.
-func (cr ComponentRetentions) AddReferencingStreams(
-	comp *imagegraph.ImageComponentNode,
-	prunable bool,
-	streams ...*imagegraph.ImageStreamNode,
-) *ComponentRetention {
-	r := cr.add(comp)
-	for _, n := range streams {
-		r.ReferencingStreams[n] = prunable
-	}
-	return r
-}
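
ComponentRetentions answers two questions per component: is the blob prunable from the global store, and in which referencing repositories is its link prunable. A simplified model of that bookkeeping, assuming string keys in place of graph nodes (hypothetical, not the package's API):

package main

import "fmt"

// retention mirrors ComponentRetention with plain strings instead of graph nodes.
type retention struct {
	referencingStreams map[string]bool // stream name -> is the in-repository link prunable?
	prunableGlobally   bool            // is the blob prunable from the global blob store?
}

type retentions map[string]*retention

func (rs retentions) add(comp string) *retention {
	if r, ok := rs[comp]; ok {
		return r
	}
	r := &retention{referencingStreams: map[string]bool{}}
	rs[comp] = r
	return r
}

func (rs retentions) addReferencingStreams(comp string, prunable bool, streams ...string) {
	r := rs.add(comp)
	for _, s := range streams {
		r.referencingStreams[s] = prunable
	}
}

func main() {
	crs := retentions{}
	crs.add("layer1").prunableGlobally = true
	crs.addReferencingStreams("layer1", true, "foo/bar")
	crs.addReferencingStreams("layer2", false, "foo/bar", "foo/other")
	for comp, r := range crs {
		fmt.Printf("%s: global=%t streams=%v\n", comp, r.prunableGlobally, r.referencingStreams)
	}
}
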
-
-// Job is an image pruning job for the Worker. It contains information about a single image and its
-// related components.
-type Job struct {
-	Image      *imagegraph.ImageNode
-	Components ComponentRetentions
-}
-
-func enumerateImageComponents(
-	crs ComponentRetentions,
-	compType *imagegraph.ImageComponentType,
-	withPreserved bool,
-	handler func(comp *imagegraph.ImageComponentNode, prunable bool),
-) {
-	for c, retention := range crs {
-		if !withPreserved && !retention.PrunableGlobally {
-			continue
-		}
-		if compType != nil && c.Type != *compType {
-			continue
-		}
-
-		handler(c, retention.PrunableGlobally)
-	}
-}
-
-func enumerateImageStreamComponents(
-	crs ComponentRetentions,
-	compType *imagegraph.ImageComponentType,
-	withPreserved bool,
-	handler func(comp *imagegraph.ImageComponentNode, stream *imagegraph.ImageStreamNode, prunable bool),
-) {
-	for c, cr := range crs {
-		if compType != nil && c.Type != *compType {
-			continue
-		}
-
-		for s, prunable := range cr.ReferencingStreams {
-			if withPreserved || prunable {
-				handler(c, s, prunable)
-			}
-		}
-	}
-}
-
-// Deletion denotes a single deletion of a resource as a result of processing a job. If Parent is nil, the
-// deletion occurred in the global blob store. Otherwise, Parent identifies the repository location.
-type Deletion struct {
-	Node   gonum.Node
-	Parent gonum.Node
-}
-
-// Failure denotes a pruning failure of a single object.
-type Failure struct {
-	Node   gonum.Node
-	Parent gonum.Node
-	Err    error
-}
-
-var _ error = &Failure{}
-
-func (pf *Failure) Error() string { return pf.String() }
-
-func (pf *Failure) String() string {
-	if pf.Node == nil {
-		return fmt.Sprintf("failed to prune blob: %v", pf.Err)
-	}
-
-	switch t := pf.Node.(type) {
-	case *imagegraph.ImageStreamNode:
-		return fmt.Sprintf("failed to update ImageStream %s: %v", getName(t.ImageStream), pf.Err)
-	case *imagegraph.ImageNode:
-		return fmt.Sprintf("failed to delete Image %s: %v", t.Image.DockerImageReference, pf.Err)
-	case *imagegraph.ImageComponentNode:
-		detail := ""
-		if isn, ok := pf.Parent.(*imagegraph.ImageStreamNode); ok {
-			detail = " in repository " + getName(isn.ImageStream)
-		}
-		switch t.Type {
-		case imagegraph.ImageComponentTypeConfig:
-			return fmt.Sprintf("failed to delete image config link %s%s: %v", t.Component, detail, pf.Err)
-		case imagegraph.ImageComponentTypeLayer:
-			return fmt.Sprintf("failed to delete image layer link %s%s: %v", t.Component, detail, pf.Err)
-		case imagegraph.ImageComponentTypeManifest:
-			return fmt.Sprintf("failed to delete image manifest link %s%s: %v", t.Component, detail, pf.Err)
-		default:
-			return fmt.Sprintf("failed to delete %s%s: %v", t.String(), detail, pf.Err)
-		}
-	default:
-		return fmt.Sprintf("failed to delete %v: %v", t, pf.Err)
-	}
-}
-
-// JobResult is the result of processing a job.
-type JobResult struct {
-	Job       *Job
-	Deletions []Deletion
-	Failures  []Failure
-}
-
-func (jr *JobResult) update(deletions []Deletion, failures []Failure) *JobResult {
-	jr.Deletions = append(jr.Deletions, deletions...)
-	jr.Failures = append(jr.Failures, failures...)
-	return jr
-}
-
-// Worker knows how to prune an image and its related components.
-type Worker interface {
-	// Run is supposed to be run as a goroutine. It terminates when the in channel is closed.
-	Run(in <-chan *Job, out chan<- JobResult)
-}
-
-type worker struct {
-	algorithm       pruneAlgorithm
-	registryClient  *http.Client
-	registryURL     *url.URL
-	imagePruner     ImageDeleter
-	streamPruner    ImageStreamDeleter
-	layerLinkPruner LayerLinkDeleter
-	blobPruner      BlobDeleter
-	manifestPruner  ManifestDeleter
-}
-
-var _ Worker = &worker{}
-
-// NewWorker creates a new pruning worker.
-func NewWorker(
-	algorithm pruneAlgorithm,
-	registryClientFactory RegistryClientFactoryFunc,
-	registryURL *url.URL,
-	imagePrunerFactory ImagePrunerFactoryFunc,
-	streamPruner ImageStreamDeleter,
-	layerLinkPruner LayerLinkDeleter,
-	blobPruner BlobDeleter,
-	manifestPruner ManifestDeleter,
-) (Worker, error) {
-	client, err := registryClientFactory()
-	if err != nil {
-		return nil, err
-	}
-
-	imagePruner, err := imagePrunerFactory()
-	if err != nil {
-		return nil, err
-	}
-
-	return &worker{
-		algorithm:       algorithm,
-		registryClient:  client,
-		registryURL:     registryURL,
-		imagePruner:     imagePruner,
-		streamPruner:    streamPruner,
-		layerLinkPruner: layerLinkPruner,
-		blobPruner:      blobPruner,
-		manifestPruner:  manifestPruner,
-	}, nil
-}
-
-func (w *worker) Run(in <-chan *Job, out chan<- JobResult) {
-	for {
-		job, more := <-in
-		if !more {
-			return
-		}
-		out <- *w.prune(job)
-	}
-}
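
Because Run is a plain channel consumer, a pool of workers can be fanned out and joined with ordinary channel plumbing. A self-contained sketch of such wiring, with placeholder job/result types; the real dispatch loop lives elsewhere in the package and may differ:

package main

import (
	"fmt"
	"sync"
)

type job struct{ name string }
type jobResult struct{ name string }

// run mirrors worker.Run above: it drains jobs until the channel is closed.
func run(in <-chan *job, out chan<- jobResult) {
	for j := range in {
		out <- jobResult{name: j.name}
	}
}

func main() {
	in := make(chan *job)
	out := make(chan jobResult)

	// fan out: three workers (an arbitrary stand-in for NumWorkers)
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			run(in, out)
		}()
	}
	// close out once every worker has returned
	go func() {
		wg.Wait()
		close(out)
	}()

	// feed jobs, then signal no more work by closing in
	go func() {
		for _, n := range []string{"image1", "image2", "image3"} {
			in <- &job{name: n}
		}
		close(in)
	}()

	// fan in: collect results until out is closed
	for res := range out {
		fmt.Println("processed", res.name)
	}
}
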
-
-func (w *worker) prune(job *Job) *JobResult {
-	res := &JobResult{Job: job}
-
-	blobDeletions, blobFailures := []Deletion{}, []Failure{}
-
-	if w.algorithm.pruneRegistry {
-		// NOTE: not found errors are treated as success
-		res.update(pruneImageComponents(
-			w.registryClient,
-			w.registryURL,
-			job.Components,
-			w.layerLinkPruner,
-		))
-
-		blobDeletions, blobFailures = pruneBlobs(
-			w.registryClient,
-			w.registryURL,
-			job.Components,
-			w.blobPruner,
-		)
-		res.update(blobDeletions, blobFailures)
-
-		res.update(pruneManifests(
-			w.registryClient,
-			w.registryURL,
-			job.Components,
-			w.manifestPruner,
-		))
-	}
-
-	// Keep the image object when its blobs could not be deleted and the image is therefore ostensibly still
-	// complete (we cannot be sure unless we ask the registry for the blobs' existence). Preserving the image
-	// lets the blobs be identified and deleted on the next run.
-	if len(blobDeletions) > 0 || len(blobFailures) == 0 {
-		res.update(pruneImages(job.Image, w.imagePruner))
-	}
-
-	return res
-}
-
-// pruneImages invokes imagePruner.DeleteImage with each image that is prunable.
-func pruneImages(
-	imageNode *imagegraph.ImageNode,
-	imagePruner ImageDeleter,
-) (deletions []Deletion, failures []Failure) {
-	err := imagePruner.DeleteImage(imageNode.Image)
-	if err != nil {
-		if kerrapi.IsNotFound(err) {
-			klog.V(2).Infof("Skipping image %s that no longer exists", imageNode.Image.Name)
-		} else {
-			failures = append(failures, Failure{Node: imageNode, Err: err})
-		}
-	} else {
-		deletions = append(deletions, Deletion{Node: imageNode})
-	}
-
-	return
-}
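
pruneImages treats a NotFound error from the API server as success: the image is already gone, so there is nothing left to prune. A tiny illustration of that check with apimachinery's error helpers (assuming the apimachinery module is available):

package main

import (
	"fmt"

	kerrapi "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Simulate the API server reporting that the image no longer exists.
	err := kerrapi.NewNotFound(schema.GroupResource{Group: "image.openshift.io", Resource: "images"}, "sha256:0000")
	if kerrapi.IsNotFound(err) {
		fmt.Println("image already gone; treating deletion as success")
	}
}
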
-
-// pruneImageComponents invokes layerLinkDeleter.DeleteLayerLink for each repository layer link to
-// be deleted from the registry.
-func pruneImageComponents(
-	registryClient *http.Client,
-	registryURL *url.URL,
-	crs ComponentRetentions,
-	layerLinkDeleter LayerLinkDeleter,
-) (deletions []Deletion, failures []Failure) {
-	enumerateImageStreamComponents(crs, nil, false, func(
-		comp *imagegraph.ImageComponentNode,
-		stream *imagegraph.ImageStreamNode,
-		_ bool,
-	) {
-		if comp.Type == imagegraph.ImageComponentTypeManifest {
-			return
-		}
-		streamName := getName(stream.ImageStream)
-		klog.V(4).Infof("Pruning repository %s/%s: %s", registryURL.Host, streamName, comp.Describe())
-		err := layerLinkDeleter.DeleteLayerLink(registryClient, registryURL, streamName, comp.Component)
-		if err != nil {
-			failures = append(failures, Failure{Node: comp, Parent: stream, Err: err})
-		} else {
-			deletions = append(deletions, Deletion{Node: comp, Parent: stream})
-		}
-	})
-
-	return
-}
-
-// pruneBlobs invokes blobPruner.DeleteBlob for each blob to be deleted from the registry.
-func pruneBlobs(
-	registryClient *http.Client,
-	registryURL *url.URL,
-	crs ComponentRetentions,
-	blobPruner BlobDeleter,
-) (deletions []Deletion, failures []Failure) {
-	enumerateImageComponents(crs, nil, false, func(comp *imagegraph.ImageComponentNode, prunable bool) {
-		err := blobPruner.DeleteBlob(registryClient, registryURL, comp.Component)
-		if err != nil {
-			failures = append(failures, Failure{Node: comp, Err: err})
-		} else {
-			deletions = append(deletions, Deletion{Node: comp})
-		}
-	})
-
-	return
-}
-
-// pruneManifests invokes manifestPruner.DeleteManifest for each repository
-// manifest to be deleted from the registry.
-func pruneManifests(
-	registryClient *http.Client,
-	registryURL *url.URL,
-	crs ComponentRetentions,
-	manifestPruner ManifestDeleter,
-) (deletions []Deletion, failures []Failure) {
-	manifestType := imagegraph.ImageComponentTypeManifest
-	enumerateImageStreamComponents(crs, &manifestType, false, func(
-		manifestNode *imagegraph.ImageComponentNode,
-		stream *imagegraph.ImageStreamNode,
-		_ bool,
-	) {
-		repoName := getName(stream.ImageStream)
-
-		klog.V(4).Infof("Pruning manifest %s in the repository %s/%s", manifestNode.Component, registryURL.Host, repoName)
-		err := manifestPruner.DeleteManifest(registryClient, registryURL, repoName, manifestNode.Component)
-		if err != nil {
-			failures = append(failures, Failure{Node: manifestNode, Parent: stream, Err: err})
-		} else {
-			deletions = append(deletions, Deletion{Node: manifestNode, Parent: stream})
-		}
-	})
-
-	return
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/images/images.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/images/images.go
deleted file mode 100644
index ab923c0040e9..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/images/images.go
+++ /dev/null
@@ -1,932 +0,0 @@
-package images
-
-import (
-	"context"
-	"crypto/x509"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"os"
-	"regexp"
-	"strings"
-	"text/tabwriter"
-	"time"
-
-	gonum "github.com/gonum/graph"
-	"github.com/spf13/cobra"
-	"k8s.io/klog"
-
-	corev1 "k8s.io/api/core/v1"
-	kerrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/api/meta"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	kutilerrors "k8s.io/apimachinery/pkg/util/errors"
-	knet "k8s.io/apimachinery/pkg/util/net"
-	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	apimachineryversion "k8s.io/apimachinery/pkg/version"
-	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/client-go/discovery"
-	"k8s.io/client-go/kubernetes"
-	restclient "k8s.io/client-go/rest"
-	kclientcmd "k8s.io/client-go/tools/clientcmd"
-	"k8s.io/client-go/tools/pager"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	imagev1 "github.com/openshift/api/image/v1"
-	appsv1client "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1"
-	buildv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1"
-	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
-	"github.com/openshift/library-go/pkg/network/networkutils"
-	"github.com/openshift/oc/pkg/cli/admin/prune/imageprune"
-	imagegraph "github.com/openshift/oc/pkg/helpers/graph/imagegraph/nodes"
-	"github.com/openshift/oc/pkg/version"
-)
-
-// PruneImagesRecommendedName is the recommended command name
-const PruneImagesRecommendedName = "images"
-
-var errNoToken = errors.New("you must use a client config with a token")
-
-const registryURLNotReachable = `(?:operation|connection) timed out|no such host`
-
-var (
-	imagesLongDesc = templates.LongDesc(`
-		Remove image stream tags, images, and image layers by age or usage
-
-		This command removes historical image stream tags, unused images, and unreferenced image
-		layers from the integrated registry. By default, all images are considered candidates. The
-		command can be instructed to consider only images that have been directly pushed to the
-		registry by supplying the --all=false flag.
-
-		By default, the prune operation performs a dry run, making no changes to the internal
-		registry. The --confirm flag is needed for changes to take effect. The flag requires a valid
-		route to the integrated container image registry. If this command is run outside the cluster
-		network, the route needs to be provided using --registry-url.
-
-		Only a logged-in user with the cluster role %s or higher will be able to actually
-		delete the images.
-
-		If the registry is secured with a certificate signed by a self-signed root certificate
-		authority other than the one present in the current user's config, you may need to specify
-		it using the --certificate-authority flag.
-
-		An insecure connection is allowed in the following cases unless --certificate-authority is
-		specified:
-
-		 1. --force-insecure is given
-		 2. the provided registry-url is prefixed with http://
-		 3. the registry URL is a private or link-local address
-		 4. the user's config allows an insecure connection (the user logged in to the cluster with
-			--insecure-skip-tls-verify or allowed insecure connections)`)
-
-	imagesExample = templates.Examples(`
-	  # See what the prune command would delete if only images and their referrers more than an hour old
-	  # and obsoleted by 3 newer revisions under the same tag were considered.
-	  %[1]s %[2]s --keep-tag-revisions=3 --keep-younger-than=60m
-
-	  # To actually perform the prune operation, the confirm flag must be appended
-	  %[1]s %[2]s --keep-tag-revisions=3 --keep-younger-than=60m --confirm
-
-	  # See what the prune command would delete if we're interested in removing images
-	  # exceeding the currently set limit ranges ('openshift.io/Image')
-	  %[1]s %[2]s --prune-over-size-limit
-
-	  # To actually perform the prune operation, the confirm flag must be appended
-	  %[1]s %[2]s --prune-over-size-limit --confirm
-
-	  # Force the insecure http protocol with the particular registry host name
-	  %[1]s %[2]s --registry-url=http://registry.example.org --confirm
-
-	  # Force a secure connection with a custom certificate authority to the particular registry host name
-	  %[1]s %[2]s --registry-url=registry.example.org --certificate-authority=/path/to/custom/ca.crt --confirm`)
-)
-
-var (
-	defaultKeepYoungerThan         = 60 * time.Minute
-	defaultKeepTagRevisions        = 3
-	defaultPruneImageOverSizeLimit = false
-	defaultPruneRegistry           = true
-)
-
-// PruneImagesOptions holds all the required options for pruning images.
-type PruneImagesOptions struct {
-	Confirm             bool
-	KeepYoungerThan     *time.Duration
-	KeepTagRevisions    *int
-	PruneOverSizeLimit  *bool
-	AllImages           *bool
-	CABundle            string
-	RegistryUrlOverride string
-	Namespace           string
-	ForceInsecure       bool
-	PruneRegistry       *bool
-	IgnoreInvalidRefs   bool
-
-	ClientConfig       *restclient.Config
-	AppsClient         appsv1client.AppsV1Interface
-	BuildClient        buildv1client.BuildV1Interface
-	ImageClient        imagev1client.ImageV1Interface
-	ImageClientFactory func() (imagev1client.ImageV1Interface, error)
-	DiscoveryClient    discovery.DiscoveryInterface
-	KubeClient         kubernetes.Interface
-	Timeout            time.Duration
-	Out                io.Writer
-	ErrOut             io.Writer
-}
-
-// NewCmdPruneImages implements the OpenShift cli prune images command.
-func NewCmdPruneImages(f kcmdutil.Factory, parentName, name string, streams genericclioptions.IOStreams) *cobra.Command {
-	allImages := true
-	opts := &PruneImagesOptions{
-		Confirm:            false,
-		KeepYoungerThan:    &defaultKeepYoungerThan,
-		KeepTagRevisions:   &defaultKeepTagRevisions,
-		PruneOverSizeLimit: &defaultPruneImageOverSizeLimit,
-		PruneRegistry:      &defaultPruneRegistry,
-		AllImages:          &allImages,
-	}
-
-	cmd := &cobra.Command{
-		Use:   name,
-		Short: "Remove unreferenced images",
-		Long:  fmt.Sprintf(imagesLongDesc, "system:image-pruner"),
-
-		Example: fmt.Sprintf(imagesExample, parentName, name),
-
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(opts.Complete(f, cmd, args, streams.Out))
-			kcmdutil.CheckErr(opts.Validate())
-			kcmdutil.CheckErr(opts.Run())
-		},
-	}
-
-	cmd.Flags().BoolVar(&opts.Confirm, "confirm", opts.Confirm, "If true, specify that image pruning should proceed. Defaults to false, displaying what would be deleted but not actually deleting anything. Requires a valid route to the integrated container image registry (see --registry-url).")
-	cmd.Flags().BoolVar(opts.AllImages, "all", *opts.AllImages, "Include images that were imported from external registries as candidates for pruning. If pruned, all the mirrored objects associated with them will also be removed from the integrated registry.")
-	cmd.Flags().DurationVar(opts.KeepYoungerThan, "keep-younger-than", *opts.KeepYoungerThan, "Specify the minimum age of an image and its referrers for it to be considered a candidate for pruning.")
-	cmd.Flags().IntVar(opts.KeepTagRevisions, "keep-tag-revisions", *opts.KeepTagRevisions, "Specify the number of image revisions for a tag in an image stream that will be preserved.")
-	cmd.Flags().BoolVar(opts.PruneOverSizeLimit, "prune-over-size-limit", *opts.PruneOverSizeLimit, "Specify if images which are exceeding LimitRanges (see 'openshift.io/Image'), specified in the same namespace, should be considered for pruning. This flag cannot be combined with --keep-younger-than nor --keep-tag-revisions.")
-	cmd.Flags().StringVar(&opts.CABundle, "certificate-authority", opts.CABundle, "The path to a certificate authority bundle to use when communicating with the managed container image registries. Defaults to the certificate authority data from the current user's config file. It cannot be used together with --force-insecure.")
-	cmd.Flags().StringVar(&opts.RegistryUrlOverride, "registry-url", opts.RegistryUrlOverride, "The address to use when contacting the registry, instead of using the default value. This is useful if you can't resolve or reach the registry (e.g.; the default is a cluster-internal URL) but you do have an alternative route that works. Particular transport protocol can be enforced using '://' prefix.")
-	cmd.Flags().BoolVar(&opts.ForceInsecure, "force-insecure", opts.ForceInsecure, "If true, allow an insecure connection to the container image registry that is hosted via HTTP or has an invalid HTTPS certificate. Whenever possible, use --certificate-authority instead of this dangerous option.")
-	cmd.Flags().BoolVar(opts.PruneRegistry, "prune-registry", *opts.PruneRegistry, "If false, the prune operation will clean up image API objects, but none of the associated content in the registry is removed.  Note, if only image API objects are cleaned up through use of this flag, the only means for subsequently cleaning up registry data corresponding to those image API objects is to employ the 'hard prune' administrative task.")
-	cmd.Flags().BoolVar(&opts.IgnoreInvalidRefs, "ignore-invalid-refs", opts.IgnoreInvalidRefs, "If true, the pruning process will ignore all errors while parsing image references. This means that the pruning process will ignore the intended connection between the object and the referenced image. As a result an image may be incorrectly deleted as unused.")
-
-	return cmd
-}
-
-// Complete turns a partially defined PruneImagesOptions into a solvent structure
-// which can be validated and used for pruning images.
-func (o *PruneImagesOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string, out io.Writer) error {
-	if len(args) > 0 {
-		return kcmdutil.UsageErrorf(cmd, "no arguments are allowed to this command")
-	}
-
-	if !cmd.Flags().Lookup("prune-over-size-limit").Changed {
-		o.PruneOverSizeLimit = nil
-	} else {
-		if !cmd.Flags().Lookup("keep-younger-than").Changed {
-			o.KeepYoungerThan = nil
-		}
-		if !cmd.Flags().Lookup("keep-tag-revisions").Changed {
-			o.KeepTagRevisions = nil
-		}
-	}
-
-	o.Namespace = metav1.NamespaceAll
-	if cmd.Flags().Lookup("namespace").Changed {
-		var err error
-		o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace()
-		if err != nil {
-			return err
-		}
-	}
-	o.Out = out
-	o.ErrOut = os.Stderr
-
-	var err error
-	o.ClientConfig, err = f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	if len(o.ClientConfig.BearerToken) == 0 {
-		return errNoToken
-	}
-	o.KubeClient, err = kubernetes.NewForConfig(o.ClientConfig)
-	if err != nil {
-		return err
-	}
-	o.AppsClient, err = appsv1client.NewForConfig(o.ClientConfig)
-	if err != nil {
-		return err
-	}
-	o.BuildClient, err = buildv1client.NewForConfig(o.ClientConfig)
-	if err != nil {
-		return err
-	}
-	o.ImageClient, err = imagev1client.NewForConfig(o.ClientConfig)
-	if err != nil {
-		return err
-	}
-	o.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(o.ClientConfig)
-	if err != nil {
-		return err
-	}
-
-	o.ImageClientFactory = getImageClientFactory(f)
-
-	o.Timeout = o.ClientConfig.Timeout
-	if o.Timeout == 0 {
-		o.Timeout = 10 * time.Second
-	}
-
-	return nil
-}
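Complete distinguishes flags left at their defaults from flags the user explicitly set via pflag's Changed field, and nils out the corresponding pointer options. A minimal, self-contained sketch of that pattern; the demo command and flag name are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// Sketch of the Changed-based detection used in Complete above: a
// pointer-valued option is reset to nil when its flag was not explicitly
// set, so later code can tell "unset" apart from "set to the default".
func main() {
	keep := 3 // mirrors a default such as defaultKeepTagRevisions
	keepPtr := &keep

	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {
			if !cmd.Flags().Lookup("keep").Changed {
				keepPtr = nil
			}
			if keepPtr == nil {
				fmt.Println("keep was not set on the command line")
			} else {
				fmt.Println("keep =", *keepPtr)
			}
		},
	}
	cmd.Flags().IntVar(keepPtr, "keep", keep, "revisions to keep")
	_ = cmd.Execute()
}
```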
-
-// Validate ensures that a PruneImagesOptions is valid and can be used to execute pruning.
-func (o PruneImagesOptions) Validate() error {
-	if o.PruneOverSizeLimit != nil && (o.KeepYoungerThan != nil || o.KeepTagRevisions != nil) {
-		return fmt.Errorf("--prune-over-size-limit cannot be specified with --keep-tag-revisions nor --keep-younger-than")
-	}
-	if o.KeepYoungerThan != nil && *o.KeepYoungerThan < 0 {
-		return fmt.Errorf("--keep-younger-than must be greater than or equal to 0")
-	}
-	if o.KeepTagRevisions != nil && *o.KeepTagRevisions < 0 {
-		return fmt.Errorf("--keep-tag-revisions must be greater than or equal to 0")
-	}
-	if err := validateRegistryURL(o.RegistryUrlOverride); len(o.RegistryUrlOverride) > 0 && err != nil {
-		return fmt.Errorf("invalid --registry-url flag: %v", err)
-	}
-	if o.ForceInsecure && len(o.CABundle) > 0 {
-		return fmt.Errorf("--certificate-authority cannot be specified with --force-insecure")
-	}
-	if len(o.CABundle) > 0 && strings.HasPrefix(o.RegistryUrlOverride, "http://") {
-		return fmt.Errorf("--cerificate-authority cannot be specified for insecure http protocol")
-	}
-	return nil
-}
-
-var errNoRegistryURLPathAllowed = errors.New("no path after [:] is allowed")
-var errNoRegistryURLQueryAllowed = errors.New("no query arguments are allowed after [:]")
-var errRegistryURLHostEmpty = errors.New("no host name specified")
-
-// validateRegistryURL returns error if the given input is not a valid registry URL. The url may be prefixed
-// with http:// or https:// schema. It may not contain any path or query after the host:[port].
-func validateRegistryURL(registryURL string) error {
-	var (
-		u     *url.URL
-		err   error
-		parts = strings.SplitN(registryURL, "://", 2)
-	)
-
-	switch len(parts) {
-	case 2:
-		u, err = url.Parse(registryURL)
-		if err != nil {
-			return err
-		}
-		switch u.Scheme {
-		case "http", "https":
-		default:
-			return fmt.Errorf("unsupported scheme: %s", u.Scheme)
-		}
-	case 1:
-		u, err = url.Parse("https://" + registryURL)
-		if err != nil {
-			return err
-		}
-	}
-	if len(u.Path) > 0 && u.Path != "/" {
-		return errNoRegistryURLPathAllowed
-	}
-	if len(u.RawQuery) > 0 {
-		return errNoRegistryURLQueryAllowed
-	}
-	if len(u.Host) == 0 {
-		return errRegistryURLHostEmpty
-	}
-	return nil
-}
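validateRegistryURL only gets a usable Host out of url.Parse because bare "host[:port]" inputs are given a scheme first; without one, url.Parse mis-reads the host as a scheme or rejects the input outright. A standalone sketch of that normalization, with hostOf as an invented helper:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// hostOf prepends an https:// scheme to scheme-less input before parsing,
// mirroring the SplitN handling in validateRegistryURL above.
func hostOf(registryURL string) (string, error) {
	if !strings.Contains(registryURL, "://") {
		registryURL = "https://" + registryURL
	}
	u, err := url.Parse(registryURL)
	if err != nil {
		return "", err
	}
	return u.Host, nil
}

func main() {
	for _, in := range []string{"registry.org:5000", "http://registry.org", "172.30.30.30:5000"} {
		host, err := hostOf(in)
		fmt.Println(in, "->", host, err)
	}
}
```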
-
-// Run contains all the necessary functionality for the OpenShift cli prune images command.
-func (o PruneImagesOptions) Run() error {
-	allPods, err := o.KubeClient.CoreV1().Pods(o.Namespace).List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-
-	allRCs, err := o.KubeClient.CoreV1().ReplicationControllers(o.Namespace).List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-
-	allBCs, err := o.BuildClient.BuildConfigs(o.Namespace).List(metav1.ListOptions{})
-	// We need to tolerate 'not found' errors for buildConfigs since they may be disabled in Atomic
-	if err != nil && !kerrors.IsNotFound(err) {
-		return err
-	}
-
-	allBuilds, err := o.BuildClient.Builds(o.Namespace).List(metav1.ListOptions{})
-	// We need to tolerate 'not found' errors for builds since they may be disabled in Atomic
-	if err != nil && !kerrors.IsNotFound(err) {
-		return err
-	}
-
-	allDSs, err := o.KubeClient.AppsV1().DaemonSets(o.Namespace).List(metav1.ListOptions{})
-	if err != nil {
-		// TODO: remove in future (3.9) release
-		if !kerrors.IsForbidden(err) {
-			return err
-		}
-		fmt.Fprintf(o.ErrOut, "Failed to list daemonsets: %v\n - * Make sure to update clusterRoleBindings.\n", err)
-	}
-
-	allDeployments, err := o.KubeClient.AppsV1().Deployments(o.Namespace).List(metav1.ListOptions{})
-	if err != nil {
-		// TODO: remove in future (3.9) release
-		if !kerrors.IsForbidden(err) {
-			return err
-		}
-		fmt.Fprintf(o.ErrOut, "Failed to list deployments: %v\n - * Make sure to update clusterRoleBindings.\n", err)
-	}
-
-	allDCs, err := o.AppsClient.DeploymentConfigs(o.Namespace).List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-
-	allRSs, err := o.KubeClient.AppsV1().ReplicaSets(o.Namespace).List(metav1.ListOptions{})
-	if err != nil {
-		// TODO: remove in future (3.9) release
-		if !kerrors.IsForbidden(err) {
-			return err
-		}
-		fmt.Fprintf(o.ErrOut, "Failed to list replicasets: %v\n - * Make sure to update clusterRoleBindings.\n", err)
-	}
-
-	limitRangesList, err := o.KubeClient.CoreV1().LimitRanges(o.Namespace).List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	limitRangesMap := make(map[string][]*corev1.LimitRange)
-	for i := range limitRangesList.Items {
-		limit := limitRangesList.Items[i]
-		limits, ok := limitRangesMap[limit.Namespace]
-		if !ok {
-			limits = []*corev1.LimitRange{}
-		}
-		limits = append(limits, &limit)
-		limitRangesMap[limit.Namespace] = limits
-	}
-
-	ctx := context.TODO()
-	allImagesUntyped, err := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
-		return o.ImageClient.Images().List(opts)
-	}).List(ctx, metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	allImages := &imagev1.ImageList{}
-	if err := meta.EachListItem(allImagesUntyped, func(obj runtime.Object) error {
-		allImages.Items = append(allImages.Items, *obj.(*imagev1.Image))
-		return nil
-	}); err != nil {
-		return err
-	}
-
-	imageWatcher, err := o.ImageClient.Images().Watch(metav1.ListOptions{})
-	if err != nil {
-		utilruntime.HandleError(fmt.Errorf("internal error: failed to watch for images: %v"+
-			"\n - image changes will not be detected", err))
-		imageWatcher = watch.NewFake()
-	}
-
-	imageStreamWatcher, err := o.ImageClient.ImageStreams(o.Namespace).Watch(metav1.ListOptions{})
-	if err != nil {
-		utilruntime.HandleError(fmt.Errorf("internal error: failed to watch for image streams: %v"+
-			"\n - image stream changes will not be detected", err))
-		imageStreamWatcher = watch.NewFake()
-	}
-	defer imageStreamWatcher.Stop()
-
-	allStreams, err := o.ImageClient.ImageStreams(o.Namespace).List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-
-	var (
-		registryHost          = o.RegistryUrlOverride
-		registryClientFactory imageprune.RegistryClientFactoryFunc
-		registryClient        *http.Client
-		registryPinger        imageprune.RegistryPinger
-	)
-
-	if o.Confirm {
-		if len(registryHost) == 0 {
-			registryHost, err = imageprune.DetermineRegistryHost(allImages, allStreams)
-			if err != nil {
-				return fmt.Errorf("unable to determine registry: %v", err)
-			}
-		}
-
-		insecure := o.ForceInsecure
-		if !insecure && len(o.CABundle) == 0 {
-			insecure = o.ClientConfig.TLSClientConfig.Insecure || networkutils.IsPrivateAddress(registryHost) ||
-				strings.HasPrefix(registryHost, "http://")
-		}
-
-		registryClientFactory = func() (*http.Client, error) {
-			return getRegistryClient(o.ClientConfig, o.CABundle, insecure)
-		}
-		registryClient, err = registryClientFactory()
-		if err != nil {
-			return err
-		}
-
-		registryPinger = &imageprune.DefaultRegistryPinger{
-			Client:   registryClient,
-			Insecure: insecure,
-		}
-	} else {
-		registryPinger = &imageprune.DryRunRegistryPinger{}
-		registryClientFactory = imageprune.FakeRegistryClientFactory
-	}
-
-	// verify the registry connection now to avoid future surprises
-	registryURL, err := registryPinger.Ping(registryHost)
-	if err != nil {
-		if len(o.RegistryUrlOverride) == 0 && regexp.MustCompile(registryURLNotReachable).MatchString(err.Error()) {
-			err = fmt.Errorf("%s\n* Please provide a reachable route to the integrated registry using --registry-url.", err.Error())
-		}
-		return fmt.Errorf("failed to ping registry %s: %v", registryHost, err)
-	}
-
-	options := imageprune.PrunerOptions{
-		KeepYoungerThan:       o.KeepYoungerThan,
-		KeepTagRevisions:      o.KeepTagRevisions,
-		PruneOverSizeLimit:    o.PruneOverSizeLimit,
-		AllImages:             o.AllImages,
-		Images:                allImages,
-		ImageWatcher:          imageWatcher,
-		Streams:               allStreams,
-		StreamWatcher:         imageStreamWatcher,
-		Pods:                  allPods,
-		RCs:                   allRCs,
-		BCs:                   allBCs,
-		Builds:                allBuilds,
-		DSs:                   allDSs,
-		Deployments:           allDeployments,
-		DCs:                   allDCs,
-		RSs:                   allRSs,
-		LimitRanges:           limitRangesMap,
-		DryRun:                !o.Confirm,
-		RegistryClientFactory: registryClientFactory,
-		RegistryURL:           registryURL,
-		PruneRegistry:         o.PruneRegistry,
-		IgnoreInvalidRefs:     o.IgnoreInvalidRefs,
-	}
-	if o.Namespace != metav1.NamespaceAll {
-		options.Namespace = o.Namespace
-	}
-	pruner, errs := imageprune.NewPruner(options)
-	if errs != nil {
-		o.printGraphBuildErrors(errs)
-		return fmt.Errorf("failed to build graph - no changes made")
-	}
-
-	imagePrunerFactory := func() (imageprune.ImageDeleter, error) {
-		return &describingImageDeleter{w: o.Out, errOut: o.ErrOut}, nil
-	}
-	imageStreamDeleter := &describingImageStreamDeleter{w: o.Out, errOut: o.ErrOut}
-	layerLinkDeleter := &describingLayerLinkDeleter{w: o.Out, errOut: o.ErrOut}
-	blobDeleter := &describingBlobDeleter{w: o.Out, errOut: o.ErrOut}
-	manifestDeleter := &describingManifestDeleter{w: o.Out, errOut: o.ErrOut}
-
-	if o.Confirm {
-		imageStreamDeleter.delegate = imageprune.NewImageStreamDeleter(o.ImageClient)
-		layerLinkDeleter.delegate = imageprune.NewLayerLinkDeleter()
-		blobDeleter.delegate = imageprune.NewBlobDeleter()
-		manifestDeleter.delegate = imageprune.NewManifestDeleter()
-
-		imagePrunerFactory = func() (imageprune.ImageDeleter, error) {
-			imageClient, err := o.ImageClientFactory()
-			if err != nil {
-				return nil, err
-			}
-			return imageprune.NewImageDeleter(imageClient), nil
-		}
-	} else {
-		fmt.Fprintln(o.ErrOut, "Dry run enabled - no modifications will be made. Add --confirm to remove images")
-	}
-
-	if o.PruneRegistry != nil && !*o.PruneRegistry {
-		fmt.Fprintln(o.Out, "Only API objects will be removed.  No modifications to the image registry will be made.")
-	}
-
-	deletions, failures := pruner.Prune(
-		imagePrunerFactory,
-		imageStreamDeleter,
-		layerLinkDeleter,
-		blobDeleter,
-		manifestDeleter,
-	)
-	printSummary(o.Out, deletions, failures)
-	if len(failures) == 1 {
-		return &failures[0]
-	}
-	if len(failures) > 0 {
-		return fmt.Errorf("failed")
-	}
-	return nil
-}
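Run pages through the potentially large image list with pager.New and flattens the aggregated result with meta.EachListItem. The same pattern sketched against Pods for brevity; listAllPods is an invented name, and the context-less List signature matches the client-go vintage vendored here:

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/pager"
)

// listAllPods mirrors the paged Images listing in Run above: pager.New
// fetches the list in chunks, and meta.EachListItem walks the result
// without knowing the concrete list type.
func listAllPods(ctx context.Context, client kubernetes.Interface, ns string) ([]corev1.Pod, error) {
	obj, err := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
		return client.CoreV1().Pods(ns).List(opts)
	}).List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	var pods []corev1.Pod
	err = meta.EachListItem(obj, func(o runtime.Object) error {
		pods = append(pods, *o.(*corev1.Pod))
		return nil
	})
	return pods, err
}
```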
-
-func printSummary(out io.Writer, deletions []imageprune.Deletion, failures []imageprune.Failure) {
-	// TODO: for higher verbosity, sum by error type
-	if len(failures) == 0 {
-		fmt.Fprintf(out, "Deleted %d objects.\n", len(deletions))
-	} else {
-		fmt.Fprintf(out, "Deleted %d objects out of %d.\n", len(deletions), len(deletions)+len(failures))
-		fmt.Fprintf(out, "Failed to delete %d objects.\n", len(failures))
-	}
-	if !klog.V(2) {
-		return
-	}
-
-	fmt.Fprintf(out, "\n")
-
-	w := tabwriter.NewWriter(out, 10, 4, 3, ' ', 0)
-	defer w.Flush()
-
-	buckets := make(map[string]struct{ deletions, failures, total uint64 })
-	count := func(node gonum.Node, parent gonum.Node, deletions uint64, failures uint64) {
-		bucket := ""
-		switch t := node.(type) {
-		case *imagegraph.ImageStreamNode:
-			bucket = "is"
-		case *imagegraph.ImageNode:
-			bucket = "image"
-		case *imagegraph.ImageComponentNode:
-			bucket = "component/" + string(t.Type)
-			if parent == nil {
-				bucket = "blob"
-			}
-		default:
-			bucket = fmt.Sprintf("other/%T", t)
-		}
-		c := buckets[bucket]
-		c.deletions += deletions
-		c.failures += failures
-		c.total += deletions + failures
-		buckets[bucket] = c
-	}
-
-	for _, d := range deletions {
-		count(d.Node, d.Parent, 1, 0)
-	}
-	for _, f := range failures {
-		count(f.Node, f.Parent, 0, 1)
-	}
-
-	printAndPopBucket := func(name string, desc string) {
-		cnt, ok := buckets[name]
-		if ok {
-			delete(buckets, name)
-		}
-		if cnt.total == 0 {
-			return
-		}
-		fmt.Fprintf(w, "%s:\t%d\n", desc, cnt.deletions)
-		if cnt.failures == 0 {
-			return
-		}
-		// add padding before failures to make it appear subordinate to the line above
-		for i := 0; i < len(desc)-len("failures"); i++ {
-			fmt.Fprintf(w, " ")
-		}
-		fmt.Fprintf(w, "failures:\t%d\n", cnt.failures)
-	}
-
-	printAndPopBucket("is", "Image Stream updates")
-	printAndPopBucket("image", "Image deletions")
-	printAndPopBucket("blob", "Blob deletions")
-	printAndPopBucket("component/"+string(imagegraph.ImageComponentTypeManifest), "Image Manifest Link deletions")
-	printAndPopBucket("component/"+string(imagegraph.ImageComponentTypeConfig), "Image Config Link deletions")
-	printAndPopBucket("component/"+string(imagegraph.ImageComponentTypeLayer), "Image Layer Link deletions")
-
-	for name := range buckets {
-		printAndPopBucket(name, fmt.Sprintf("%s deletions", strings.TrimPrefix(name, "other/")))
-	}
-}
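printSummary relies on text/tabwriter for column alignment. A runnable sketch reusing the same writer parameters:

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

// Sketch of the tabwriter usage in printSummary, with the same parameters
// (minwidth 10, tabwidth 4, padding 3): cells are separated by \t and the
// writer aligns the columns when flushed.
func main() {
	w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)
	defer w.Flush()
	fmt.Fprintf(w, "Image deletions:\t%d\n", 12)
	fmt.Fprintf(w, "Blob deletions:\t%d\n", 340)
	fmt.Fprintf(w, "Image Stream updates:\t%d\n", 7)
}
```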
-
-func (o *PruneImagesOptions) printGraphBuildErrors(errs kutilerrors.Aggregate) {
-	refErrors := []error{}
-
-	fmt.Fprintf(o.ErrOut, "Failed to build graph!\n")
-
-	for _, err := range errs.Errors() {
-		if _, ok := err.(*imageprune.ErrBadReference); ok {
-			refErrors = append(refErrors, err)
-		} else {
-			fmt.Fprintf(o.ErrOut, "%v\n", err)
-		}
-	}
-
-	if len(refErrors) > 0 {
-		clientVersion, masterVersion, err := getClientAndMasterVersions(o.DiscoveryClient, o.Timeout)
-		if err != nil {
-			fmt.Fprintf(o.ErrOut, "Failed to get master API version: %v\n", err)
-		}
-		fmt.Fprintf(o.ErrOut, "\nThe following objects have invalid references:\n\n")
-		for _, err := range refErrors {
-			fmt.Fprintf(o.ErrOut, "  %s\n", err)
-		}
-		fmt.Fprintf(o.ErrOut, "\nEither fix the references or delete the objects to make the pruner proceed.\n")
-
-		if masterVersion != nil && (clientVersion.Major != masterVersion.Major || clientVersion.Minor != masterVersion.Minor) {
-			fmt.Fprintf(o.ErrOut, "Client version (%s) doesn't match master (%s), which may allow for different image references. Try to re-run this binary with the same version.\n", clientVersion, masterVersion)
-		}
-	}
-}
-
-// describingImageStreamDeleter prints information about each image stream update.
-// If a delegate exists, its DeleteImageStream function is invoked prior to returning.
-type describingImageStreamDeleter struct {
-	w             io.Writer
-	delegate      imageprune.ImageStreamDeleter
-	headerPrinted bool
-	errOut        io.Writer
-}
-
-var _ imageprune.ImageStreamDeleter = &describingImageStreamDeleter{}
-
-func (p *describingImageStreamDeleter) GetImageStream(stream *imagev1.ImageStream) (*imagev1.ImageStream, error) {
-	return stream, nil
-}
-
-func (p *describingImageStreamDeleter) UpdateImageStream(stream *imagev1.ImageStream) (*imagev1.ImageStream, error) {
-	if p.delegate == nil {
-		return stream, nil
-	}
-
-	updatedStream, err := p.delegate.UpdateImageStream(stream)
-	if err != nil {
-		fmt.Fprintf(p.errOut, "error updating image stream %s/%s to remove image references: %v\n", stream.Namespace, stream.Name, err)
-	}
-
-	return updatedStream, err
-}
-
-func (p *describingImageStreamDeleter) NotifyImageStreamPrune(stream *imagev1.ImageStream, updatedTags []string, deletedTags []string) {
-	if len(updatedTags) > 0 {
-		fmt.Fprintf(p.w, "Updating istags %s/%s: %s\n", stream.Namespace, stream.Name, strings.Join(updatedTags, ", "))
-	}
-	if len(deletedTags) > 0 {
-		fmt.Fprintf(p.w, "Deleting istags %s/%s: %s\n", stream.Namespace, stream.Name, strings.Join(deletedTags, ", "))
-	}
-}
-
-// describingImageDeleter prints information about each image being deleted.
-// If a delegate exists, its DeleteImage function is invoked prior to returning.
-type describingImageDeleter struct {
-	w             io.Writer
-	delegate      imageprune.ImageDeleter
-	headerPrinted bool
-	errOut        io.Writer
-}
-
-var _ imageprune.ImageDeleter = &describingImageDeleter{}
-
-func (p *describingImageDeleter) DeleteImage(image *imagev1.Image) error {
-	fmt.Fprintf(p.w, "Deleting image %s\n", image.Name)
-
-	if p.delegate == nil {
-		return nil
-	}
-
-	err := p.delegate.DeleteImage(image)
-	if err != nil {
-		fmt.Fprintf(p.errOut, "error deleting image %s from server: %v\n", image.Name, err)
-	}
-
-	return err
-}
-
-// describingLayerLinkDeleter prints information about each repo layer link being deleted. If a delegate
-// exists, its DeleteLayerLink function is invoked prior to returning.
-type describingLayerLinkDeleter struct {
-	w             io.Writer
-	delegate      imageprune.LayerLinkDeleter
-	headerPrinted bool
-	errOut        io.Writer
-}
-
-var _ imageprune.LayerLinkDeleter = &describingLayerLinkDeleter{}
-
-func (p *describingLayerLinkDeleter) DeleteLayerLink(registryClient *http.Client, registryURL *url.URL, repo, name string) error {
-	fmt.Fprintf(p.w, "Deleting layer link %s in repository %s\n", name, repo)
-
-	if p.delegate == nil {
-		return nil
-	}
-
-	err := p.delegate.DeleteLayerLink(registryClient, registryURL, repo, name)
-	if err != nil {
-		fmt.Fprintf(p.errOut, "error deleting repository %s layer link %s from the registry: %v\n", repo, name, err)
-	}
-
-	return err
-}
-
-// describingBlobDeleter prints information about each blob being deleted. If a
-// delegate exists, its DeleteBlob function is invoked prior to returning.
-type describingBlobDeleter struct {
-	w             io.Writer
-	delegate      imageprune.BlobDeleter
-	headerPrinted bool
-	errOut        io.Writer
-}
-
-var _ imageprune.BlobDeleter = &describingBlobDeleter{}
-
-func (p *describingBlobDeleter) DeleteBlob(registryClient *http.Client, registryURL *url.URL, layer string) error {
-	fmt.Fprintf(p.w, "Deleting blob %s\n", layer)
-
-	if p.delegate == nil {
-		return nil
-	}
-
-	err := p.delegate.DeleteBlob(registryClient, registryURL, layer)
-	if err != nil {
-		fmt.Fprintf(p.errOut, "error deleting blob %s from the registry: %v\n", layer, err)
-	}
-
-	return err
-}
-
-// describingManifestDeleter prints information about each repo manifest being
-// deleted. If a delegate exists, its DeleteManifest function is invoked prior
-// to returning.
-type describingManifestDeleter struct {
-	w             io.Writer
-	delegate      imageprune.ManifestDeleter
-	headerPrinted bool
-	errOut        io.Writer
-}
-
-var _ imageprune.ManifestDeleter = &describingManifestDeleter{}
-
-func (p *describingManifestDeleter) DeleteManifest(registryClient *http.Client, registryURL *url.URL, repo, manifest string) error {
-	fmt.Fprintf(p.w, "Deleting manifest link %s in repository %s\n", manifest, repo)
-
-	if p.delegate == nil {
-		return nil
-	}
-
-	err := p.delegate.DeleteManifest(registryClient, registryURL, repo, manifest)
-	if err != nil {
-		fmt.Fprintf(p.errOut, "error deleting manifest link %s from repository %s: %v\n", manifest, repo, err)
-	}
-
-	return err
-}
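All four describing* deleters above follow one decorator shape: print the would-be action, then forward to a delegate that is wired up only when --confirm was given. The shape distilled into a sketch; Deleter here is a stand-in interface, not a type from the imageprune package:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// Deleter is a stand-in for the imageprune deleter interfaces.
type Deleter interface {
	Delete(name string) error
}

// describingDeleter prints what would be deleted and, when a delegate is
// present (i.e. --confirm was given), performs the real deletion too.
type describingDeleter struct {
	w        io.Writer
	delegate Deleter // nil in dry-run mode
}

func (p *describingDeleter) Delete(name string) error {
	fmt.Fprintf(p.w, "Deleting %s\n", name)
	if p.delegate == nil {
		return nil
	}
	return p.delegate.Delete(name)
}

func main() {
	d := &describingDeleter{w: os.Stdout} // dry run: no delegate
	_ = d.Delete("sha256:abc")
}
```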
-
-func getImageClientFactory(f kcmdutil.Factory) func() (imagev1client.ImageV1Interface, error) {
-	return func() (imagev1client.ImageV1Interface, error) {
-		clientConfig, err := f.ToRESTConfig()
-		if err != nil {
-			return nil, err
-		}
-
-		return imagev1client.NewForConfig(clientConfig)
-	}
-}
-
-// getRegistryClient returns a registry client. Note that registryCABundle and registryInsecure=true are
-// mutually exclusive. If registryInsecure=true is specified, the ca bundle is ignored.
-func getRegistryClient(clientConfig *restclient.Config, registryCABundle string, registryInsecure bool) (*http.Client, error) {
-	var (
-		err                      error
-		cadata                   []byte
-		registryCABundleIncluded = false
-		token                    = clientConfig.BearerToken
-	)
-
-	if len(token) == 0 {
-		return nil, errNoToken
-	}
-
-	if len(registryCABundle) > 0 {
-		cadata, err = ioutil.ReadFile(registryCABundle)
-		if err != nil {
-			return nil, fmt.Errorf("failed to read registry ca bundle: %v", err)
-		}
-	}
-
-	// copy the config
-	registryClientConfig := *clientConfig
-	registryClientConfig.TLSClientConfig.Insecure = registryInsecure
-
-	// zero out everything we don't want to use
-	registryClientConfig.BearerToken = ""
-	registryClientConfig.CertFile = ""
-	registryClientConfig.CertData = []byte{}
-	registryClientConfig.KeyFile = ""
-	registryClientConfig.KeyData = []byte{}
-
-	if registryInsecure {
-		// it's not allowed to specify insecure flag together with CAs
-		registryClientConfig.CAFile = ""
-		registryClientConfig.CAData = []byte{}
-
-	} else if len(cadata) > 0 && len(registryClientConfig.CAData) == 0 {
-		// If given, we want to append cabundle to the resulting tlsConfig.RootCAs. However, if we
-		// leave CAData unset, tlsConfig may not be created. We could append the caBundle to the
-		// CAData here directly if we were ok doing a binary magic, which is not the case.
-		registryClientConfig.CAData = cadata
-		registryCABundleIncluded = true
-	}
-
-	// we have to set a username to something for the Docker login but it's not actually used
-	registryClientConfig.Username = "unused"
-
-	// set the "password" to be the token
-	registryClientConfig.Password = token
-
-	tlsConfig, err := restclient.TLSConfigFor(&registryClientConfig)
-	if err != nil {
-		return nil, err
-	}
-
-	// Add the CA bundle to the client config's CA roots if provided and we haven't done that already.
-	// FIXME: handle registryCABundle on one place
-	if tlsConfig != nil && len(cadata) > 0 && !registryCABundleIncluded && !registryInsecure {
-		if tlsConfig.RootCAs == nil {
-			tlsConfig.RootCAs = x509.NewCertPool()
-		}
-		tlsConfig.RootCAs.AppendCertsFromPEM(cadata)
-	}
-
-	transport := knet.SetTransportDefaults(&http.Transport{
-		TLSClientConfig: tlsConfig,
-	})
-
-	wrappedTransport, err := restclient.HTTPWrappersForConfig(&registryClientConfig, transport)
-	if err != nil {
-		return nil, err
-	}
-
-	return &http.Client{
-		Transport: wrappedTransport,
-	}, nil
-}
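getRegistryClient appends an extra CA bundle to the transport's root pool when one is supplied. The same step isolated with only the standard library; clientWithCA and the path are invented for illustration:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net/http"
)

// clientWithCA reads a PEM bundle from disk and installs it as the root
// pool of an http.Client, mirroring the RootCAs handling above.
func clientWithCA(caPath string) (*http.Client, error) {
	cadata, err := ioutil.ReadFile(caPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read registry ca bundle: %v", err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(cadata) {
		return nil, fmt.Errorf("no certificates found in %s", caPath)
	}
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{RootCAs: pool},
		},
	}, nil
}

func main() {
	if _, err := clientWithCA("/path/to/ca-bundle.crt"); err != nil {
		fmt.Println(err)
	}
}
```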
-
-// getClientAndMasterVersions returns version info for client and master binaries. If it takes too long to get
-// a response from the master, timeout error is returned.
-func getClientAndMasterVersions(client discovery.DiscoveryInterface, timeout time.Duration) (clientVersion, masterVersion *apimachineryversion.Info, err error) {
-	done := make(chan error)
-
-	go func() {
-		defer close(done)
-
-		ocVersionBody, err := client.RESTClient().Get().AbsPath("/version/openshift").Do().Raw()
-		switch {
-		case err == nil:
-			var ocServerInfo apimachineryversion.Info
-			err = json.Unmarshal(ocVersionBody, &ocServerInfo)
-			if err != nil && len(ocVersionBody) > 0 {
-				done <- err
-				return
-			}
-			masterVersion = &ocServerInfo
-
-		case kerrors.IsNotFound(err) || kerrors.IsUnauthorized(err) || kerrors.IsForbidden(err):
-		default:
-			done <- err
-			return
-		}
-	}()
-
-	select {
-	case err, received := <-done:
-		if strings.HasSuffix(fmt.Sprintf("%v", err), "connection refused") || kclientcmd.IsEmptyConfig(err) || kclientcmd.IsConfigurationInvalid(err) {
-			return nil, nil, err
-		}
-		// received is true only when the goroutine sent an error before closing the channel
-		if received && err != nil {
-			return nil, nil, err
-		}
-	// do not block error printing if the master is busy
-	case <-time.After(timeout):
-		return nil, nil, fmt.Errorf("error: server took too long to respond with version information.")
-	}
-
-	v := version.Get()
-	clientVersion = &v
-
-	return
-}
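getClientAndMasterVersions races the version request against time.After so a slow master cannot hang the error report. The pattern distilled, with callWithTimeout as an invented helper; here a buffered channel avoids leaking a blocked goroutine after a timeout:

```go
package example

import (
	"fmt"
	"time"
)

// callWithTimeout runs the slow call in a goroutine and races its
// completion against a timer, as getClientAndMasterVersions does above.
func callWithTimeout(timeout time.Duration, slow func() error) error {
	done := make(chan error, 1) // buffered: the goroutine can finish even after a timeout
	go func() {
		done <- slow()
	}()
	select {
	case err := <-done:
		return err
	case <-time.After(timeout):
		return fmt.Errorf("timed out after %s", timeout)
	}
}
```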
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/images/images_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/images/images_test.go
deleted file mode 100644
index 5eb0c15edb9f..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/images/images_test.go
+++ /dev/null
@@ -1,283 +0,0 @@
-package images
-
-import (
-	"bytes"
-	"encoding/json"
-	"flag"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"os"
-	"strings"
-	"testing"
-	"time"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/diff"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/apimachinery/pkg/version"
-	apimachineryversion "k8s.io/apimachinery/pkg/version"
-	fakediscovery "k8s.io/client-go/discovery/fake"
-	fakekubernetes "k8s.io/client-go/kubernetes/fake"
-	kubernetesscheme "k8s.io/client-go/kubernetes/scheme"
-	restclient "k8s.io/client-go/rest"
-	restfake "k8s.io/client-go/rest/fake"
-	"k8s.io/klog"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-
-	"github.com/openshift/api"
-	fakeappsclient "github.com/openshift/client-go/apps/clientset/versioned/fake"
-	fakeappsv1client "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1/fake"
-	fakebuildclient "github.com/openshift/client-go/build/clientset/versioned/fake"
-	fakebuildv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake"
-	fakeimageclient "github.com/openshift/client-go/image/clientset/versioned/fake"
-	fakeimagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake"
-	imagetest "github.com/openshift/oc/pkg/helpers/image/test"
-)
-
-var logLevel = flag.Int("loglevel", 0, "")
-
-func TestImagePruneNamespaced(t *testing.T) {
-	var level klog.Level
-	level.Set(fmt.Sprint(*logLevel))
-
-	kFake := fakekubernetes.NewSimpleClientset()
-	imageFake := &fakeimagev1client.FakeImageV1{Fake: &(fakeimageclient.NewSimpleClientset().Fake)}
-	opts := &PruneImagesOptions{
-		Namespace: "foo",
-
-		AppsClient:  &fakeappsv1client.FakeAppsV1{Fake: &(fakeappsclient.NewSimpleClientset().Fake)},
-		BuildClient: &fakebuildv1client.FakeBuildV1{Fake: &(fakebuildclient.NewSimpleClientset().Fake)},
-		ImageClient: imageFake,
-		KubeClient:  kFake,
-		Out:         ioutil.Discard,
-		ErrOut:      os.Stderr,
-	}
-
-	if err := opts.Run(); err != nil {
-		t.Errorf("Unexpected error: %v", err)
-	}
-
-	if len(imageFake.Actions()) == 0 || len(kFake.Actions()) == 0 {
-		t.Errorf("Missing get images actions")
-	}
-	for _, a := range imageFake.Actions() {
-		// images are non-namespaced
-		if a.GetResource().Resource == "images" {
-			continue
-		}
-		if a.GetNamespace() != "foo" {
-			t.Errorf("Unexpected namespace while pruning %s: %s", a.GetResource(), a.GetNamespace())
-		}
-	}
-	for _, a := range kFake.Actions() {
-		if a.GetNamespace() != "foo" {
-			t.Errorf("Unexpected namespace while pruning %s: %s", a.GetResource(), a.GetNamespace())
-		}
-	}
-}
-
-func TestImagePruneErrOnBadReference(t *testing.T) {
-	var level klog.Level
-	level.Set(fmt.Sprint(*logLevel))
-
-	podBad := imagetest.Pod("foo", "pod1", corev1.PodRunning, "invalid image reference")
-	podGood := imagetest.Pod("foo", "pod2", corev1.PodRunning, "example.com/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")
-	dep := imagetest.Deployment("foo", "dep1", "do not blame me")
-	bcBad := imagetest.BC("foo", "bc1", "source", "ImageStreamImage", "foo", "bar:invalid-digest")
-
-	kFake := fakekubernetes.NewSimpleClientset(&podBad, &podGood, &dep)
-	imageFake := &fakeimagev1client.FakeImageV1{Fake: &(fakeimageclient.NewSimpleClientset().Fake)}
-	fakeDiscovery := &fakeVersionDiscovery{
-		masterVersion: version.Info{},
-	}
-
-	// we need to install OpenShift API types to kubectl's scheme for GetReference to work
-	api.Install(scheme.Scheme)
-
-	switch d := kFake.Discovery().(type) {
-	case *fakediscovery.FakeDiscovery:
-		fakeDiscovery.FakeDiscovery = d
-	default:
-		t.Fatalf("unexpected discovery type: %T != %T", d, &fakediscovery.FakeDiscovery{})
-	}
-
-	errBuf := bytes.NewBuffer(make([]byte, 0, 4096))
-	opts := &PruneImagesOptions{
-		AppsClient:      &fakeappsv1client.FakeAppsV1{Fake: &(fakeappsclient.NewSimpleClientset().Fake)},
-		BuildClient:     &fakebuildv1client.FakeBuildV1{Fake: &(fakebuildclient.NewSimpleClientset(&bcBad).Fake)},
-		ImageClient:     imageFake,
-		KubeClient:      kFake,
-		DiscoveryClient: fakeDiscovery,
-		Timeout:         time.Second,
-		Out:             ioutil.Discard,
-		ErrOut:          errBuf,
-	}
-
-	verifyOutput := func(out string, expectClientVersionMismatch bool) {
-		t.Logf("pruner error output: %s\n", out)
-
-		badRefErrors := sets.NewString()
-		for _, l := range strings.Split(out, "\n") {
-			if strings.HasPrefix(l, "  ") {
-				badRefErrors.Insert(l[2:])
-			}
-		}
-		expBadRefErrors := sets.NewString(
-			`Pod[foo/pod1]: invalid container image reference "invalid image reference": invalid reference format`,
-			`BuildConfig[foo/bc1]: invalid ImageStreamImage reference "bar:invalid-digest": expected exactly one @ in the isimage name "bar:invalid-digest"`,
-			`Deployment[foo/dep1]: invalid container image reference "do not blame me": invalid reference format`)
-
-		if a, e := badRefErrors, expBadRefErrors; !a.Equal(e) {
-			t.Fatalf("got unexpected invalid reference errors: %s", diff.ObjectDiff(a, e))
-		}
-
-		if expectClientVersionMismatch {
-			if msg := "client version"; !strings.Contains(strings.ToLower(out), msg) {
-				t.Errorf("expected message %q is not contained in the output", msg)
-			}
-		} else {
-			for _, msg := range []string{"failed to get master api version", "client version"} {
-				if strings.Contains(strings.ToLower(out), msg) {
-					t.Errorf("got unexpected message %q in the output", msg)
-				}
-			}
-		}
-	}
-
-	err := opts.Run()
-	if err == nil {
-		t.Fatal("Unexpected non-error")
-	}
-
-	t.Logf("pruner error: %s\n", err)
-	verifyOutput(errBuf.String(), false)
-
-	t.Logf("bump master version and try again")
-	fakeDiscovery.masterVersion.Minor += "1"
-	errBuf.Reset()
-	err = opts.Run()
-	if err == nil {
-		t.Fatal("Unexpected non-error")
-	}
-
-	t.Logf("pruner error: %s\n", err)
-	verifyOutput(errBuf.String(), true)
-}
-
-type fakeVersionDiscovery struct {
-	*fakediscovery.FakeDiscovery
-	masterVersion apimachineryversion.Info
-}
-
-func (f *fakeVersionDiscovery) RESTClient() restclient.Interface {
-	return &restfake.RESTClient{
-		NegotiatedSerializer: kubernetesscheme.Codecs,
-		Client: restfake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
-			if req.URL.Path != "/version/openshift" {
-				return &http.Response{
-					StatusCode: http.StatusNotFound,
-				}, nil
-			}
-			header := http.Header{}
-			header.Set("Content-Type", runtime.ContentTypeJSON)
-			return &http.Response{
-				StatusCode: http.StatusOK,
-				Header:     header,
-				Body:       objBody(&f.masterVersion),
-			}, nil
-		}),
-	}
-}
-
-func objBody(object interface{}) io.ReadCloser {
-	output, err := json.MarshalIndent(object, "", "")
-	if err != nil {
-		panic(err)
-	}
-	return ioutil.NopCloser(bytes.NewReader(output))
-}
-
-func TestValidateRegistryURL(t *testing.T) {
-	for _, tc := range []struct {
-		input               string
-		expectedError       bool
-		expectedErrorString string
-	}{
-		{input: "172.30.30.30:5000"},
-		{input: ":5000"},
-		{input: "[fd12:3456:789a:1::1]:80/"},
-		{input: "[fd12:3456:789a:1::1]:80"},
-		{input: "http://172.30.30.30:5000"},
-		{input: "http://[fd12:3456:789a:1::1]:5000/"},
-		{input: "http://[fd12:3456:789a:1::1]:5000"},
-		{input: "http://registry.org:5000"},
-		{input: "https://172.30.30.30:5000"},
-		{input: "https://:80/"},
-		{input: "https://[fd12:3456:789a:1::1]/"},
-		{input: "https://[fd12:3456:789a:1::1]"},
-		{input: "https://[fd12:3456:789a:1::1]:5000/"},
-		{input: "https://[fd12:3456:789a:1::1]:5000"},
-		{input: "https://registry.org/"},
-		{input: "https://registry.org"},
-		{input: "localhost/"},
-		{input: "localhost"},
-		{input: "localhost:80"},
-		{input: "registry.org/"},
-		{input: "registry.org"},
-		{input: "registry.org:5000"},
-
-		{
-			input:               "httpss://registry.org",
-			expectedErrorString: "unsupported scheme: httpss",
-		},
-		{
-			input:               "ftp://registry.org",
-			expectedErrorString: "unsupported scheme: ftp",
-		},
-		{
-			input:               "http://registry.org://",
-			expectedErrorString: errNoRegistryURLPathAllowed.Error(),
-		},
-		{
-			input:               "http://registry.org/path",
-			expectedErrorString: errNoRegistryURLPathAllowed.Error(),
-		},
-		{
-			input:         "[fd12:3456:789a:1::1",
-			expectedError: true,
-		},
-		{
-			input:         "bad url",
-			expectedError: true,
-		},
-		{
-			input:               "/registry.org",
-			expectedErrorString: errNoRegistryURLPathAllowed.Error(),
-		},
-		{
-			input:               "https:///",
-			expectedErrorString: errRegistryURLHostEmpty.Error(),
-		},
-		{
-			input:               "http://registry.org?parm=arg",
-			expectedErrorString: errNoRegistryURLQueryAllowed.Error(),
-		},
-	} {
-
-		err := validateRegistryURL(tc.input)
-		if err != nil {
-			if len(tc.expectedErrorString) > 0 && err.Error() != tc.expectedErrorString {
-				t.Errorf("[%s] unexpected error string: %q != %q", tc.input, err.Error(), tc.expectedErrorString)
-			} else if len(tc.expectedErrorString) == 0 && !tc.expectedError {
-				t.Errorf("[%s] unexpected error: %q", tc.input, err.Error())
-			}
-		} else if len(tc.expectedErrorString) > 0 {
-			t.Errorf("[%s] got non-error while expecting %q", tc.input, tc.expectedErrorString)
-		} else if tc.expectedError {
-			t.Errorf("[%s] got unexpected non-error", tc.input)
-		}
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/prune.go b/vendor/github.com/openshift/oc/pkg/cli/admin/prune/prune.go
deleted file mode 100644
index 510498fa1307..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/prune/prune.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package prune
-
-import (
-	"github.com/spf13/cobra"
-
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	groups "github.com/openshift/oc/pkg/cli/admin/groups/sync"
-	"github.com/openshift/oc/pkg/cli/admin/prune/auth"
-	"github.com/openshift/oc/pkg/cli/admin/prune/builds"
-	"github.com/openshift/oc/pkg/cli/admin/prune/deployments"
-	"github.com/openshift/oc/pkg/cli/admin/prune/images"
-)
-
-const (
-	PruneRecommendedName       = "prune"
-	PruneGroupsRecommendedName = "groups"
-)
-
-var pruneLong = templates.LongDesc(`
-	Remove older versions of resources from the server
-
-	The commands here allow administrators to manage the older versions of resources on
-	the system by removing them.`)
-
-func NewCommandPrune(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	// Parent command to which all subcommands are added.
-	cmds := &cobra.Command{
-		Use:   name,
-		Short: "Remove older versions of resources from the server",
-		Long:  pruneLong,
-		Run:   kcmdutil.DefaultSubCommandRun(streams.ErrOut),
-	}
-
-	cmds.AddCommand(builds.NewCmdPruneBuilds(f, fullName, builds.PruneBuildsRecommendedName, streams))
-	cmds.AddCommand(deployments.NewCmdPruneDeployments(f, fullName, deployments.PruneDeploymentsRecommendedName, streams))
-	cmds.AddCommand(images.NewCmdPruneImages(f, fullName, images.PruneImagesRecommendedName, streams))
-	cmds.AddCommand(groups.NewCmdPrune(PruneGroupsRecommendedName, fullName+" "+PruneGroupsRecommendedName, f, streams))
-	cmds.AddCommand(auth.NewCmdPruneAuth(f, "auth", streams))
-	return cmds
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/release/annotations.go b/vendor/github.com/openshift/oc/pkg/cli/admin/release/annotations.go
deleted file mode 100644
index a2e4fcfff43d..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/release/annotations.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package release
-
-const (
-	// This annotation is set in image-references when created with --from-release.
-	annotationReleaseFromRelease = "release.openshift.io/from-release"
-	// This annotation is set in image-references when created with --from-image-stream.
-	annotationReleaseFromImageStream = "release.openshift.io/from-image-stream"
-
-	// This value is set on images as LABEL to 'true' to indicate they should be
-	// scanned for a /manifests/ directory to contribute to the payload.
-	annotationReleaseOperator = "io.openshift.release.operator"
-
-	// This is an internal annotation to indicate the source image was not derived
-	// from an image stream or existing release but was manually specified.
-	annotationReleaseOverride = "io.openshift.release.override"
-	// This LABEL is set on images to indicate the manifest digest that was used
-	// as the base layer for the release image (usually the cluster-version-operator).
-	annotationReleaseBaseImageDigest = "io.openshift.release.base-image-digest"
-	// This LABEL is a comma-delimited list of key=version pairs that can be consumed
-	// by other manifests within the payload to hardcode version strings. Version must
-	// be a semantic version with no build label (+ is not allowed) and key must be
-	// alphanumeric characters and dashes only. The value `0.0.1-snapshot-key` in a
-	// manifest will be substituted with the version value for key.
-	annotationBuildVersions = "io.openshift.build.versions"
-
-	// This LABEL is the git ref that an image was built with. Copied unmodified to
-	// the image-references file.
-	annotationBuildSourceRef = "io.openshift.build.commit.ref"
-	// This LABEL is the full git commit hash that an image was built with. Copied
-	// unmodified to the image-references file.
-	annotationBuildSourceCommit = "io.openshift.build.commit.id"
-	// This LABEL is the git clone location that an image was built with. Copied
-	// unmodified to the image-references file.
-	annotationBuildSourceLocation = "io.openshift.build.source-location"
-
-	urlGithubPrefix = "https://github.com/"
-)
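The io.openshift.build.versions comment above describes a "key=version" substitution contract for manifests in the payload. A purely hypothetical sketch of how a consumer could apply it; the real payload tooling is not part of this file:

```go
package main

import (
	"fmt"
	"strings"
)

// substituteVersions is a hypothetical illustration of the contract the
// io.openshift.build.versions comment describes: each "key=version" pair
// from the label replaces "0.0.1-snapshot-key" tokens in a manifest.
func substituteVersions(manifest, label string) string {
	for _, pair := range strings.Split(label, ",") {
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) != 2 {
			continue
		}
		manifest = strings.Replace(manifest, "0.0.1-snapshot-"+kv[0], kv[1], -1)
	}
	return manifest
}

func main() {
	out := substituteVersions("image: quay.io/foo:0.0.1-snapshot-kubernetes", "kubernetes=1.14.0")
	fmt.Println(out) // image: quay.io/foo:1.14.0
}
```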
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/release/extract.go b/vendor/github.com/openshift/oc/pkg/cli/admin/release/extract.go
deleted file mode 100644
index 2dd492f32c51..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/release/extract.go
+++ /dev/null
@@ -1,303 +0,0 @@
-package release
-
-import (
-	"archive/tar"
-	"fmt"
-	"io"
-	"os"
-	"time"
-
-	digest "github.com/opencontainers/go-digest"
-	"github.com/spf13/cobra"
-	"k8s.io/klog"
-
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	configv1client "github.com/openshift/client-go/config/clientset/versioned"
-	"github.com/openshift/library-go/pkg/image/dockerv1client"
-	imagereference "github.com/openshift/library-go/pkg/image/reference"
-	"github.com/openshift/oc/pkg/cli/image/extract"
-	imagemanifest "github.com/openshift/oc/pkg/cli/image/manifest"
-)
-
-func NewExtractOptions(streams genericclioptions.IOStreams) *ExtractOptions {
-	return &ExtractOptions{
-		IOStreams: streams,
-		Directory: ".",
-	}
-}
-
-func NewExtract(f kcmdutil.Factory, parentName string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewExtractOptions(streams)
-	cmd := &cobra.Command{
-		Use:   "extract",
-		Short: "Extract the contents of an update payload to disk",
-		Long: templates.LongDesc(`
-			Extract the contents of a release image to disk
-
-			Extracts the contents of an OpenShift release image to disk for inspection or
-			debugging. Update images contain manifests and metadata about the operators that
-			must be installed on the cluster for a given version.
-
-			The --tools and --command flags allow you to extract the appropriate client binaries
-			for your operating system to disk. --tools will create archive files containing the
-			current OS tools (or, if --command-os is set to '*', all OS versions). Specifying
-			--command for either 'oc' or 'openshift-install' will extract the binaries directly.
-			You may pass a PGP private key file with --signing-key which will create an ASCII
-			armored sha256sum.txt.asc file describing the content that was extracted that is
-			signed by the key. For more advanced signing use the generated sha256sum.txt and an
-			external tool like gpg.
-
-			Instead of extracting the manifests, you can specify --git=DIR to perform a Git
-			checkout of the source code that comprises the release. A warning will be printed
-			if the component is not associated with source code. The command will not perform
-			any destructive actions on your behalf except for executing a 'git checkout' which
-			may change the current branch. Requires 'git' to be on your path.
-		`),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-	flags := cmd.Flags()
-	o.SecurityOptions.Bind(flags)
-	o.ParallelOptions.Bind(flags)
-
-	flags.StringVar(&o.From, "from", o.From, "Image containing the release payload.")
-	flags.StringVar(&o.File, "file", o.File, "Extract a single file from the payload to standard output.")
-	flags.StringVar(&o.Directory, "to", o.Directory, "Directory to write release contents to, defaults to the current directory.")
-
-	flags.StringVar(&o.GitExtractDir, "git", o.GitExtractDir, "Check out the sources that created this release into the provided dir. Repos will be created at //. Requires 'git' on your path.")
-	flags.BoolVar(&o.Tools, "tools", o.Tools, "Extract the tools archives from the release image. Implies --command=*")
-	flags.StringVar(&o.SigningKey, "signing-key", o.SigningKey, "Sign the sha256sum.txt generated by --tools with this GPG key. A sha256sum.txt.asc file signed by this key will be created. The key is assumed to be encrypted.")
-
-	flags.StringVar(&o.Command, "command", o.Command, "Specify 'oc' or 'openshift-install' to extract the client for your operating system.")
-	flags.StringVar(&o.CommandOperatingSystem, "command-os", o.CommandOperatingSystem, "Override which operating system command is extracted (mac, windows, linux). You may specify '*' to extract all tool archives.")
-	return cmd
-}
-
-type ExtractOptions struct {
-	genericclioptions.IOStreams
-
-	SecurityOptions imagemanifest.SecurityOptions
-	ParallelOptions imagemanifest.ParallelOptions
-
-	From string
-
-	Tools                  bool
-	Command                string
-	CommandOperatingSystem string
-	SigningKey             string
-
-	// GitExtractDir is the path of a root directory to extract the source of a release to.
-	GitExtractDir string
-
-	Directory string
-	File      string
-
-	ImageMetadataCallback func(m *extract.Mapping, dgst, contentDigest digest.Digest, config *dockerv1client.DockerImageConfig)
-}
-
-func (o *ExtractOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	switch {
-	case len(args) == 0 && len(o.From) == 0:
-		cfg, err := f.ToRESTConfig()
-		if err != nil {
-			return fmt.Errorf("info expects one argument, or a connection to an OpenShift 4.x server: %v", err)
-		}
-		client, err := configv1client.NewForConfig(cfg)
-		if err != nil {
-			return fmt.Errorf("info expects one argument, or a connection to an OpenShift 4.x server: %v", err)
-		}
-		cv, err := client.ConfigV1().ClusterVersions().Get("version", metav1.GetOptions{})
-		if err != nil {
-			if apierrors.IsNotFound(err) {
-				return fmt.Errorf("you must be connected to an OpenShift 4.x server to fetch the current version")
-			}
-			return fmt.Errorf("info expects one argument, or a connection to an OpenShift 4.x server: %v", err)
-		}
-		image := cv.Status.Desired.Image
-		if len(image) == 0 && cv.Spec.DesiredUpdate != nil {
-			image = cv.Spec.DesiredUpdate.Image
-		}
-		if len(image) == 0 {
-			return fmt.Errorf("the server is not reporting a release image at this time, please specify an image to extract")
-		}
-		o.From = image
-
-	case len(args) == 1 && len(o.From) > 0, len(args) > 1:
-		return fmt.Errorf("you may only specify a single image via --from or argument")
-
-	case len(args) == 1:
-		o.From = args[0]
-	}
-	return nil
-}
-
-func (o *ExtractOptions) Run() error {
-	sources := 0
-	if o.Tools {
-		sources++
-	}
-	if len(o.File) > 0 {
-		sources++
-	}
-	if len(o.Command) > 0 {
-		sources++
-	}
-	if len(o.GitExtractDir) > 0 {
-		sources++
-	}
-
-	switch {
-	case sources > 1:
-		return fmt.Errorf("only one of --tools, --command, --file, or --git may be specified")
-	case len(o.From) == 0:
-		return fmt.Errorf("must specify an image containing a release payload with --from")
-	case o.Directory != "." && len(o.File) > 0:
-		return fmt.Errorf("only one of --to and --file may be set")
-
-	case len(o.GitExtractDir) > 0:
-		return o.extractGit(o.GitExtractDir)
-	case o.Tools:
-		return o.extractTools()
-	case len(o.Command) > 0:
-		return o.extractCommand(o.Command)
-	}
-
-	dir := o.Directory
-	if err := os.MkdirAll(dir, 0777); err != nil {
-		return err
-	}
-
-	src := o.From
-	ref, err := imagereference.Parse(src)
-	if err != nil {
-		return err
-	}
-	opts := extract.NewOptions(genericclioptions.IOStreams{Out: o.Out, ErrOut: o.ErrOut})
-	opts.SecurityOptions = o.SecurityOptions
-
-	switch {
-	case len(o.File) > 0:
-		if o.ImageMetadataCallback != nil {
-			opts.ImageMetadataCallback = o.ImageMetadataCallback
-		}
-		opts.OnlyFiles = true
-		opts.Mappings = []extract.Mapping{
-			{
-				ImageRef: ref,
-
-				From: "release-manifests/",
-				To:   dir,
-			},
-		}
-		found := false
-		opts.TarEntryCallback = func(hdr *tar.Header, _ extract.LayerInfo, r io.Reader) (bool, error) {
-			if hdr.Name != o.File {
-				return true, nil
-			}
-			if _, err := io.Copy(o.Out, r); err != nil {
-				return false, err
-			}
-			found = true
-			return false, nil
-		}
-		if err := opts.Run(); err != nil {
-			return err
-		}
-		if !found {
-			return fmt.Errorf("image did not contain %s", o.File)
-		}
-		return nil
-
-	default:
-		opts.OnlyFiles = true
-		opts.Mappings = []extract.Mapping{
-			{
-				ImageRef: ref,
-
-				From: "release-manifests/",
-				To:   dir,
-			},
-		}
-		verifier := imagemanifest.NewVerifier()
-		opts.ImageMetadataCallback = func(m *extract.Mapping, dgst, contentDigest digest.Digest, config *dockerv1client.DockerImageConfig) {
-			verifier.Verify(dgst, contentDigest)
-			if o.ImageMetadataCallback != nil {
-				o.ImageMetadataCallback(m, dgst, contentDigest, config)
-			}
-			if len(ref.ID) > 0 {
-				fmt.Fprintf(o.Out, "Extracted release payload created at %s\n", config.Created.Format(time.RFC3339))
-			} else {
-				fmt.Fprintf(o.Out, "Extracted release payload from digest %s created at %s\n", dgst, config.Created.Format(time.RFC3339))
-			}
-		}
-		if err := opts.Run(); err != nil {
-			return err
-		}
-		if !verifier.Verified() {
-			err := fmt.Errorf("the release image failed content verification and may have been tampered with")
-			if !o.SecurityOptions.SkipVerification {
-				return err
-			}
-			fmt.Fprintf(o.ErrOut, "warning: %v\n", err)
-		}
-		return nil
-	}
-}
-
-func (o *ExtractOptions) extractGit(dir string) error {
-	if err := os.MkdirAll(dir, 0777); err != nil {
-		return err
-	}
-
-	release, err := NewInfoOptions(o.IOStreams).LoadReleaseInfo(o.From, false)
-	if err != nil {
-		return err
-	}
-
-	hadErrors := false
-	alreadyExtracted := make(map[string]string)
-	for _, ref := range release.References.Spec.Tags {
-		repo := ref.Annotations[annotationBuildSourceLocation]
-		commit := ref.Annotations[annotationBuildSourceCommit]
-		if len(repo) == 0 || len(commit) == 0 {
-			if klog.V(2) {
-				klog.Infof("Tag %s has no source info", ref.Name)
-			} else {
-				fmt.Fprintf(o.ErrOut, "warning: Tag %s has no source info\n", ref.Name)
-			}
-			continue
-		}
-		if oldCommit, ok := alreadyExtracted[repo]; ok {
-			if oldCommit != commit {
-				fmt.Fprintf(o.ErrOut, "warning: Repo %s referenced more than once with different commits, only checking out the first reference\n", repo)
-			}
-			continue
-		}
-		alreadyExtracted[repo] = commit
-
-		extractedRepo, err := ensureCloneForRepo(dir, repo, nil, o.Out, o.ErrOut)
-		if err != nil {
-			hadErrors = true
-			fmt.Fprintf(o.ErrOut, "error: cloning %s: %v\n", repo, err)
-			continue
-		}
-
-		klog.V(2).Infof("Checkout %s from %s ...", commit, repo)
-		if err := extractedRepo.CheckoutCommit(repo, commit); err != nil {
-			hadErrors = true
-			fmt.Fprintf(o.ErrOut, "error: checking out commit for %s: %v\n", repo, err)
-			continue
-		}
-	}
-	if hadErrors {
-		return kcmdutil.ErrExit
-	}
-	return nil
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/release/extract_tools.go b/vendor/github.com/openshift/oc/pkg/cli/admin/release/extract_tools.go
deleted file mode 100644
index aebb4d6c2fbf..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/release/extract_tools.go
+++ /dev/null
@@ -1,661 +0,0 @@
-package release
-
-import (
-	"archive/tar"
-	"archive/zip"
-	"bufio"
-	"bytes"
-	"compress/gzip"
-	"crypto/sha256"
-	"encoding/hex"
-	"fmt"
-	"hash"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"runtime"
-	"sort"
-	"strings"
-	"sync"
-	"syscall"
-
-	"golang.org/x/crypto/ssh/terminal"
-
-	"golang.org/x/crypto/openpgp"
-
-	"k8s.io/klog"
-
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-
-	"github.com/MakeNowJust/heredoc"
-	imagereference "github.com/openshift/library-go/pkg/image/reference"
-	"github.com/openshift/oc/pkg/cli/image/extract"
-)
-
-// extractTarget describes how a file in the release image can be extracted to disk.
-type extractTarget struct {
-	OS      string
-	Command string
-
-	TargetName string
-
-	InjectReleaseImage bool
-
-	ArchiveFormat string
-	AsArchive     bool
-	AsZip         bool
-	Readme        string
-	LinkTo        []string
-
-	Mapping extract.Mapping
-}
-
-// extractTools extracts all referenced commands as archives in the target dir.
-func (o *ExtractOptions) extractTools() error {
-	return o.extractCommand("")
-}
-
-var (
-	readmeInstallUnix = heredoc.Doc(`
-	# OpenShift Install
-
-	The OpenShift installer \u0060openshift-install\u0060 makes it easy to get a cluster
-	running on the public cloud or your local infrastructure.
-
-	To learn more about installing OpenShift, visit [docs.openshift.com](https://docs.openshift.com)
-	and select the version of OpenShift you are using.
-
-	## Installing the tools
-
-	After extracting this archive, you can move the \u0060openshift-install\u0060 binary
-	to a location on your PATH such as \u0060/usr/local/bin\u0060, or keep it in a temporary
-	directory and reference it via \u0060./openshift-install\u0060.
-
-	## License
-
-	OpenShift is licensed under the Apache Public License 2.0. The source code for this
-	program is [located on github](https://github.com/openshift/installer).
-	`)
-
-	readmeCLIUnix = heredoc.Doc(`
-	# OpenShift Clients
-
-	The OpenShift client \u0060oc\u0060 simplifies working with Kubernetes and OpenShift
-	clusters, offering a number of advantages over \u0060kubectl\u0060 such as easy login,
-	kube config file management, and access to developer tools. The \u0060kubectl\u0060
-	binary is included alongside for when strict Kubernetes compliance is necessary.
-
-	To learn more about OpenShift, visit [docs.openshift.com](https://docs.openshift.com)
-	and select the version of OpenShift you are using.
-
-	## Installing the tools
-
-	After extracting this archive, move the \u0060oc\u0060 and \u0060kubectl\u0060 binaries
-	to a location on your PATH such as \u0060/usr/local/bin\u0060. Then run:
-
-	    oc login [API_URL]
-
-	to start a session against an OpenShift cluster. After login, run \u0060oc\u0060 and
-	\u0060oc help\u0060 to learn more about how to get started with OpenShift.
-
-	## License
-
-	OpenShift is licensed under the Apache Public License 2.0. The source code for this
-	program is [located on github](https://github.com/openshift/origin).
-	`)
-
-	readmeCLIWindows = heredoc.Doc(`
-	# OpenShift Clients
-
-	The OpenShift client \u0060oc.exe\u0060 simplifies working with Kubernetes and OpenShift
-	clusters, offering a number of advantages over \u0060kubectl.exe\u0060 such as easy login,
-	kube config file management, and access to developer tools.
-
-	To learn more about OpenShift, visit [docs.openshift.com](https://docs.openshift.com)
-	and select the version of OpenShift you are using.
-
-	## Installing the tools
-
-	After extracting this archive, move the \u0060oc.exe\u0060 binary to a location on your
-	PATH. Then run:
-
-	    oc login [API_URL]
-
-	to start a session against an OpenShift cluster. After login, run \u0060oc.exe\u0060 and
-	\u0060oc.exe help\u0060 to learn more about how to get started with OpenShift.
-
-	If you would like to use \u0060kubectl.exe\u0060 instead, copy the \u0060oc.exe\u0060 file
-	and rename it to \u0060kubectl.exe\u0060. The interface will follow the conventions of that
-	CLI.
-
-	## License
-
-	OpenShift is licensed under the Apache Public License 2.0. The source code for this
-	program is [located on github](https://github.com/openshift/origin).
-	`)
-)
-
-// extractTools extracts specific commands out of images referenced by the release image.
-// TODO: in the future the metadata this command contains might be loaded from the release
-//   image, but we must maintain compatibility with older payloads if so
-func (o *ExtractOptions) extractCommand(command string) error {
-	// Available targets is treated as a GA API and may not be changed without backwards
-	// compatibility of at least N-2 releases.
-	availableTargets := []extractTarget{
-		{
-			OS:      "darwin",
-			Command: "oc",
-			Mapping: extract.Mapping{Image: "cli-artifacts", From: "usr/share/openshift/mac/oc"},
-
-			LinkTo:        []string{"kubectl"},
-			Readme:        readmeCLIUnix,
-			ArchiveFormat: "openshift-client-mac-%s.tar.gz",
-		},
-		{
-			OS:      "linux",
-			Command: "oc",
-			Mapping: extract.Mapping{Image: "cli", From: "usr/bin/oc"},
-
-			LinkTo:        []string{"kubectl"},
-			Readme:        readmeCLIUnix,
-			ArchiveFormat: "openshift-client-linux-%s.tar.gz",
-		},
-		{
-			OS:      "windows",
-			Command: "oc",
-			Mapping: extract.Mapping{Image: "cli-artifacts", From: "usr/share/openshift/windows/oc.exe"},
-
-			Readme:        readmeCLIWindows,
-			ArchiveFormat: "openshift-client-windows-%s.zip",
-			AsZip:         true,
-		},
-		{
-			OS:      "darwin",
-			Command: "openshift-install",
-			Mapping: extract.Mapping{Image: "installer-artifacts", From: "usr/share/openshift/mac/openshift-install"},
-
-			Readme:             readmeInstallUnix,
-			InjectReleaseImage: true,
-			ArchiveFormat:      "openshift-install-mac-%s.tar.gz",
-		},
-		{
-			OS:      "linux",
-			Command: "openshift-install",
-			Mapping: extract.Mapping{Image: "installer", From: "usr/bin/openshift-install"},
-
-			Readme:             readmeInstallUnix,
-			InjectReleaseImage: true,
-			ArchiveFormat:      "openshift-install-linux-%s.tar.gz",
-		},
-	}
-
-	currentOS := runtime.GOOS
-	if len(o.CommandOperatingSystem) > 0 {
-		currentOS = o.CommandOperatingSystem
-	}
-	if currentOS == "mac" {
-		currentOS = "darwin"
-	}
-
-	// select the subset of targets based on command line input
-	var willArchive bool
-	var targets []extractTarget
-	if len(command) > 0 {
-		hasCommand := false
-		for _, target := range availableTargets {
-			if target.Command != command {
-				continue
-			}
-			hasCommand = true
-			if target.OS == currentOS || currentOS == "*" {
-				targets = []extractTarget{target}
-				break
-			}
-		}
-		if len(targets) == 0 {
-			if hasCommand {
-				return fmt.Errorf("command %q does not support the operating system %q", o.Command, currentOS)
-			}
-			return fmt.Errorf("the supported commands are 'oc' and 'openshift-install'")
-		}
-	} else {
-		willArchive = true
-		targets = availableTargets
-		for i := range targets {
-			targets[i].AsArchive = true
-			targets[i].AsZip = targets[i].OS == "windows"
-		}
-	}
-
-	var hashFn = sha256.New
-	var signer *openpgp.Entity
-	if willArchive && len(o.SigningKey) > 0 {
-		key, err := ioutil.ReadFile(o.SigningKey)
-		if err != nil {
-			return err
-		}
-		keyring, err := openpgp.ReadArmoredKeyRing(bytes.NewBuffer(key))
-		if err != nil {
-			return err
-		}
-		for _, key := range keyring {
-			if !key.PrivateKey.CanSign() {
-				continue
-			}
-			fmt.Fprintf(o.Out, "Enter password for private key: ")
-			password, err := terminal.ReadPassword(int(syscall.Stdin))
-			fmt.Fprintln(o.Out)
-			if err != nil {
-				return err
-			}
-			if err := key.PrivateKey.Decrypt(password); err != nil {
-				return fmt.Errorf("unable to decrypt signing key: %v", err)
-			}
-			for i, subkey := range key.Subkeys {
-				if err := subkey.PrivateKey.Decrypt(password); err != nil {
-					return fmt.Errorf("unable to decrypt signing subkey %d: %v", i, err)
-				}
-			}
-			signer = key
-			break
-		}
-		if signer == nil {
-			return fmt.Errorf("no private key exists in %s capable of signing the output", o.SigningKey)
-		}
-	}
-
-	// load the release image
-	dir := o.Directory
-	infoOptions := NewInfoOptions(o.IOStreams)
-	infoOptions.SecurityOptions = o.SecurityOptions
-	release, err := infoOptions.LoadReleaseInfo(o.From, false)
-	if err != nil {
-		return err
-	}
-	releaseName := release.PreferredName()
-	refExact := release.ImageRef
-	refExact.Tag = ""
-	refExact.ID = release.Digest.String()
-	exactReleaseImage := refExact.String()
-
-	// resolve target image references to their pull specs
-	missing := sets.NewString()
-	var validTargets []extractTarget
-	for _, target := range targets {
-		if currentOS != "*" && target.OS != currentOS {
-			klog.V(2).Infof("Skipping %s, does not match current OS %s", target.ArchiveFormat, target.OS)
-			continue
-		}
-		spec, err := findImageSpec(release.References, target.Mapping.Image, o.From)
-		if err != nil {
-			missing.Insert(target.Mapping.Image)
-			continue
-		}
-		klog.V(2).Infof("Will extract %s from %s", target.Mapping.From, spec)
-		ref, err := imagereference.Parse(spec)
-		if err != nil {
-			return err
-		}
-		target.Mapping.Image = spec
-		target.Mapping.ImageRef = ref
-		if target.AsArchive {
-			willArchive = true
-			target.Mapping.Name = fmt.Sprintf(target.ArchiveFormat, releaseName)
-			target.Mapping.To = filepath.Join(dir, target.Mapping.Name)
-		} else {
-			target.Mapping.To = filepath.Join(dir, filepath.Base(target.Mapping.From))
-			target.Mapping.Name = fmt.Sprintf("%s-%s", target.OS, target.Command)
-		}
-		validTargets = append(validTargets, target)
-	}
-
-	if len(validTargets) == 0 {
-		if len(missing) == 1 {
-			return fmt.Errorf("the image %q containing the desired command is not available", missing.List()[0])
-		}
-		return fmt.Errorf("some required images are missing: %s", strings.Join(missing.List(), ", "))
-	}
-	if len(missing) > 0 {
-		fmt.Fprintf(o.ErrOut, "warning: Some commands can not be extracted due to missing images: %s\n", strings.Join(missing.List(), ", "))
-	}
-
-	// will extract in parallel
-	opts := extract.NewOptions(genericclioptions.IOStreams{Out: o.Out, ErrOut: o.ErrOut})
-	opts.ParallelOptions = o.ParallelOptions
-	opts.SecurityOptions = o.SecurityOptions
-	opts.OnlyFiles = true
-
-	// create the mapping lookup of the valid targets
-	var extractLock sync.Mutex
-	targetsByName := make(map[string]extractTarget)
-	for _, target := range validTargets {
-		targetsByName[target.Mapping.Name] = target
-		opts.Mappings = append(opts.Mappings, target.Mapping)
-	}
-	hashByTargetName := make(map[string]string)
-
-	// ensure the output directory exists
-	if err := os.MkdirAll(dir, 0777); err != nil {
-		return err
-	}
-
-	// as each layer is extracted, take the output binary and write it to disk
-	opts.TarEntryCallback = func(hdr *tar.Header, layer extract.LayerInfo, r io.Reader) (bool, error) {
-		// ensure we don't process the same mapping twice due to programmer error
-		target, ok := func() (extractTarget, bool) {
-			extractLock.Lock()
-			defer extractLock.Unlock()
-			target, ok := targetsByName[layer.Mapping.Name]
-			return target, ok
-		}()
-		if !ok {
-			return false, fmt.Errorf("unable to find target with mapping name %s", layer.Mapping.Name)
-		}
-
-		// open the file
-		f, err := os.OpenFile(layer.Mapping.To, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
-		if err != nil {
-			return false, err
-		}
-
-		// if we need to write an archive, wrap the file appropriately to create a single
-		// entry
-		var w io.Writer = f
-
-		bw := bufio.NewWriterSize(w, 16*1024)
-		w = bw
-
-		var hash hash.Hash
-		closeFn := func() error { return nil }
-		if target.AsArchive {
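-			// the readme templates spell backticks as \u0060 because a Go raw string
-			// literal cannot contain a literal backtick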
-			text := strings.Replace(target.Readme, `\u0060`, "`", -1)
-			hash = hashFn()
-			w = io.MultiWriter(hash, w)
-			if target.AsZip {
-				klog.V(2).Infof("Writing %s as a ZIP archive %s", hdr.Name, layer.Mapping.To)
-				zw := zip.NewWriter(w)
-
-				if len(text) > 0 {
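-					// use Windows line endings for the README shipped inside the zip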
-					text = strings.Replace(text, "\n", "\r\n", -1)
-					zh := &zip.FileHeader{
-						Method:             zip.Deflate,
-						Name:               "README.md",
-						UncompressedSize64: uint64(len(text)),
-						Modified:           hdr.ModTime,
-					}
-					zh.SetMode(os.FileMode(0755))
-
-					fw, err := zw.CreateHeader(zh)
-					if err != nil {
-						return false, err
-					}
-					if _, err := fmt.Fprint(fw, text); err != nil {
-						return false, err
-					}
-				}
-
-				zh := &zip.FileHeader{
-					Method:             zip.Deflate,
-					Name:               hdr.Name,
-					UncompressedSize64: uint64(hdr.Size),
-					Modified:           hdr.ModTime,
-				}
-				zh.SetMode(os.FileMode(0755))
-
-				fw, err := zw.CreateHeader(zh)
-				if err != nil {
-					return false, err
-				}
-
-				w = fw
-				closeFn = func() error { return zw.Close() }
-
-			} else {
-				klog.V(2).Infof("Writing %s as a tar.gz archive %s", hdr.Name, layer.Mapping.To)
-				gw, err := gzip.NewWriterLevel(w, 3)
-				if err != nil {
-					return false, err
-				}
-				tw := tar.NewWriter(gw)
-
-				if len(text) > 0 {
-					if err := tw.WriteHeader(&tar.Header{
-						Name:     "README.md",
-						Mode:     int64(os.FileMode(0644).Perm()),
-						Size:     int64(len(text)),
-						Typeflag: tar.TypeReg,
-						ModTime:  hdr.ModTime,
-					}); err != nil {
-						return false, err
-					}
-					if _, err := fmt.Fprint(tw, text); err != nil {
-						return false, err
-					}
-				}
-
-				if err := tw.WriteHeader(&tar.Header{
-					Name:     hdr.Name,
-					Mode:     int64(os.FileMode(0755).Perm()),
-					Size:     hdr.Size,
-					Typeflag: tar.TypeReg,
-					ModTime:  hdr.ModTime,
-				}); err != nil {
-					return false, err
-				}
-
-				w = tw
-				closeFn = func() error {
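-					// append hard-link entries (e.g. kubectl -> oc) before closing the archive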
-					for _, link := range target.LinkTo {
-						if err := tw.WriteHeader(&tar.Header{
-							Name:     link,
-							Mode:     int64(os.FileMode(0755).Perm()),
-							Size:     0,
-							Typeflag: tar.TypeLink,
-							ModTime:  hdr.ModTime,
-							Linkname: hdr.Name,
-						}); err != nil {
-							return err
-						}
-					}
-					if err := tw.Close(); err != nil {
-						return err
-					}
-					return gw.Close()
-				}
-			}
-		}
-
-		// copy the input to disk
-		if target.InjectReleaseImage {
-			var matched bool
-			matched, err = copyAndReplaceReleaseImage(w, r, 4*1024, exactReleaseImage)
-			if !matched {
-				fmt.Fprintf(o.ErrOut, "warning: Unable to replace release image location into %s, installer will not be locked to the correct image\n", target.TargetName)
-			}
-		} else {
-			_, err = io.Copy(w, r)
-		}
-		if err != nil {
-			closeFn()
-			f.Close()
-			os.Remove(f.Name())
-			return false, err
-		}
-
-		// ensure the file is written to disk
-		if err := closeFn(); err != nil {
-			return false, err
-		}
-		if err := bw.Flush(); err != nil {
-			return false, err
-		}
-		if err := f.Close(); err != nil {
-			return false, err
-		}
-		if err := os.Chtimes(f.Name(), hdr.ModTime, hdr.ModTime); err != nil {
-			klog.V(2).Infof("Unable to set extracted file modification time: %v", err)
-		}
-
-		func() {
-			extractLock.Lock()
-			defer extractLock.Unlock()
-			delete(targetsByName, layer.Mapping.Name)
-			if hash != nil {
-				hashByTargetName[layer.Mapping.To] = hex.EncodeToString(hash.Sum(nil))
-			}
-		}()
-
-		return false, nil
-	}
-	if err := opts.Run(); err != nil {
-		return err
-	}
-
-	if willArchive {
-		buf := &bytes.Buffer{}
-		fmt.Fprintf(buf, heredoc.Doc(`
-			Client tools for OpenShift
-			--------------------------
-			
-			These archives contain the client tooling for [OpenShift](https://docs.openshift.com).
-
-			To verify the contents of this directory, use the 'gpg' and 'shasum' tools to
-			ensure the archives you have downloaded match those published from this location.
-			
-			The openshift-install binary has been preconfigured to install the following release:
-
-			---
-			
-		`))
-		if err := describeReleaseInfo(buf, release, false, true, false); err != nil {
-			return err
-		}
-		filename := "release.txt"
-		if err := ioutil.WriteFile(filepath.Join(dir, filename), buf.Bytes(), 0644); err != nil {
-			return err
-		}
-		hash := hashFn()
-		hash.Write(buf.Bytes())
-		hashByTargetName[filename] = hex.EncodeToString(hash.Sum(nil))
-	}
-
-	// write a checksum of the tar files to disk as sha256sum.txt.asc
-	if len(hashByTargetName) > 0 {
-		var keys []string
-		for k := range hashByTargetName {
-			keys = append(keys, k)
-		}
-		sort.Strings(keys)
-		var lines []string
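-		// one line per file in the "HASH  NAME" layout (two spaces) expected by sha256sum -c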
-		for _, k := range keys {
-			hash := hashByTargetName[k]
-			lines = append(lines, fmt.Sprintf("%s  %s", hash, filepath.Base(k)))
-		}
-		// ensure a trailing newline
-		if len(lines[len(lines)-1]) != 0 {
-			lines = append(lines, "")
-		}
-		// write the content manifest
-		data := []byte(strings.Join(lines, "\n"))
-		filename := "sha256sum.txt"
-		if err := ioutil.WriteFile(filepath.Join(dir, filename), data, 0644); err != nil {
-			return fmt.Errorf("unable to write checksum file: %v", err)
-		}
-		// sign the content manifest
-		if signer != nil {
-			buf := &bytes.Buffer{}
-			if err := openpgp.ArmoredDetachSign(buf, signer, bytes.NewBuffer(data), nil); err != nil {
-				return fmt.Errorf("unable to sign the sha256sum.txt file: %v", err)
-			}
-			if err := ioutil.WriteFile(filepath.Join(dir, filename+".asc"), buf.Bytes(), 0644); err != nil {
-				return fmt.Errorf("unable to write signed manifest: %v", err)
-			}
-		}
-	}
-
-	// if we did not process some targets, report that to the user and error if necessary
-	if len(targetsByName) > 0 {
-		var missing []string
-		for _, target := range targetsByName {
-			missing = append(missing, target.Mapping.From)
-		}
-		sort.Strings(missing)
-		if len(missing) == 1 {
-			return fmt.Errorf("image did not contain %s", missing[0])
-		}
-		return fmt.Errorf("unable to find multiple files: %s", strings.Join(missing, ", "))
-	}
-
-	return nil
-}
-
-const (
-	// installerReplacement is the location within the installer binary that we can insert our
-	// release payload string
-	installerReplacement = "\x00_RELEASE_IMAGE_LOCATION_\x00XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\x00"
-)
-
-// copyAndReplaceReleaseImage performs a targeted replacement for binaries that contain a special marker string
-// as a constant, replacing the marker with releaseImage and a NUL terminating byte. It returns true if the
-// replacement was performed.
-func copyAndReplaceReleaseImage(w io.Writer, r io.Reader, bufferSize int, releaseImage string) (bool, error) {
-	if len(releaseImage)+1 > len(installerReplacement) {
-		return false, fmt.Errorf("the release image pull spec is longer than the maximum replacement length for the installer binary")
-	}
-	if bufferSize < len(installerReplacement) {
-		return false, fmt.Errorf("the buffer size must be greater than %d bytes", len(installerReplacement))
-	}
-
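-	// Only the first len(releaseImage)+1 bytes of the marker need to match: the
-	// replacement below overwrites that prefix with the pull spec plus a NUL byte
-	// and leaves the rest of the marker's padding in place.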
-	match := []byte(installerReplacement[:len(releaseImage)+1])
-	offset := 0
-	max := bufferSize
-	buf := make([]byte, max+offset)
-	matched := false
-
-	for {
-		n, err := io.ReadFull(r, buf[offset:])
-
-		// search in the buffer for the expected match
-		end := offset + n
-		if n > 0 {
-			index := bytes.Index(buf[:end], match)
-			if index != -1 {
-				klog.V(2).Infof("Found match at %d (len=%d, offset=%d, n=%d)", index, len(buf), offset, n)
-				// the replacement starts at the beginning of the match, contains the release string and a terminating NUL byte
-				copy(buf[index:index+len(releaseImage)], []byte(releaseImage))
-				buf[index+len(releaseImage)] = 0x00
-				matched = true
-			}
-		}
-
-		// write everything that we have already searched (excluding the end of the buffer that will
-		// be checked next pass)
-		nextOffset := end - len(installerReplacement)
-		if nextOffset < 0 || matched {
-			nextOffset = 0
-		}
-		_, wErr := w.Write(buf[:end-nextOffset])
-		if wErr != nil {
-			return matched, wErr
-		}
-		if err != nil {
-			if err == io.EOF || err == io.ErrUnexpectedEOF {
-				return matched, nil
-			}
-			return matched, err
-		}
-
-		// once we complete a single match, we can copy the rest of the file without processing
-		if matched {
-			_, err := io.Copy(w, r)
-			return matched, err
-		}
-
-		// ensure the beginning of the buffer matches the end of the current buffer so that we
-		// can search for matches that span buffers
-		copy(buf[:nextOffset], buf[end-nextOffset:end])
-		offset = nextOffset
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/release/extract_tools_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/release/extract_tools_test.go
deleted file mode 100644
index c1ff6465a4e6..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/release/extract_tools_test.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package release
-
-import (
-	"bytes"
-	"encoding/hex"
-	"math/rand"
-	"strings"
-	"testing"
-)
-
-func Test_copyAndReplaceReleaseImage(t *testing.T) {
-	baseLen := len(installerReplacement)
-	tests := []struct {
-		name         string
-		r            *bytes.Buffer
-		buffer       int
-		releaseImage string
-		wantIndex    int
-		wantErr      bool
-	}{
-		{buffer: 10, wantErr: true, wantIndex: -1},
-		{buffer: baseLen, wantErr: false, wantIndex: -1},
-
-		{releaseImage: "test:latest", r: fakeInput(1024, 0), wantIndex: 1024, name: "end of file"},
-		{releaseImage: "test:latest", r: fakeInput(2*1024, 0), wantIndex: 2 * 1024},
-
-		{releaseImage: "test:latest", r: fakeInput(1024-1, 0, 1), wantIndex: 1024 - 1},
-		{releaseImage: "test:latest", r: fakeInput(0, 1), wantIndex: 0},
-
-		{releaseImage: "test:latest", r: fakeInput(baseLen, 0), wantIndex: baseLen},
-		{releaseImage: "test:latest", r: fakeInput(baseLen*2, 0), wantIndex: baseLen * 2},
-		{releaseImage: "test:latest", r: fakeInput(baseLen-1, 0), wantIndex: baseLen - 1},
-		{releaseImage: "test:latest", r: fakeInput(baseLen*2-1, 0), wantIndex: baseLen*2 - 1},
-		{releaseImage: "test:latest", r: fakeInput(baseLen+1, 0), wantIndex: baseLen + 1},
-		{releaseImage: "test:latest", r: fakeInput(baseLen*2+1, 0), wantIndex: baseLen*2 + 1},
-
-		{releaseImage: strings.Repeat("a", baseLen), wantIndex: -1, wantErr: true},
-		{releaseImage: strings.Repeat("a", baseLen+1), wantIndex: -1, wantErr: true},
-
-		{releaseImage: strings.Repeat("a", baseLen-1), r: fakeInput(baseLen, 0), wantIndex: baseLen},
-		{releaseImage: strings.Repeat("a", baseLen-2), r: fakeInput(baseLen, 0), wantIndex: baseLen},
-		{releaseImage: strings.Repeat("a", baseLen-1), r: fakeInput(1, baseLen, 0), wantIndex: 1 + baseLen},
-		{releaseImage: strings.Repeat("a", baseLen-2), r: fakeInput(1, 0, baseLen), wantIndex: 1},
-
-		{releaseImage: strings.Repeat("a", baseLen-1), r: fakeInput(baseLen*2, 0), wantIndex: baseLen * 2},
-		{releaseImage: strings.Repeat("a", baseLen-2), r: fakeInput(baseLen*2, 0), wantIndex: baseLen * 2},
-		{releaseImage: strings.Repeat("a", baseLen-1), r: fakeInput(1, baseLen*2, 0), wantIndex: 1 + baseLen*2},
-		{releaseImage: strings.Repeat("a", baseLen-2), r: fakeInput(1, 0, baseLen*2), wantIndex: 1},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			w := &bytes.Buffer{}
-			if tt.buffer == 0 {
-				tt.buffer = 1024
-			}
-			if tt.r == nil {
-				tt.r = &bytes.Buffer{}
-			}
-
-			src := tt.r.Bytes()
-			original := make([]byte, len(src))
-			copy(original, src)
-
-			got, err := copyAndReplaceReleaseImage(w, tt.r, tt.buffer, tt.releaseImage)
-			if (err != nil) != tt.wantErr {
-				t.Fatalf("copyAndReplaceReleaseImage() error = %v, wantErr %v", err, tt.wantErr)
-			}
-			if got != (tt.wantIndex != -1) {
-				t.Fatalf("copyAndReplaceReleaseImage() = %v, want %v", got, tt.wantIndex != -1)
-			}
-			if got {
-				if len(w.Bytes()) != len(original) {
-					t.Fatalf("mismatched lengths: %d vs %d \n%s\n%s", len(original), w.Len(), hex.Dump(original), hex.Dump(w.Bytes()))
-				}
-				index := bytes.Index(w.Bytes(), []byte(tt.releaseImage+"\x00"))
-				if index != tt.wantIndex {
-					t.Errorf("expected index %d, got index %d\n%s", tt.wantIndex, index, hex.Dump(w.Bytes()))
-				}
-			} else {
-				if !bytes.Equal(w.Bytes(), original) {
-					t.Fatalf("unexpected response body:\n%s\n%s", hex.Dump(original), hex.Dump(w.Bytes()))
-				}
-			}
-		})
-	}
-}
-
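-// fakeInput builds a buffer from the given segment lengths: a length of zero
-// inserts the replacement marker, any other length inserts that many identical
-// random bytes.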
-func fakeInput(lengths ...int) *bytes.Buffer {
-	buf := &bytes.Buffer{}
-	for _, l := range lengths {
-		if l == 0 {
-			buf.WriteString(installerReplacement)
-		} else {
-			b := byte(rand.Intn(256))
-			buf.Write(bytes.Repeat([]byte{b}, l))
-		}
-	}
-	return buf
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/release/git.go b/vendor/github.com/openshift/oc/pkg/cli/admin/release/git.go
deleted file mode 100644
index eb72a2996503..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/release/git.go
+++ /dev/null
@@ -1,313 +0,0 @@
-package release
-
-import (
-	"bytes"
-	"crypto/md5"
-	"encoding/base64"
-	"errors"
-	"fmt"
-	"io"
-	"net/url"
-	"os"
-	"os/exec"
-	"path"
-	"path/filepath"
-	"regexp"
-	"strconv"
-	"strings"
-	"time"
-
-	"k8s.io/klog"
-)
-
-// git is a wrapper to invoke git safely, similar to
-// github.com/openshift/library-go/pkg/git but giving access to lower level
-// calls. Consider improving pkg/git in the future.
-type git struct {
-	path string
-}
-
-var noSuchRepo = errors.New("location is not a git repo")
-
-func (g *git) exec(command ...string) (string, error) {
-	buf := &bytes.Buffer{}
-	bufErr := &bytes.Buffer{}
-	cmd := exec.Command("git", command...)
-	cmd.Dir = g.path
-	cmd.Stdout = buf
-	cmd.Stderr = bufErr
-	klog.V(5).Infof("Executing git: %v\n", cmd.Args)
-	err := cmd.Run()
-	if err != nil {
-		return bufErr.String(), err
-	}
-	return buf.String(), nil
-}
-
-func (g *git) streamExec(out, errOut io.Writer, command ...string) error {
-	cmd := exec.Command("git", command...)
-	cmd.Dir = g.path
-	cmd.Stdout = out
-	cmd.Stderr = errOut
-	return cmd.Run()
-}
-
-func (g *git) ChangeContext(path string) (*git, error) {
-	location := &git{path: path}
-	if errOut, err := location.exec("rev-parse", "--git-dir"); err != nil {
-		if strings.Contains(strings.ToLower(errOut), "not a git repository") {
-			return location, noSuchRepo
-		}
-		return location, err
-	}
-	return location, nil
-}
-
-func (g *git) Clone(repository string, out, errOut io.Writer) error {
-	cmd := exec.Command("git", "clone", repository, g.path)
-	cmd.Stdout = out
-	cmd.Stderr = errOut
-	return cmd.Run()
-}
-
-func (g *git) parent() *git {
-	return &git{path: filepath.Dir(g.path)}
-}
-
-func (g *git) basename() string {
-	return filepath.Base(g.path)
-}
-
-func (g *git) CheckoutCommit(repo, commit string) error {
-	_, err := g.exec("checkout", commit)
-	if err == nil {
-		return nil
-	}
-
-	// try to fetch by URL
-	klog.V(4).Infof("failed to checkout: %v", err)
-	if err := ensureFetchedRemoteForRepo(g, repo); err == nil {
-		if _, err := g.exec("checkout", commit); err == nil {
-			return nil
-		}
-	} else {
-		klog.V(4).Infof("failed to fetch: %v", err)
-	}
-
-	return fmt.Errorf("could not locate commit %s", commit)
-}
-
-var reMatch = regexp.MustCompile(`^([a-zA-Z0-9\-\_]+)@([^:]+):(.+)$`)
-
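-// sourceLocationAsURL converts an scp-style git location such as
-// "git@github.com:openshift/origin.git" into a URL, and otherwise parses the
-// location directly.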
-func sourceLocationAsURL(location string) (*url.URL, error) {
-	if matches := reMatch.FindStringSubmatch(location); matches != nil {
-		return &url.URL{Scheme: "git", User: url.UserPassword(matches[1], ""), Host: matches[2], Path: matches[3]}, nil
-	}
-	return url.Parse(location)
-}
-
-func sourceLocationAsRelativePath(dir, location string) (string, error) {
-	u, err := sourceLocationAsURL(location)
-	if err != nil {
-		return "", err
-	}
-	gitPath := u.Path
-	if strings.HasSuffix(gitPath, ".git") {
-		gitPath = strings.TrimSuffix(gitPath, ".git")
-	}
-	gitPath = path.Clean(gitPath)
-	basePath := filepath.Join(dir, u.Host, filepath.FromSlash(gitPath))
-	return basePath, nil
-}
-
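-// MergeCommit describes a single merge commit in the repository history,
-// including the pull request and bug numbers parsed from its message.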
-type MergeCommit struct {
-	CommitDate time.Time
-
-	Commit        string
-	ParentCommits []string
-
-	PullRequest int
-	Bug         int
-
-	Subject string
-}
-
-func gitOutputToError(err error, out string) error {
-	out = strings.TrimSpace(out)
-	if strings.HasPrefix(out, "fatal: ") {
-		out = strings.TrimPrefix(out, "fatal: ")
-	}
-	if len(out) == 0 {
-		return err
-	}
-	return errors.New(out)
-}
-
-func mergeLogForRepo(g *git, repo string, from, to string) ([]MergeCommit, error) {
-	if from == to {
-		return nil, nil
-	}
-
-	rePR, err := regexp.Compile(`^Merge pull request #(\d+) from`)
-	if err != nil {
-		return nil, err
-	}
-	reBug, err := regexp.Compile(`^Bug (\d+)\s*(-|:)\s*`)
-	if err != nil {
-		return nil, err
-	}
-
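-	// -z terminates each commit with NUL; the 0x1E separators split each record
-	// into "%H %P" (hash and parents), %ct (commit time), %s (the merge subject
-	// carrying the PR number), and %b (the body carrying the bug and subject).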
-	args := []string{"log", "--merges", "--topo-order", "-z", "--pretty=format:%H %P%x1E%ct%x1E%s%x1E%b", fmt.Sprintf("%s..%s", from, to)}
-	out, err := g.exec(args...)
-	if err != nil {
-		// retry once if there's a chance we haven't fetched the latest commits
-		if !strings.Contains(out, "Invalid revision range") {
-			return nil, gitOutputToError(err, out)
-		}
-		if _, err := g.exec("fetch", "--all"); err != nil {
-			return nil, gitOutputToError(err, out)
-		}
-		if _, err := g.exec("cat-file", "-e", from+"^{commit}"); err != nil {
-			return nil, fmt.Errorf("from commit %s does not exist", from)
-		}
-		if _, err := g.exec("cat-file", "-e", to+"^{commit}"); err != nil {
-			return nil, fmt.Errorf("to commit %s does not exist", to)
-		}
-		out, err = g.exec(args...)
-		if err != nil {
-			return nil, gitOutputToError(err, out)
-		}
-	}
-
-	if klog.V(5) {
-		klog.Infof("Got commit info:\n%s", strconv.Quote(out))
-	}
-
-	var commits []MergeCommit
-	if len(out) == 0 {
-		return nil, nil
-	}
-	for _, entry := range strings.Split(out, "\x00") {
-		records := strings.Split(entry, "\x1e")
-		if len(records) != 4 {
-			return nil, fmt.Errorf("unexpected git log output width %d columns", len(records))
-		}
-		unixTS, err := strconv.ParseInt(records[1], 10, 64)
-		if err != nil {
-			return nil, fmt.Errorf("unexpected timestamp: %v", err)
-		}
-		commitValues := strings.Split(records[0], " ")
-
-		mergeCommit := MergeCommit{
-			CommitDate:    time.Unix(unixTS, 0).UTC(),
-			Commit:        commitValues[0],
-			ParentCommits: commitValues[1:],
-		}
-
-		msg := records[3]
-		if m := reBug.FindStringSubmatch(msg); m != nil {
-			mergeCommit.Subject = msg[len(m[0]):]
-			mergeCommit.Bug, err = strconv.Atoi(m[1])
-			if err != nil {
-				return nil, fmt.Errorf("could not extract bug number from %q: %v", msg, err)
-			}
-		} else {
-			mergeCommit.Subject = msg
-		}
-		mergeCommit.Subject = strings.TrimSpace(mergeCommit.Subject)
-		mergeCommit.Subject = strings.SplitN(mergeCommit.Subject, "\n", 2)[0]
-
-		mergeMsg := records[2]
-		if m := rePR.FindStringSubmatch(mergeMsg); m != nil {
-			mergeCommit.PullRequest, err = strconv.Atoi(m[1])
-			if err != nil {
-				return nil, fmt.Errorf("could not extract PR number from %q: %v", mergeMsg, err)
-			}
-		} else {
-			klog.V(2).Infof("Omitted commit %s which has no pull-request", mergeCommit.Commit)
-			continue
-		}
-		if len(mergeCommit.Subject) == 0 {
-			mergeCommit.Subject = "Merge"
-		}
-
-		commits = append(commits, mergeCommit)
-	}
-
-	return commits, nil
-}
-
-// ensureCloneForRepo ensures that the repo exists on disk, is cloned, and has remotes for
-// both repo and alternateRepos defined. The remotes for alternateRepos will be file system
-// relative to avoid cloning repos twice.
-func ensureCloneForRepo(dir string, repo string, alternateRepos []string, out, errOut io.Writer) (*git, error) {
-	basePath, err := sourceLocationAsRelativePath(dir, repo)
-	if err != nil {
-		return nil, err
-	}
-	klog.V(4).Infof("Ensure repo is cloned at %s pointing to %s", basePath, repo)
-	fi, err := os.Stat(basePath)
-	if err != nil {
-		if !os.IsNotExist(err) {
-			return nil, err
-		}
-		if err := os.MkdirAll(basePath, 0777); err != nil {
-			return nil, err
-		}
-	} else {
-		if !fi.IsDir() {
-			return nil, fmt.Errorf("repo path %s is not a directory", basePath)
-		}
-	}
-	cloner := &git{}
-	extractedRepo, err := cloner.ChangeContext(basePath)
-	if err != nil {
-		if err != noSuchRepo {
-			return nil, err
-		}
-		klog.V(2).Infof("Cloning %s ...", repo)
-		if err := extractedRepo.Clone(repo, out, errOut); err != nil {
-			return nil, err
-		}
-	} else {
-		if err := ensureRemoteForRepo(extractedRepo, repo); err != nil {
-			return nil, err
-		}
-	}
-
-	for _, altRepo := range alternateRepos {
-		if altRepo == repo {
-			continue
-		}
-		if err := ensureRemoteForRepo(extractedRepo, altRepo); err != nil {
-			return nil, err
-		}
-	}
-
-	return extractedRepo, nil
-}
-
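-// remoteNameForRepo derives a stable remote name from a hash of the repository
-// URL so that the same repository always maps to the same remote.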
-func remoteNameForRepo(repo string) string {
-	sum := md5.Sum([]byte(repo))
-	repoName := fmt.Sprintf("up-%s", base64.RawURLEncoding.EncodeToString(sum[:])[:10])
-	return repoName
-}
-
-func ensureRemoteForRepo(g *git, repo string) error {
-	repoName := remoteNameForRepo(repo)
-	if out, err := g.exec("remote", "add", repoName, repo); err != nil && !strings.Contains(out, "already exists") {
-		return gitOutputToError(err, out)
-	}
-	return nil
-}
-
-func ensureFetchedRemoteForRepo(g *git, repo string) error {
-	repoName := remoteNameForRepo(repo)
-	if out, err := g.exec("remote", "add", repoName, repo); err != nil && !strings.Contains(out, "already exists") {
-		return gitOutputToError(err, out)
-	}
-	if out, err := g.exec("fetch", repoName); err != nil {
-		return gitOutputToError(err, out)
-	}
-	return nil
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/release/image_mapper.go b/vendor/github.com/openshift/oc/pkg/cli/admin/release/image_mapper.go
deleted file mode 100644
index bbc492ebe8d1..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/release/image_mapper.go
+++ /dev/null
@@ -1,478 +0,0 @@
-package release
-
-import (
-	"bytes"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"regexp"
-	"sort"
-	"strings"
-
-	"github.com/blang/semver"
-	"github.com/ghodss/yaml"
-	imageapi "github.com/openshift/api/image/v1"
-	imagereference "github.com/openshift/library-go/pkg/image/reference"
-	"k8s.io/klog"
-)
-
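-// Payload represents an extracted release payload directory: the
-// image-references file plus the manifest files that reference component images.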
-type Payload struct {
-	path string
-
-	references *imageapi.ImageStream
-}
-
-func NewPayload(path string) *Payload {
-	return &Payload{path: path}
-}
-
-func (p *Payload) Path() string {
-	return p.path
-}
-
-// Rewrite updates the image stream to point to the locations described by the provided function.
-// If a new ID appears in the returned reference, it will be used instead of the existing digest.
-// All references in manifest files will be updated and then the image stream will be written to
-// the correct location with any updated metadata.
-func (p *Payload) Rewrite(allowTags bool, fn func(component string) imagereference.DockerImageReference) error {
-	is, err := p.References()
-	if err != nil {
-		return err
-	}
-
-	replacements, err := ReplacementsForImageStream(is, allowTags, fn)
-	if err != nil {
-		return err
-	}
-
-	mapper, err := NewExactMapper(replacements)
-	if err != nil {
-		return err
-	}
-
-	files, err := ioutil.ReadDir(p.path)
-	if err != nil {
-		return err
-	}
-	for _, file := range files {
-		if file.IsDir() {
-			continue
-		}
-		if filepath.Base(file.Name()) == "image-references" {
-			continue
-		}
-		path := filepath.Join(p.path, file.Name())
-		data, err := ioutil.ReadFile(path)
-		if err != nil {
-			return err
-		}
-		out, err := mapper(data)
-		if err != nil {
-			return fmt.Errorf("unable to rewrite the contents of %s: %v", path, err)
-		}
-		if bytes.Equal(data, out) {
-			continue
-		}
-		klog.V(6).Infof("Rewrote\n%s\n\nto\n\n%s\n", string(data), string(out))
-		if err := ioutil.WriteFile(path, out, file.Mode()); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (p *Payload) References() (*imageapi.ImageStream, error) {
-	if p.references != nil {
-		return p.references, nil
-	}
-	is, err := parseImageStream(filepath.Join(p.path, "image-references"))
-	if err != nil {
-		return nil, err
-	}
-	p.references = is
-	return is, nil
-}
-
-func parseImageStream(path string) (*imageapi.ImageStream, error) {
-	data, err := ioutil.ReadFile(path)
-	if os.IsNotExist(err) {
-		return nil, err
-	}
-	if err != nil {
-		return nil, fmt.Errorf("unable to read release image info from release contents: %v", err)
-	}
-	return readReleaseImageReferences(data)
-}
-
-func readReleaseImageReferences(data []byte) (*imageapi.ImageStream, error) {
-	is := &imageapi.ImageStream{}
-	if err := yaml.Unmarshal(data, &is); err != nil {
-		return nil, fmt.Errorf("unable to load release image-references: %v", err)
-	}
-	if is.Kind != "ImageStream" || is.APIVersion != "image.openshift.io/v1" {
-		return nil, fmt.Errorf("unrecognized image-references in release payload")
-	}
-	return is, nil
-}
-
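-// ManifestMapper transforms the raw bytes of a manifest file and returns the
-// rewritten content.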
-type ManifestMapper func(data []byte) ([]byte, error)
-
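-// NewTransformFromImageStreamFile builds a ManifestMapper that rewrites image
-// references in manifests from the image stream at path to the pull specs found
-// in input, and substitutes 0.0.1-snapshot version placeholders with the
-// versions recorded on the input stream's tags.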
-func NewTransformFromImageStreamFile(path string, input *imageapi.ImageStream, allowMissingImages bool) (ManifestMapper, error) {
-	is, err := parseImageStream(path)
-	if err != nil {
-		return nil, err
-	}
-
-	references := make(map[string]ImageReference)
-	for _, tag := range is.Spec.Tags {
-		if tag.From == nil || tag.From.Kind != "DockerImage" {
-			continue
-		}
-		if len(tag.From.Name) == 0 {
-			return nil, fmt.Errorf("no from.name for the tag %s", tag.Name)
-		}
-		ref := ImageReference{SourceRepository: tag.From.Name}
-		for _, inputTag := range input.Spec.Tags {
-			if inputTag.Name == tag.Name && inputTag.From != nil {
-				ref.TargetPullSpec = inputTag.From.Name
-				break
-			}
-		}
-		if len(ref.TargetPullSpec) == 0 {
-			if allowMissingImages {
-				klog.V(2).Infof("Image file %q referenced an image %q that is not part of the input images, skipping", path, tag.From.Name)
-				continue
-			}
-			return nil, fmt.Errorf("no input image tag named %q", tag.Name)
-		}
-		references[tag.Name] = ref
-	}
-	imageMapper, err := NewImageMapper(references)
-	if err != nil {
-		return nil, err
-	}
-
-	// load all version values from the input stream, including any defaults, to perform
-	// version substitution in the returned manifests.
-	versions := make(map[string]string)
-	tagsByName := make(map[string][]string)
-	for _, tag := range input.Spec.Tags {
-		if _, ok := references[tag.Name]; !ok {
-			continue
-		}
-		value, ok := tag.Annotations[annotationBuildVersions]
-		if !ok {
-			continue
-		}
-		klog.V(4).Infof("Found build versions from %s: %s", tag.Name, value)
-		items, err := parseComponentVersionsLabel(value)
-		if err != nil {
-			return nil, fmt.Errorf("input image stream has an invalid version annotation for tag %q: %v", tag.Name, value)
-		}
-		for k, v := range items {
-			existing, ok := versions[k]
-			if ok {
-				if existing != v {
-					return nil, fmt.Errorf("input image stream has multiple versions defined for version %s: %s defines %s but was already set to %s on %s", k, tag.Name, v, existing, strings.Join(tagsByName[k], ", "))
-				}
-			} else {
-				versions[k] = v
-				klog.V(4).Infof("Found version %s=%s from %s", k, v, tag.Name)
-			}
-			tagsByName[k] = append(tagsByName[k], tag.Name)
-		}
-	}
-	defaults, err := parseComponentVersionsLabel(input.Annotations[annotationBuildVersions])
-	if err != nil {
-		return nil, fmt.Errorf("unable to read default versions label on input image stream: %v", err)
-	}
-	for k, v := range defaults {
-		if _, ok := versions[k]; !ok {
-			versions[k] = v
-		}
-	}
-
-	versionMapper := NewComponentVersionsMapper(input.Name, versions, tagsByName)
-	return func(data []byte) ([]byte, error) {
-		data, err := imageMapper(data)
-		if err != nil {
-			return nil, err
-		}
-		return versionMapper(data)
-	}, nil
-}
-
-type ImageReference struct {
-	SourceRepository string
-	TargetPullSpec   string
-}
-
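-// NopManifestMapper returns the input data unchanged.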
-func NopManifestMapper(data []byte) ([]byte, error) {
-	return data, nil
-}
-
-// patternImageFormat attempts to match a docker pull spec by prefix (%s) and capture the
-// prefix and either a tag or digest. It requires leading and trailing whitespace, quotes, or
-// end of file.
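-// For example, given the prefix "quay.io/coreos/etcd" it matches
-// "quay.io/coreos/etcd:latest" and "quay.io/coreos/etcd@sha256:5678" but not
-// "https://quay.io/coreos/etcd:8443/test".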
-const patternImageFormat = `([\W]|^)(%s)(:[\w][\w.-]{0,127}|@[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{2,})?([\s"']|$)`
-
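-// NewImageMapper returns a ManifestMapper that rewrites every occurrence of a
-// source repository (bare, tagged, or digested) to its target pull spec.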
-func NewImageMapper(images map[string]ImageReference) (ManifestMapper, error) {
-	repositories := make([]string, 0, len(images))
-	bySource := make(map[string]string)
-	for name, ref := range images {
-		if len(ref.SourceRepository) == 0 {
-			return nil, fmt.Errorf("an empty source repository is not allowed for name %q", name)
-		}
-		if existing, ok := bySource[ref.SourceRepository]; ok {
-			return nil, fmt.Errorf("the source repository %q was defined more than once (for %q and %q)", ref.SourceRepository, existing, name)
-		}
-		bySource[ref.SourceRepository] = name
-		repositories = append(repositories, regexp.QuoteMeta(ref.SourceRepository))
-	}
-	if len(repositories) == 0 {
-		klog.V(5).Infof("No images are mapped, will not replace any contents")
-		return NopManifestMapper, nil
-	}
-	pattern := fmt.Sprintf(patternImageFormat, strings.Join(repositories, "|"))
-	re := regexp.MustCompile(pattern)
-
-	return func(data []byte) ([]byte, error) {
-		out := re.ReplaceAllFunc(data, func(in []byte) []byte {
-			parts := re.FindSubmatch(in)
-			repository := string(parts[2])
-			name, ok := bySource[repository]
-			if !ok {
-				klog.V(4).Infof("found potential image %q, but no matching definition", repository)
-				return in
-			}
-			ref := images[name]
-
-			suffix := parts[3]
-			klog.V(2).Infof("found repository %q with locator %q in the input, switching to %q (from pattern %s)", string(repository), string(suffix), ref.TargetPullSpec, pattern)
-			switch {
-			case len(suffix) == 0:
-				// we found a repository, but no tag or digest (implied latest), or we got an exact match
-				return []byte(string(parts[1]) + ref.TargetPullSpec + string(parts[4]))
-			case suffix[0] == '@':
-				// we got a digest
-				return []byte(string(parts[1]) + ref.TargetPullSpec + string(parts[4]))
-			default:
-				// TODO: we didn't get a digest, so we have to decide what to replace
-				return []byte(string(parts[1]) + ref.TargetPullSpec + string(parts[4]))
-			}
-		})
-		return out, nil
-	}, nil
-}
-
-// exactImageFormat attempts to match a string on word boundaries
-const exactImageFormat = `\b%s\b`
-
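-// NewExactMapper returns a ManifestMapper that replaces each source string with
-// its target only on word boundaries, leaving all other content intact.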
-func NewExactMapper(mappings map[string]string) (ManifestMapper, error) {
-	patterns := make(map[string]*regexp.Regexp)
-	for from, to := range mappings {
-		pattern := fmt.Sprintf(exactImageFormat, regexp.QuoteMeta(from))
-		re, err := regexp.Compile(pattern)
-		if err != nil {
-			return nil, err
-		}
-		patterns[to] = re
-	}
-
-	return func(data []byte) ([]byte, error) {
-		for to, pattern := range patterns {
-			data = pattern.ReplaceAll(data, []byte(to))
-		}
-		return data, nil
-	}, nil
-}
-
-func ComponentReferencesForImageStream(is *imageapi.ImageStream) (func(string) imagereference.DockerImageReference, error) {
-	components := make(map[string]imagereference.DockerImageReference)
-	for _, tag := range is.Spec.Tags {
-		if tag.From == nil || tag.From.Kind != "DockerImage" {
-			continue
-		}
-		ref, err := imagereference.Parse(tag.From.Name)
-		if err != nil {
-			return nil, fmt.Errorf("reference for %q is invalid: %v", tag.Name, err)
-		}
-		components[tag.Name] = ref
-	}
-	return func(component string) imagereference.DockerImageReference {
-		ref, ok := components[component]
-		if !ok {
-			panic(fmt.Errorf("unknown component %s", component))
-		}
-		return ref
-	}, nil
-}
-
-const (
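-	// componentVersionFormat matches the placeholder version 0.0.1-snapshot and
-	// any suffixed form such as 0.0.1-snapshot-kubernetes.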
-	componentVersionFormat = `([\W]|^)0\.0\.1-snapshot([a-z0-9\-]*)`
-)
-
-// NewComponentVersionsMapper substitutes strings of the form 0.0.1-snapshot with releaseName and strings
-// of the form 0.0.1-snapshot-[component] with the version value located in versions, or returns an error.
-// tagsByName allows the caller to return an error if references are ambiguous (two tags declare different
-// version values) - if that replacement is detected and tagsByName[component] has more than one entry,
-// then an error is returned by the ManifestMapper.
-// If the input release name is not a semver, a request for `0.0.1-snapshot` will be left unmodified.
-func NewComponentVersionsMapper(releaseName string, versions map[string]string, tagsByName map[string][]string) ManifestMapper {
-	if v, err := semver.Parse(releaseName); err == nil {
-		v.Build = nil
-		releaseName = v.String()
-	} else {
-		releaseName = ""
-	}
-	re, err := regexp.Compile(componentVersionFormat)
-	if err != nil {
-		return func([]byte) ([]byte, error) {
-			return nil, fmt.Errorf("component versions mapper regex: %v", err)
-		}
-	}
-	return func(data []byte) ([]byte, error) {
-		var missing []string
-		var conflicts []string
-		data = re.ReplaceAllFunc(data, func(part []byte) []byte {
-			matches := re.FindSubmatch(part)
-			if matches == nil {
-				return part
-			}
-			key := string(matches[2])
-			if len(key) == 0 && len(releaseName) > 0 {
-				buf := &bytes.Buffer{}
-				buf.Write(matches[1])
-				buf.WriteString(releaseName)
-				return buf.Bytes()
-			}
-			if !strings.HasPrefix(key, "-") {
-				return part
-			}
-			key = key[1:]
-			value, ok := versions[key]
-			if !ok {
-				missing = append(missing, key)
-				return part
-			}
-			if len(tagsByName[key]) > 1 {
-				conflicts = append(conflicts, key)
-				return part
-			}
-			buf := &bytes.Buffer{}
-			buf.Write(matches[1])
-			buf.WriteString(value)
-			return buf.Bytes()
-		})
-		if len(missing) > 0 {
-			switch len(missing) {
-			case 1:
-				if len(missing[0]) == 0 {
-					return nil, fmt.Errorf("empty version references are not allowed")
-				}
-				return nil, fmt.Errorf("unknown version reference %q", missing[0])
-			default:
-				return nil, fmt.Errorf("unknown version references: %s", strings.Join(missing, ", "))
-			}
-		}
-		if len(conflicts) > 0 {
-			allImageTags := tagsByName[conflicts[0]]
-			sort.Strings(allImageTags)
-			return nil, fmt.Errorf("the version for %q is inconsistent across the referenced images: %s", conflicts[0], strings.Join(allImageTags, ", "))
-		}
-		return data, nil
-	}
-}
-
-var (
-	reAllowedVersionKey = regexp.MustCompile(`^[a-z0-9]+[\-a-z0-9]*[a-z0-9]+$`)
-)
-
-// ComponentVersions is a map of component names to semantic versions. Names are
-// lowercase alphanumeric and dashes. Semantic versions will have all build
-// labels removed, but prerelease segments are preserved.
-type ComponentVersions map[string]string
-
-func (v ComponentVersions) String() string {
-	var keys []string
-	for k := range v {
-		keys = append(keys, k)
-	}
-	sort.Strings(keys)
-	buf := &bytes.Buffer{}
-	for i, k := range keys {
-		if i != 0 {
-			buf.WriteRune(',')
-		}
-		fmt.Fprintf(buf, "%s=%s", k, v[k])
-	}
-	return buf.String()
-}
-
-// parseComponentVersionsLabel returns the version labels specified in the string or
-// an error. Labels are comma-delimited, key=value pairs, and surrounding whitespace is
-// ignored. Names must consist of a-z and 0-9, with optional interior dashes. All values must be
-// semantic versions.
-func parseComponentVersionsLabel(label string) (ComponentVersions, error) {
-	label = strings.TrimSpace(label)
-	if len(label) == 0 {
-		return nil, nil
-	}
-	labels := make(map[string]string)
-	for _, pair := range strings.Split(label, ",") {
-		pair = strings.TrimSpace(pair)
-		parts := strings.SplitN(pair, "=", 2)
-		if len(parts) == 1 {
-			return nil, fmt.Errorf("the version pair %q must be NAME=VERSION", pair)
-		}
-		if !reAllowedVersionKey.MatchString(parts[0]) {
-			return nil, fmt.Errorf("the version name %q must only be ASCII alphanumerics and internal hyphens", parts[0])
-		}
-		v, err := semver.Parse(parts[1])
-		if err != nil {
-			return nil, fmt.Errorf("the version pair %q must have a valid semantic version: %v", pair, err)
-		}
-		v.Build = nil
-		labels[parts[0]] = v.String()
-	}
-	return labels, nil
-}
-
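-// ReplacementsForImageStream computes the mapping from each tag's current pull
-// spec to the pull spec returned by fn, updating the image stream in place.
-// When allowTags is false, references without a digest are rejected and the
-// original digest is preserved when fn does not supply one.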
-func ReplacementsForImageStream(is *imageapi.ImageStream, allowTags bool, fn func(component string) imagereference.DockerImageReference) (map[string]string, error) {
-	replacements := make(map[string]string)
-	for i := range is.Spec.Tags {
-		tag := &is.Spec.Tags[i]
-		if tag.From == nil || tag.From.Kind != "DockerImage" {
-			continue
-		}
-		oldImage := tag.From.Name
-		oldRef, err := imagereference.Parse(oldImage)
-		if err != nil {
-			return nil, fmt.Errorf("unable to parse image reference for tag %q from payload: %v", tag.Name, err)
-		}
-		if len(oldRef.Tag) > 0 || len(oldRef.ID) == 0 {
-			if !allowTags {
-				return nil, fmt.Errorf("image reference tag %q in payload does not point to an image digest - unable to rewrite payload", tag.Name)
-			}
-		}
-		ref := fn(tag.Name)
-		if !allowTags {
-			if len(ref.ID) == 0 {
-				ref.Tag = ""
-				ref.ID = oldRef.ID
-			}
-		}
-		newImage := ref.Exact()
-		replacements[oldImage] = newImage
-		tag.From.Name = newImage
-	}
-
-	if klog.V(5) {
-		for k, v := range replacements {
-			klog.Infof("Mapping %s -> %s", k, v)
-		}
-	}
-	return replacements, nil
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/release/image_mapper_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/release/image_mapper_test.go
deleted file mode 100644
index fa89600029f0..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/release/image_mapper_test.go
+++ /dev/null
@@ -1,365 +0,0 @@
-package release
-
-import (
-	"strings"
-	"testing"
-)
-
-func TestNewImageMapper(t *testing.T) {
-	type args struct {
-		images map[string]ImageReference
-	}
-	tests := []struct {
-		name    string
-		args    args
-		input   string
-		output  string
-		wantErr bool
-	}{
-		// TODO: Add test cases.
-		{name: "empty input"},
-		{
-			name: "empty source repository",
-			args: args{
-				images: map[string]ImageReference{
-					"etcd": {
-						TargetPullSpec: "quay.io/openshift/origin-etcd@sha256:1234",
-					},
-				},
-			},
-			wantErr: true,
-		},
-		{
-			name: "duplicate source repositories",
-			args: args{
-				images: map[string]ImageReference{
-					"etcd": {
-						SourceRepository: "quay.io/coreos/etcd",
-						TargetPullSpec:   "quay.io/openshift/origin-etcd@sha256:1234",
-					},
-					"etcd-2": {
-						SourceRepository: "quay.io/coreos/etcd",
-						TargetPullSpec:   "quay.io/openshift/origin-etcd@sha256:5678",
-					},
-				},
-			},
-			wantErr: true,
-		},
-		{
-			name: "replace repository with tag",
-			args: args{
-				images: map[string]ImageReference{
-					"etcd": {
-						SourceRepository: "quay.io/coreos/etcd",
-						TargetPullSpec:   "quay.io/openshift/origin-etcd@sha256:1234",
-					},
-				},
-			},
-			input:  "image: quay.io/coreos/etcd:latest",
-			output: "image: quay.io/openshift/origin-etcd@sha256:1234",
-		},
-		{
-			name: "replace tag with digest",
-			args: args{
-				images: map[string]ImageReference{
-					"etcd": {
-						SourceRepository: "quay.io/coreos/etcd:latest",
-						TargetPullSpec:   "quay.io/openshift/origin-etcd@sha256:1234",
-					},
-				},
-			},
-			input:  "image: quay.io/coreos/etcd:latest",
-			output: "image: quay.io/openshift/origin-etcd@sha256:1234",
-		},
-		{
-			name: "replace repository with tag with trailing whitespace",
-			args: args{
-				images: map[string]ImageReference{
-					"etcd": {
-						SourceRepository: "quay.io/coreos/etcd",
-						TargetPullSpec:   "quay.io/openshift/origin-etcd@sha256:1234",
-					},
-				},
-			},
-			input:  "image: quay.io/coreos/etcd:latest\n",
-			output: "image: quay.io/openshift/origin-etcd@sha256:1234\n",
-		},
-		{
-			name: "replace repository with digest",
-			args: args{
-				images: map[string]ImageReference{
-					"etcd": {
-						SourceRepository: "quay.io/coreos/etcd",
-						TargetPullSpec:   "quay.io/openshift/origin-etcd@sha256:1234",
-					},
-				},
-			},
-			input:  "image: quay.io/coreos/etcd@sha256:5678",
-			output: "image: quay.io/openshift/origin-etcd@sha256:1234",
-		},
-		{
-			name: "replace with digest on a multi-line file with quotes and newlines",
-			args: args{
-				images: map[string]ImageReference{
-					"etcd": {
-						SourceRepository: "quay.io/openshift/origin-prometheus:latest",
-						TargetPullSpec:   "quay.io/openshift/origin-prometheus@sha256:1234",
-					},
-				},
-			},
-			input: `
-	- "-images=prometheus=quay.io/openshift/origin-prometheus:latest"
-	- "-images=alertmanager=quay.io/openshift/origin-prometheus-alertmanager:latest"
-`,
-			output: `
-	- "-images=prometheus=quay.io/openshift/origin-prometheus@sha256:1234"
-	- "-images=alertmanager=quay.io/openshift/origin-prometheus-alertmanager:latest"
-`,
-		},
-		{
-			name: "replace with digest on a multi-line file with quotes and newlines",
-			args: args{
-				images: map[string]ImageReference{
-					"etcd": {
-						SourceRepository: "quay.io/openshift/origin-prometheus:latest",
-						TargetPullSpec:   "quay.io/openshift/origin-prometheus@sha256:1234",
-					},
-				},
-			},
-			input: `
-	- "quay.io/openshift/origin-prometheus:latest"
-`,
-			output: `
-	- "quay.io/openshift/origin-prometheus@sha256:1234"
-`,
-		},
-		{
-			name: "replace bare repository when told to do so",
-			args: args{
-				images: map[string]ImageReference{
-					"etcd": {
-						SourceRepository: "quay.io/coreos/etcd",
-						TargetPullSpec:   "quay.io/openshift/origin-etcd@sha256:1234",
-					},
-				},
-			},
-			input:  "image: quay.io/coreos/etcd",
-			output: "image: quay.io/openshift/origin-etcd@sha256:1234",
-		},
-		{
-			name: "replace bare repository with trailing whitespace when told to do so",
-			args: args{
-				images: map[string]ImageReference{
-					"etcd": {
-						SourceRepository: "quay.io/coreos/etcd",
-						TargetPullSpec:   "quay.io/openshift/origin-etcd@sha256:1234",
-					},
-				},
-			},
-			input:  "image: quay.io/coreos/etcd ",
-			output: "image: quay.io/openshift/origin-etcd@sha256:1234 ",
-		},
-		{
-			name: "Ignore things that only look like images",
-			args: args{
-				images: map[string]ImageReference{
-					"etcd": {
-						SourceRepository: "quay.io/coreos/etcd",
-						TargetPullSpec:   "quay.io/openshift/origin-etcd@sha256:1234",
-					},
-				},
-			},
-			input:  "example_url: https://quay.io/coreos/etcd:8443/test",
-			output: "example_url: https://quay.io/coreos/etcd:8443/test",
-		},
-		{
-			name: "replace entire file - just to verify the regex",
-			args: args{
-				images: map[string]ImageReference{
-					"etcd": {
-						SourceRepository: "quay.io/coreos/etcd",
-						TargetPullSpec:   "quay.io/openshift/origin-etcd@sha256:1234",
-					},
-				},
-			},
-			input:  "quay.io/coreos/etcd:latest",
-			output: "quay.io/openshift/origin-etcd@sha256:1234",
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			m, err := NewImageMapper(tt.args.images)
-			if (err != nil) != tt.wantErr {
-				t.Fatal(err)
-			}
-			if err != nil {
-				return
-			}
-			out, err := m([]byte(tt.input))
-			if (err != nil) != tt.wantErr {
-				t.Fatal(err)
-			}
-			if err != nil {
-				return
-			}
-			if string(out) != tt.output {
-				t.Errorf("unexpected output, wanted\n%s\ngot\n%s", tt.output, string(out))
-			}
-		})
-	}
-}
-
-func TestNewExactMapper(t *testing.T) {
-	type args struct {
-		mappings map[string]string
-	}
-	tests := []struct {
-		name    string
-		args    args
-		input   string
-		output  string
-		wantErr bool
-	}{
-		{
-			name:   "replace at end of file",
-			args:   args{mappings: map[string]string{"reg/repo@sha256:01234": "reg2/repo2@sha256:01234"}},
-			input:  "image: reg/repo@sha256:01234",
-			output: "image: reg2/repo2@sha256:01234",
-		},
-		{
-			name:   "replace at beginning of file",
-			args:   args{mappings: map[string]string{"reg/repo@sha256:01234": "reg2/repo2@sha256:01234"}},
-			input:  "reg/repo@sha256:01234",
-			output: "reg2/repo2@sha256:01234",
-		},
-		// TODO: Add test cases.
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			m, err := NewExactMapper(tt.args.mappings)
-			if (err != nil) != tt.wantErr {
-				t.Fatal(err)
-			}
-			if err != nil {
-				return
-			}
-			out, err := m([]byte(tt.input))
-			if (err != nil) != tt.wantErr {
-				t.Fatal(err)
-			}
-			if err != nil {
-				return
-			}
-			if string(out) != tt.output {
-				t.Errorf("unexpected output, wanted\n%s\ngot\n%s", tt.output, string(out))
-			}
-		})
-	}
-}
-
-func TestNewComponentVersionsMapper(t *testing.T) {
-	type args struct {
-	}
-	tests := []struct {
-		name        string
-		releaseName string
-		versions    map[string]string
-		imagesByTag map[string][]string
-		in          string
-		out         string
-		wantErr     string
-	}{
-		{
-			in:  `version: 0.0.1-snapshot\n`,
-			out: `version: 0.0.1-snapshot\n`,
-		},
-		{
-			in:      `version: 0.0.1-snapshot-\n`,
-			wantErr: `empty version references are not allowed`,
-		},
-		{
-			in:      `version: 0.0.1-snapshot-a\n`,
-			wantErr: `unknown version reference "a"`,
-		},
-		{
-			releaseName: "2.0.0",
-			in:          `version: 0.0.1-snapshot\n`,
-			out:         `version: 2.0.0\n`,
-		},
-		{
-			name:        "release name is not semver",
-			releaseName: "2.0",
-			in:          `version: 0.0.1-snapshot\n`,
-			out:         `version: 0.0.1-snapshot\n`,
-		},
-		{
-			versions: map[string]string{"a": "2.0.0"},
-			in:       `version: 0.0.1-snapshot-a\n`,
-			out:      `version: 2.0.0\n`,
-		},
-		{
-			versions:    map[string]string{"a": "2.0.0"},
-			imagesByTag: map[string][]string{"a": {"tag1", "tag2"}},
-			in:          `version: 0.0.1-snapshot-a\n`,
-			wantErr:     `the version for "a" is inconsistent across the referenced images: tag1, tag2`,
-		},
-		{
-			versions:    map[string]string{"a": "2.0.0", "b": "3.0.0"},
-			imagesByTag: map[string][]string{"a": {"tag1", "tag2"}},
-			in:          `version: 0.0.1-snapshot-b\n`,
-			out:         `version: 3.0.0\n`,
-		},
-		{
-			versions: map[string]string{"a": "2.0.0"},
-			in:       `version: 0.0.1-snapshot-a`,
-			out:      `version: 2.0.0`,
-		},
-		{
-			versions: map[string]string{"a": "2.0.0"},
-			in:       `0.0.1-snapshot-a`,
-			out:      `2.0.0`,
-		},
-		{
-			versions: map[string]string{"a": "2.0.0"},
-			in:       `:0.0.1-snapshot-a`,
-			out:      `:2.0.0`,
-		},
-		{
-			versions: map[string]string{"a": "2.0.0"},
-			in:       `-0.0.1-snapshot-a_`,
-			out:      `-2.0.0_`,
-		},
-		{
-			versions: map[string]string{"a": "2.0.0"},
-			in:       `0.0.1-snapshot-a 0.0.1-snapshot-b`,
-			wantErr:  `unknown version reference "b"`,
-		},
-		{
-			versions: map[string]string{"a": "2.0.0", "b": "1.0.0"},
-			in:       `0.0.1-snapshot-a 0.0.1-snapshot-b`,
-			out:      `2.0.0 1.0.0`,
-		},
-		{
-			in:      `0.0.1-snapshot-a0.0.1-snapshot-b`,
-			wantErr: `unknown version reference "a0"`,
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			m := NewComponentVersionsMapper(tt.releaseName, tt.versions, tt.imagesByTag)
-			out, err := m([]byte(tt.in))
-			if (err != nil) != (len(tt.wantErr) > 0) {
-				t.Fatalf("unexpected error: %v", err)
-			}
-			if err != nil {
-				if !strings.Contains(err.Error(), tt.wantErr) {
-					t.Fatalf("unexpected error: %v", err)
-				}
-				return
-			}
-			if tt.out != string(out) {
-				t.Errorf("mismatch:\n%s\n%s", tt.out, out)
-			}
-		})
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/release/info.go b/vendor/github.com/openshift/oc/pkg/cli/admin/release/info.go
deleted file mode 100644
index 1d484ae9a72f..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/release/info.go
+++ /dev/null
@@ -1,1633 +0,0 @@
-package release
-
-import (
-	"archive/tar"
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"os"
-	"path"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"text/tabwriter"
-	"time"
-
-	"github.com/MakeNowJust/heredoc"
-	"github.com/blang/semver"
-	"github.com/docker/distribution"
-	units "github.com/docker/go-units"
-	digest "github.com/opencontainers/go-digest"
-	"github.com/spf13/cobra"
-	"k8s.io/klog"
-
-	"k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/duration"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	imageapi "github.com/openshift/api/image/v1"
-	configv1client "github.com/openshift/client-go/config/clientset/versioned"
-	"github.com/openshift/library-go/pkg/image/dockerv1client"
-	imagereference "github.com/openshift/library-go/pkg/image/reference"
-	"github.com/openshift/oc/pkg/cli/image/extract"
-	imageinfo "github.com/openshift/oc/pkg/cli/image/info"
-	imagemanifest "github.com/openshift/oc/pkg/cli/image/manifest"
-)
-
-func NewInfoOptions(streams genericclioptions.IOStreams) *InfoOptions {
-	return &InfoOptions{
-		IOStreams:       streams,
-		ParallelOptions: imagemanifest.ParallelOptions{MaxPerRegistry: 4},
-	}
-}
-
-func NewInfo(f kcmdutil.Factory, parentName string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewInfoOptions(streams)
-	cmd := &cobra.Command{
-		Use:   "info IMAGE [--changes-from=IMAGE] [--verify|--commits|--pullspecs]",
-		Short: "Display information about a release",
-		Long: templates.LongDesc(`
-			Show information about an OpenShift release
-
-			This command retrieves, verifies, and formats the information describing an OpenShift update.
-			Updates are delivered as container images with metadata describing the component images and
-			the configuration necessary to install the system operators. A release image is usually
-			referenced via its content digest, which allows this command and the update infrastructure to
-			validate that updates have not been tampered with.
-
-			If no arguments are specified the release of the currently connected cluster is displayed.
-			Specify one or more images via pull spec to see details of each release image. The --commits
-			flag will display the Git commit IDs and repository URLs for the source of each component
-			image. The --pullspecs flag will display the full component image pull spec. --size will show
-			a breakdown of each image, its layers, and the total size of the payload. --contents shows
-			the configuration that will be applied to the cluster when the update is run. If you
-			specify two images, the difference between the first and second image will be shown. You
-			may use -o name, -o digest, or -o pullspec to output the tag name, digest, or pull spec
-			of each image referenced in the release image.
-
-			The --verify flag will display one summary line per input release image and verify the
-			integrity of each. The command will return an error if the release has been tampered with.
-			Passing a pull spec with a digest (e.g. quay.io/openshift/release@sha256:a9bc...) instead of
-			a tag when verifying an image is recommended since it ensures an attacker cannot trick you
-			into installing an older, potentially vulnerable version.
-
-			The --bugs and --changelog flags will use git to clone the source of the release and display
-			the code changes that occurred between the two release arguments. This operation is slow
-			and requires sufficient disk space on the selected drive to clone all repositories.
-		`),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Validate())
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-	flags := cmd.Flags()
-	o.SecurityOptions.Bind(flags)
-	o.ParallelOptions.Bind(flags)
-
-	flags.StringVar(&o.From, "changes-from", o.From, "Show changes from this image to the requested image.")
-
-	flags.BoolVar(&o.Verify, "verify", o.Verify, "Verify the integrity of each release image and display one summary line per image.")
-
-	flags.BoolVar(&o.ShowContents, "contents", o.ShowContents, "Display the contents of a release.")
-	flags.BoolVar(&o.ShowCommit, "commits", o.ShowCommit, "Display information about the source an image was created with.")
-	flags.BoolVar(&o.ShowPullSpec, "pullspecs", o.ShowPullSpec, "Display the pull spec of each image instead of the digest.")
-	flags.BoolVar(&o.ShowSize, "size", o.ShowSize, "Display the size of each image including overlap.")
-	flags.StringVar(&o.ImageFor, "image-for", o.ImageFor, "Print the pull spec of the specified image or an error if it does not exist.")
-	flags.StringVarP(&o.Output, "output", "o", o.Output, "Display the release info in an alternative format: json")
-	flags.StringVar(&o.ChangelogDir, "changelog", o.ChangelogDir, "Generate changelog output from the git directories extracted to this path.")
-	flags.StringVar(&o.BugsDir, "bugs", o.BugsDir, "Generate bug listings from the changelogs in the git repositories extracted to this path.")
-	flags.BoolVar(&o.IncludeImages, "include-images", o.IncludeImages, "When displaying JSON output of a release, include the images the release references.")
-	return cmd
-}
-
-type InfoOptions struct {
-	genericclioptions.IOStreams
-
-	Images []string
-	From   string
-
-	Output        string
-	ImageFor      string
-	IncludeImages bool
-	ShowContents  bool
-	ShowCommit    bool
-	ShowPullSpec  bool
-	ShowSize      bool
-	Verify        bool
-
-	ChangelogDir string
-	BugsDir      string
-
-	ParallelOptions imagemanifest.ParallelOptions
-	SecurityOptions imagemanifest.SecurityOptions
-}
-
-func (o *InfoOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	if len(args) == 0 {
-		cfg, err := f.ToRESTConfig()
-		if err != nil {
-			return fmt.Errorf("info expects one argument, or a connection to an OpenShift 4.x server: %v", err)
-		}
-		client, err := configv1client.NewForConfig(cfg)
-		if err != nil {
-			return fmt.Errorf("info expects one argument, or a connection to an OpenShift 4.x server: %v", err)
-		}
-		cv, err := client.ConfigV1().ClusterVersions().Get("version", metav1.GetOptions{})
-		if err != nil {
-			if errors.IsNotFound(err) {
-				return fmt.Errorf("you must be connected to an OpenShift 4.x server to fetch the current version")
-			}
-			return fmt.Errorf("info expects one argument, or a connection to an OpenShift 4.x server: %v", err)
-		}
-		image := cv.Status.Desired.Image
-		if len(image) == 0 && cv.Spec.DesiredUpdate != nil {
-			image = cv.Spec.DesiredUpdate.Image
-		}
-		if len(image) == 0 {
-			return fmt.Errorf("the server is not reporting a release image at this time, please specify an image to view")
-		}
-		args = []string{image}
-	}
-	if len(args) < 1 {
-		return fmt.Errorf("info expects at least one argument, a release image pull spec")
-	}
-	o.Images = args
-	if len(o.From) == 0 && len(o.Images) == 2 && !o.Verify {
-		o.From = o.Images[0]
-		o.Images = o.Images[1:]
-	}
-	return nil
-}
-
-func (o *InfoOptions) Validate() error {
-	count := 0
-	if len(o.ImageFor) > 0 {
-		count++
-	}
-	if o.ShowCommit {
-		count++
-	}
-	if o.ShowPullSpec {
-		count++
-	}
-	if o.ShowContents {
-		count++
-	}
-	if o.ShowSize {
-		count++
-	}
-	if o.Verify {
-		count++
-	}
-	if count > 1 {
-		return fmt.Errorf("only one of --image-for, --commits, --pullspecs, --contents, --size, --verify may be specified")
-	}
-	if len(o.ImageFor) > 0 && len(o.Output) > 0 {
-		return fmt.Errorf("--output and --image-for may not both be specified")
-	}
-	if len(o.ChangelogDir) > 0 || len(o.BugsDir) > 0 {
-		if len(o.From) == 0 {
-			return fmt.Errorf("--changelog/--bugs require --from")
-		}
-	}
-	if len(o.ChangelogDir) > 0 && len(o.BugsDir) > 0 {
-		return fmt.Errorf("--changelog and --bugs may not both be specified")
-	}
-	switch {
-	case len(o.BugsDir) > 0:
-		switch o.Output {
-		case "", "name":
-		default:
-			return fmt.Errorf("--output only supports 'name' for --bugs")
-		}
-	case len(o.ChangelogDir) > 0:
-		if len(o.Output) > 0 {
-			return fmt.Errorf("--output is not supported for this mode")
-		}
-	default:
-		switch o.Output {
-		case "", "json", "pullspec", "digest", "name":
-		default:
-			return fmt.Errorf("--output only supports 'name', 'json', 'pullspec', or 'digest'")
-		}
-	}
-
-	if len(o.Images) == 0 {
-		return fmt.Errorf("must specify a release image as an argument")
-	}
-	if len(o.From) > 0 && len(o.Images) != 1 {
-		return fmt.Errorf("must specify a single release image as argument when comparing to another release image")
-	}
-
-	return nil
-}
-
-func (o *InfoOptions) Run() error {
-	fetchImages := o.ShowSize || o.Verify || o.IncludeImages
-
-	if len(o.From) > 0 && !o.Verify {
-		if o.ShowContents {
-			return diffContents(o.From, o.Images[0], o.Out)
-		}
-
-		var baseRelease *ReleaseInfo
-		var baseErr error
-		done := make(chan struct{})
-		go func() {
-			defer close(done)
-			baseRelease, baseErr = o.LoadReleaseInfo(o.From, fetchImages)
-		}()
-
-		release, err := o.LoadReleaseInfo(o.Images[0], fetchImages)
-		if err != nil {
-			return err
-		}
-
-		<-done
-		if baseErr != nil {
-			return baseErr
-		}
-
-		diff, err := calculateDiff(baseRelease, release)
-		if err != nil {
-			return err
-		}
-		if len(o.BugsDir) > 0 {
-			return describeBugs(o.Out, o.ErrOut, diff, o.BugsDir, o.Output)
-		}
-		if len(o.ChangelogDir) > 0 {
-			return describeChangelog(o.Out, o.ErrOut, diff, o.ChangelogDir)
-		}
-		return describeReleaseDiff(o.Out, diff, o.ShowCommit, o.Output)
-	}
-
-	var exitErr error
-	for _, image := range o.Images {
-		release, err := o.LoadReleaseInfo(image, fetchImages)
-		if err != nil {
-			exitErr = kcmdutil.ErrExit
-			fmt.Fprintf(o.ErrOut, "error: %v\n", err)
-			continue
-		}
-		if o.Verify {
-			fmt.Fprintf(o.Out, "%s %s %s\n", release.Digest, release.References.CreationTimestamp.UTC().Format(time.RFC3339), release.PreferredName())
-			continue
-		}
-		if err := o.describeImage(release); err != nil {
-			exitErr = kcmdutil.ErrExit
-			fmt.Fprintf(o.ErrOut, "error: %v\n", err)
-			continue
-		}
-	}
-	return exitErr
-}
-
-func diffContents(a, b string, out io.Writer) error {
-	fmt.Fprintf(out, `To see the differences between these releases, run:
-
-  %[1]s adm release extract %[2]s --to=/tmp/old
-  %[1]s adm release extract %[3]s --to=/tmp/new
-  diff /tmp/old /tmp/new
-
-`, os.Args[0], a, b)
-	return nil
-}
-
-func (o *InfoOptions) describeImage(release *ReleaseInfo) error {
-	if o.ShowContents {
-		_, err := io.Copy(o.Out, newContentStreamForRelease(release))
-		return err
-	}
-	switch o.Output {
-	case "json":
-		data, err := json.MarshalIndent(release, "", "  ")
-		if err != nil {
-			return err
-		}
-		fmt.Fprintln(o.Out, string(data))
-		return nil
-	case "name":
-		for _, tag := range release.References.Spec.Tags {
-			fmt.Fprintf(o.Out, "%s\n", tag.Name)
-		}
-		return nil
-	case "pullspec":
-		for _, tag := range release.References.Spec.Tags {
-			if tag.From != nil && tag.From.Kind == "DockerImage" {
-				fmt.Fprintf(o.Out, "%s\n", tag.From.Name)
-			}
-		}
-		return nil
-	case "digest":
-		for _, tag := range release.References.Spec.Tags {
-			if tag.From != nil && tag.From.Kind == "DockerImage" {
-				if ref, err := imagereference.Parse(tag.From.Name); err != nil {
-					fmt.Fprintf(o.ErrOut, "error: %s is not a valid reference: %v\n", tag.Name, err)
-				} else if len(ref.ID) == 0 {
-					fmt.Fprintf(o.ErrOut, "error: %s does not point to a digest\n", tag.Name)
-				} else {
-					fmt.Fprintf(o.Out, "%s\n", ref.ID)
-				}
-			}
-		}
-		return nil
-	case "":
-	default:
-		return fmt.Errorf("output mode only supports 'name', 'json', 'pullspec', or 'digest'")
-	}
-	if len(o.ImageFor) > 0 {
-		spec, err := findImageSpec(release.References, o.ImageFor, release.Image)
-		if err != nil {
-			return err
-		}
-		fmt.Fprintln(o.Out, spec)
-		return nil
-	}
-	return describeReleaseInfo(o.Out, release, o.ShowCommit, o.ShowPullSpec, o.ShowSize)
-}
-
-func findImageSpec(image *imageapi.ImageStream, tagName, imageName string) (string, error) {
-	for _, tag := range image.Spec.Tags {
-		if tag.Name == tagName {
-			if tag.From != nil && tag.From.Kind == "DockerImage" && len(tag.From.Name) > 0 {
-				return tag.From.Name, nil
-			}
-		}
-	}
-	return "", fmt.Errorf("no image tag %q exists in the release image %s", tagName, imageName)
-}
-
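-// calculateDiff computes which image references and manifest files differ
-// between two releases. Entries present only in from carry a nil To, entries
-// present only in to carry a nil From, and entries identical in both
-// releases are pruned from the result.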
-func calculateDiff(from, to *ReleaseInfo) (*ReleaseDiff, error) {
-	diff := &ReleaseDiff{
-		From:             from,
-		To:               to,
-		ChangedImages:    make(map[string]*ImageReferenceDiff),
-		ChangedManifests: make(map[string]*ReleaseManifestDiff),
-	}
-	for i := range from.References.Spec.Tags {
-		tag := &from.References.Spec.Tags[i]
-		if tag.From == nil || tag.From.Kind != "DockerImage" {
-			continue
-		}
-		diff.ChangedImages[tag.Name] = &ImageReferenceDiff{
-			Name: tag.Name,
-			From: tag,
-		}
-	}
-	for i := range to.References.Spec.Tags {
-		tag := &to.References.Spec.Tags[i]
-		if tag.From == nil || tag.From.Kind != "DockerImage" {
-			continue
-		}
-		if exists, ok := diff.ChangedImages[tag.Name]; ok {
-			exists.To = tag
-			continue
-		}
-		diff.ChangedImages[tag.Name] = &ImageReferenceDiff{
-			Name: tag.Name,
-			To:   tag,
-		}
-	}
-	for k, v := range diff.ChangedImages {
-		if v.From != nil && v.To != nil && v.From.From.Name == v.To.From.Name {
-			delete(diff.ChangedImages, k)
-		}
-	}
-	for name, manifest := range from.ManifestFiles {
-		diff.ChangedManifests[name] = &ReleaseManifestDiff{
-			Filename: name,
-			From:     manifest,
-		}
-	}
-	for name, manifest := range to.ManifestFiles {
-		if exists, ok := diff.ChangedManifests[name]; ok {
-			exists.To = manifest
-			continue
-		}
-		diff.ChangedManifests[name] = &ReleaseManifestDiff{
-			Filename: name,
-			To:       manifest,
-		}
-	}
-	for k, v := range diff.ChangedManifests {
-		if bytes.Equal(v.From, v.To) {
-			delete(diff.ChangedManifests, k)
-		}
-	}
-
-	return diff, nil
-}
-
-type ReleaseDiff struct {
-	From *ReleaseInfo `json:"from"`
-	To   *ReleaseInfo `json:"to"`
-
-	ChangedImages    map[string]*ImageReferenceDiff  `json:"changedImages"`
-	ChangedManifests map[string]*ReleaseManifestDiff `json:"changedManifests"`
-}
-
-type ImageReferenceDiff struct {
-	Name string `json:"name"`
-
-	From *imageapi.TagReference `json:"from"`
-	To   *imageapi.TagReference `json:"to"`
-}
-
-type ReleaseManifestDiff struct {
-	Filename string `json:"filename"`
-
-	From []byte `json:"from"`
-	To   []byte `json:"to"`
-}
-
-type ReleaseInfo struct {
-	Image         string                              `json:"image"`
-	ImageRef      imagereference.DockerImageReference `json:"-"`
-	Digest        digest.Digest                       `json:"digest"`
-	ContentDigest digest.Digest                       `json:"contentDigest"`
-	// TODO: return the list digest in the future
-	// ListDigest    digest.Digest                       `json:"listDigest"`
-	Config     *dockerv1client.DockerImageConfig `json:"config"`
-	Metadata   *CincinnatiMetadata               `json:"metadata"`
-	References *imageapi.ImageStream             `json:"references"`
-
-	ComponentVersions map[string]string `json:"versions"`
-
-	Images map[string]*Image `json:"images"`
-
-	RawMetadata   map[string][]byte `json:"-"`
-	ManifestFiles map[string][]byte `json:"-"`
-	UnknownFiles  []string          `json:"-"`
-
-	Warnings []string `json:"warnings"`
-}
-
-type Image struct {
-	Name          string                              `json:"name"`
-	Ref           imagereference.DockerImageReference `json:"-"`
-	Digest        digest.Digest                       `json:"digest"`
-	ContentDigest digest.Digest                       `json:"contentDigest"`
-	ListDigest    digest.Digest                       `json:"listDigest"`
-	MediaType     string                              `json:"mediaType"`
-	Layers        []distribution.Descriptor           `json:"layers"`
-	Config        *dockerv1client.DockerImageConfig   `json:"config"`
-
-	Manifest distribution.Manifest `json:"-"`
-}
-
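-// PreferredName returns the version from the release metadata when present,
-// falling back to the name of the embedded image stream.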
-func (i *ReleaseInfo) PreferredName() string {
-	if i.Metadata != nil {
-		return i.Metadata.Version
-	}
-	return i.References.Name
-}
-
-func (i *ReleaseInfo) Platform() string {
-	os := i.Config.OS
-	if len(os) == 0 {
-		os = "unknown"
-	}
-	arch := i.Config.Architecture
-	if len(arch) == 0 {
-		arch = "unknown"
-	}
-	return fmt.Sprintf("%s/%s", os, arch)
-}
-
-func (o *InfoOptions) LoadReleaseInfo(image string, retrieveImages bool) (*ReleaseInfo, error) {
-	ref, err := imagereference.Parse(image)
-	if err != nil {
-		return nil, err
-	}
-
-	verifier := imagemanifest.NewVerifier()
-	opts := extract.NewOptions(genericclioptions.IOStreams{Out: o.Out, ErrOut: o.ErrOut})
-	opts.SecurityOptions = o.SecurityOptions
-
-	release := &ReleaseInfo{
-		Image:    image,
-		ImageRef: ref,
-
-		RawMetadata: make(map[string][]byte),
-	}
-
-	opts.ImageMetadataCallback = func(m *extract.Mapping, dgst, contentDigest digest.Digest, config *dockerv1client.DockerImageConfig) {
-		verifier.Verify(dgst, contentDigest)
-		release.Digest = dgst
-		release.ContentDigest = contentDigest
-		release.Config = config
-	}
-	opts.OnlyFiles = true
-	opts.Mappings = []extract.Mapping{
-		{
-			ImageRef: ref,
-
-			From:        "release-manifests/",
-			To:          ".",
-			LayerFilter: extract.NewPositionLayerFilter(-1),
-		},
-	}
-	var errs []error
-	opts.TarEntryCallback = func(hdr *tar.Header, _ extract.LayerInfo, r io.Reader) (bool, error) {
-		switch hdr.Name {
-		case "image-references":
-			data, err := ioutil.ReadAll(r)
-			if err != nil {
-				errs = append(errs, fmt.Errorf("unable to read release image-references: %v", err))
-				return true, nil
-			}
-			release.RawMetadata[hdr.Name] = data
-			is, err := readReleaseImageReferences(data)
-			if err != nil {
-				errs = append(errs, err)
-				return true, nil
-			}
-			release.References = is
-		case "release-metadata":
-			data, err := ioutil.ReadAll(r)
-			if err != nil {
-				errs = append(errs, fmt.Errorf("unable to read release metadata: %v", err))
-				return true, nil
-			}
-			release.RawMetadata[hdr.Name] = data
-			m := &CincinnatiMetadata{}
-			if err := json.Unmarshal(data, m); err != nil {
-				errs = append(errs, fmt.Errorf("invalid release metadata: %v", err))
-				return true, nil
-			}
-			release.Metadata = m
-		default:
-			if ext := path.Ext(hdr.Name); len(ext) > 0 && (ext == ".yaml" || ext == ".yml" || ext == ".json") {
-				klog.V(4).Infof("Found manifest %s", hdr.Name)
-				data, err := ioutil.ReadAll(r)
-				if err != nil {
-					errs = append(errs, fmt.Errorf("unable to read release manifest %q: %v", hdr.Name, err))
-					return true, nil
-				}
-				if release.ManifestFiles == nil {
-					release.ManifestFiles = make(map[string][]byte)
-				}
-				release.ManifestFiles[hdr.Name] = data
-			} else {
-				release.UnknownFiles = append(release.UnknownFiles, hdr.Name)
-			}
-		}
-		return true, nil
-	}
-	if err := opts.Run(); err != nil {
-		return nil, err
-	}
-	if len(errs) > 0 {
-		return nil, fmt.Errorf("release image could not be read: %s", errorList(errs))
-	}
-
-	if release.References == nil {
-		return nil, fmt.Errorf("release image did not contain an image-references file")
-	}
-
-	release.ComponentVersions, errs = readComponentVersions(release.References)
-	for _, err := range errs {
-		release.Warnings = append(release.Warnings, err.Error())
-	}
-
-	if retrieveImages {
-		var lock sync.Mutex
-		release.Images = make(map[string]*Image)
-		r := &imageinfo.ImageRetriever{
-			Image:           make(map[string]imagereference.DockerImageReference),
-			SecurityOptions: o.SecurityOptions,
-			ParallelOptions: o.ParallelOptions,
-			ImageMetadataCallback: func(name string, image *imageinfo.Image, err error) error {
-				if image != nil {
-					verifier.Verify(image.Digest, image.ContentDigest)
-				}
-				lock.Lock()
-				defer lock.Unlock()
-				if err != nil {
-					release.Warnings = append(release.Warnings, fmt.Sprintf("tag %q: %v", name, err))
-					return nil
-				}
-				copied := Image(*image)
-				release.Images[name] = &copied
-				return nil
-			},
-		}
-		for _, tag := range release.References.Spec.Tags {
-			if tag.From == nil || tag.From.Kind != "DockerImage" {
-				continue
-			}
-			ref, err := imagereference.Parse(tag.From.Name)
-			if err != nil {
-				release.Warnings = append(release.Warnings, fmt.Sprintf("tag %q has an invalid reference: %v", tag.Name, err))
-				continue
-			}
-			r.Image[tag.Name] = ref
-		}
-		if err := r.Run(); err != nil {
-			return nil, err
-		}
-	}
-
-	if !verifier.Verified() {
-		err := fmt.Errorf("the release image failed content verification and may have been tampered with")
-		if !o.SecurityOptions.SkipVerification {
-			return nil, err
-		}
-		fmt.Fprintf(o.ErrOut, "warning: %v\n", err)
-	}
-
-	sort.Strings(release.Warnings)
-
-	return release, nil
-}
-
-func readComponentVersions(is *imageapi.ImageStream) (map[string]string, []error) {
-	var errs []error
-	combined := make(map[string]sets.String)
-	for _, tag := range is.Spec.Tags {
-		versions, ok := tag.Annotations[annotationBuildVersions]
-		if !ok {
-			continue
-		}
-		all, err := parseComponentVersionsLabel(versions)
-		if err != nil {
-			errs = append(errs, fmt.Errorf("the referenced image %s had an invalid version annotation: %v", tag.Name, err))
-		}
-		for k, v := range all {
-			existing, ok := combined[k]
-			if !ok {
-				existing = sets.NewString()
-				combined[k] = existing
-			}
-			existing.Insert(v)
-		}
-	}
-	out := make(map[string]string)
-	var multiples []string
-	for k, v := range combined {
-		if v.Len() > 1 {
-			multiples = append(multiples, k)
-		}
-		out[k], _ = v.PopAny()
-	}
-	if len(multiples) > 0 {
-		sort.Strings(multiples)
-		errs = append(errs, fmt.Errorf("multiple versions reported for the following component(s): %v", strings.Join(multiples, ", ")))
-	}
-	return out, errs
-}
-
-func errorList(errs []error) string {
-	if len(errs) == 1 {
-		return errs[0].Error()
-	}
-	buf := &bytes.Buffer{}
-	fmt.Fprintf(buf, "\n\n")
-	for _, err := range errs {
-		fmt.Fprintf(buf, "* %v\n", err)
-	}
-	return buf.String()
-}
-
-func stringArrContains(arr []string, s string) bool {
-	for _, item := range arr {
-		if item == s {
-			return true
-		}
-	}
-	return false
-}
-
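-// describeReleaseDiff renders a summary of the images and metadata that
-// changed between two releases, either as JSON or in tabular form.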
-func describeReleaseDiff(out io.Writer, diff *ReleaseDiff, showCommit bool, outputMode string) error {
-	switch outputMode {
-	case "json":
-		data, err := json.MarshalIndent(diff, "", "  ")
-		if err != nil {
-			return err
-		}
-		fmt.Fprintln(out, string(data))
-		return nil
-	case "":
-		// print human readable output
-	default:
-		return fmt.Errorf("unrecognized output mode: %s", outputMode)
-	}
-	if diff.To.Digest == diff.From.Digest {
-		fmt.Fprintf(out, "Releases are identical\n")
-		return nil
-	}
-	w := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0)
-	defer w.Flush()
-	now := time.Now()
-	fmt.Fprintf(w, "\tFROM\tTO\n")
-	fmt.Fprintf(w, "Name:\t%s\t%s\n", diff.From.PreferredName(), diff.To.PreferredName())
-	fmt.Fprintf(w, "Created:\t%s\t%s\n", duration.ShortHumanDuration(now.Sub(diff.From.Config.Created)), duration.ShortHumanDuration(now.Sub(diff.To.Config.Created)))
-	if from, to := diff.From.Platform(), diff.To.Platform(); from != to {
-		fmt.Fprintf(w, "OS/Arch:\t%s\t%s\n", from, to)
-	}
-
-	switch {
-	case diff.From.Metadata != nil && diff.To.Metadata != nil:
-		fmt.Fprintln(w)
-		fmt.Fprintf(w, "Version:\t%s\t%s\n", diff.From.Metadata.Version, diff.To.Metadata.Version)
-		canUpgrade := "No"
-		if stringArrContains(diff.To.Metadata.Previous, diff.From.Metadata.Version) {
-			canUpgrade = "Yes"
-		}
-		fmt.Fprintf(w, "Upgrade From:\t\t%s\n", canUpgrade)
-	case diff.From.Metadata != nil && diff.To.Metadata == nil:
-		fmt.Fprintf(w, "Has Release Metadata:\tYes\t\n")
-	case diff.From.Metadata == nil && diff.To.Metadata != nil:
-		fmt.Fprintf(w, "Has Release Metadata:\t\tYes\n")
-	}
-
-	if len(diff.ChangedImages) > 0 {
-		var keys []string
-		maxLen := 0
-		for k := range diff.ChangedImages {
-			if len(k) > maxLen {
-				maxLen = len(k)
-			}
-			keys = append(keys, k)
-		}
-		justify := func(s string) string {
-			return s + strings.Repeat(" ", maxLen-len(s))
-		}
-		sort.Strings(keys)
-		var rebuilt []string
-		writeTabSection(w, func(w io.Writer) {
-			count := 0
-			for _, k := range keys {
-				if image := diff.ChangedImages[k]; image.To != nil && image.From != nil {
-					if !codeChanged(image.From, image.To) {
-						rebuilt = append(rebuilt, k)
-						continue
-					}
-					if count == 0 {
-						fmt.Fprintln(w)
-						fmt.Fprintf(w, "Images Changed:\n")
-					}
-					count++
-					old, new := digestOrRef(image.From.From.Name), digestOrRef(image.To.From.Name)
-					if old != new {
-						if showCommit {
-							fmt.Fprintf(w, "  %s\t%s\n", justify(image.Name), gitDiffOrCommit(image.From, image.To))
-						} else {
-							fmt.Fprintf(w, "  %s\t%s\t%s\n", justify(image.Name), old, new)
-						}
-					}
-				}
-			}
-		})
-
-		if len(rebuilt) > 0 {
-			writeTabSection(w, func(w io.Writer) {
-				count := 0
-				for _, k := range rebuilt {
-					if image := diff.ChangedImages[k]; image.To != nil && image.From != nil {
-						if count == 0 {
-							fmt.Fprintln(w)
-							fmt.Fprintf(w, "Images Rebuilt:\n")
-						}
-						count++
-						old, new := digestOrRef(image.From.From.Name), digestOrRef(image.To.From.Name)
-						if old != new {
-							if showCommit {
-								fmt.Fprintf(w, "  %s\t%s\n", justify(image.Name), gitDiffOrCommit(image.From, image.To))
-							} else {
-								fmt.Fprintf(w, "  %s\t%s\t%s\n", justify(image.Name), old, new)
-							}
-						}
-					}
-				}
-			})
-		}
-
-		writeTabSection(w, func(w io.Writer) {
-			count := 0
-			for _, k := range keys {
-				if image := diff.ChangedImages[k]; image.From == nil {
-					if count == 0 {
-						fmt.Fprintln(w)
-						fmt.Fprintf(w, "Images Added:\n")
-					}
-					count++
-					if showCommit {
-						fmt.Fprintf(w, "  %s\t%s\n", justify(image.Name), repoAndCommit(image.To))
-					} else {
-						fmt.Fprintf(w, "  %s\t%s\n", justify(image.Name), digestOrRef(image.To.From.Name))
-					}
-				}
-			}
-		})
-
-		writeTabSection(w, func(w io.Writer) {
-			count := 0
-			for _, k := range keys {
-				if image := diff.ChangedImages[k]; image.To == nil {
-					if count == 0 {
-						fmt.Fprintln(w)
-						fmt.Fprintf(w, "Images Removed:\n")
-					}
-					count++
-					fmt.Fprintf(w, "  %s\n", justify(image.Name))
-				}
-			}
-		})
-	}
-	fmt.Fprintln(w)
-	return nil
-}
-
-func repoAndCommit(ref *imageapi.TagReference) string {
-	repo := ref.Annotations[annotationBuildSourceLocation]
-	commit := ref.Annotations[annotationBuildSourceCommit]
-	if len(repo) == 0 || len(commit) == 0 {
-		return ""
-	}
-	return urlForRepoAndCommit(repo, commit)
-}
-
-func gitDiffOrCommit(from, to *imageapi.TagReference) string {
-	oldRepo, newRepo := from.Annotations[annotationBuildSourceLocation], to.Annotations[annotationBuildSourceLocation]
-	oldCommit, newCommit := from.Annotations[annotationBuildSourceCommit], to.Annotations[annotationBuildSourceCommit]
-	if len(newRepo) == 0 || len(newCommit) == 0 {
-		return ""
-	}
-	if oldRepo == newRepo {
-		if oldCommit == newCommit {
-			return urlForRepoAndCommit(newRepo, newCommit)
-		}
-		return urlForRepoAndCommitRange(newRepo, oldCommit, newCommit)
-	}
-	if len(oldCommit) == 0 {
-		return fmt.Sprintf("%s -> %s", oldRepo, urlForRepoAndCommit(newRepo, newCommit))
-	}
-	if oldCommit == newCommit {
-		return fmt.Sprintf("%s -> %s", oldRepo, urlForRepoAndCommit(newRepo, newCommit))
-	}
-	return fmt.Sprintf("%s -> %s", urlForRepoAndCommit(oldRepo, oldCommit), urlForRepoAndCommit(newRepo, newCommit))
-}
-
-func urlForRepoAndCommit(repo, commit string) string {
-	if strings.HasPrefix(repo, urlGithubPrefix) {
-		if u, err := url.Parse(repo); err == nil {
-			u.Path = path.Join(u.Path, "commit", commit)
-			return u.String()
-		}
-	}
-	return fmt.Sprintf("%s %s", repo, commit)
-}
-
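-// urlForRepoAndCommitRange returns a GitHub compare URL when the repository
-// is hosted on github.com, e.g. https://github.com/<org>/<repo>/compare/<from>...<to>;
-// otherwise it falls back to "repo from to".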
-func urlForRepoAndCommitRange(repo, from, to string) string {
-	if strings.HasPrefix(repo, urlGithubPrefix) {
-		if u, err := url.Parse(repo); err == nil {
-			u.Path = path.Join(u.Path, "compare", fmt.Sprintf("%s...%s", from, to))
-			return u.String()
-		}
-	}
-	return fmt.Sprintf("%s %s %s", repo, from, to)
-}
-
-func codeChanged(from, to *imageapi.TagReference) bool {
-	oldCommit, newCommit := from.Annotations[annotationBuildSourceCommit], to.Annotations[annotationBuildSourceCommit]
-	return len(oldCommit) > 0 && len(newCommit) > 0 && oldCommit != newCommit
-}
-
-func describeReleaseInfo(out io.Writer, release *ReleaseInfo, showCommit, pullSpec, showSize bool) error {
-	w := tabwriter.NewWriter(out, 0, 4, 1, ' ', 0)
-	defer w.Flush()
-	now := time.Now()
-	fmt.Fprintf(w, "Name:\t%s\n", release.PreferredName())
-	fmt.Fprintf(w, "Digest:\t%s\n", release.Digest)
-	fmt.Fprintf(w, "Created:\t%s\n", release.Config.Created.UTC().Truncate(time.Second).Format(time.RFC3339))
-	fmt.Fprintf(w, "OS/Arch:\t%s/%s\n", release.Config.OS, release.Config.Architecture)
-	fmt.Fprintf(w, "Manifests:\t%d\n", len(release.ManifestFiles))
-	if len(release.UnknownFiles) > 0 {
-		fmt.Fprintf(w, "Unknown files:\t%d\n", len(release.UnknownFiles))
-	}
-
-	fmt.Fprintln(w)
-	refExact := release.ImageRef
-	refExact.Tag = ""
-	refExact.ID = release.Digest.String()
-	fmt.Fprintf(w, "Pull From:\t%s\n", refExact.String())
-
-	if m := release.Metadata; m != nil {
-		fmt.Fprintln(w)
-		fmt.Fprintf(w, "Release Metadata:\n")
-		fmt.Fprintf(w, "  Version:\t%s\n", m.Version)
-		if len(m.Previous) > 0 {
-			fmt.Fprintf(w, "  Upgrades:\t%s\n", strings.Join(sortSemanticVersions(m.Previous), ", "))
-		} else {
-			fmt.Fprintf(w, "  Upgrades:\t\n")
-		}
-		var keys []string
-		for k := range m.Metadata {
-			keys = append(keys, k)
-		}
-		sort.Strings(keys)
-		writeTabSection(w, func(w io.Writer) {
-			if len(keys) > 0 {
-				fmt.Fprintf(w, "  Metadata:\n")
-			}
-			for _, k := range keys {
-				fmt.Fprintf(w, "    %s:\t%s\n", k, m.Metadata[k])
-			}
-		})
-	}
-	if len(release.ComponentVersions) > 0 {
-		fmt.Fprintln(w)
-		fmt.Fprintf(w, "Component Versions:\n")
-		keys := orderedKeys(release.ComponentVersions)
-		for _, key := range keys {
-			fmt.Fprintf(w, "  %s\t%s\n", componentName(key), release.ComponentVersions[key])
-		}
-	}
-	writeTabSection(w, func(w io.Writer) {
-		fmt.Fprintln(w)
-		fmt.Fprintf(w, "Images:\n")
-		switch {
-		case showSize:
-			layerCount := make(map[string]int)
-			baseLayer := make(map[string]int)
-			totalSize := int64(0)
-			for _, image := range release.Images {
-				for i, layer := range image.Layers {
-					digest := layer.Digest.String()
-					if i == 0 {
-						baseLayer[digest] = 0
-					}
-					count := layerCount[digest]
-					if count == 0 {
-						totalSize += layer.Size
-					}
-					layerCount[digest] = count + 1
-				}
-			}
-
-			var baseHeader string
-			if len(baseLayer) > 1 {
-				baseHeader = "BASE"
-			}
-			fmt.Fprintf(w, "  NAME\t AGE\t LAYERS\t SIZE MB\t UNIQUE MB\t %s\n", baseHeader)
-			coveredLayer := make(map[string]struct{})
-			currentBase := 1
-			for _, tag := range release.References.Spec.Tags {
-				if tag.From == nil || tag.From.Kind != "DockerImage" {
-					continue
-				}
-
-				image, ok := release.Images[tag.Name]
-				if !ok {
-					fmt.Fprintf(w, "  %s\t\t\t\t\t\n", tag.Name)
-					continue
-				}
-
-				// create a column for a small number of unique base layers that visually indicates
-				// which base layer belongs to which image
-				var base string
-				if len(baseLayer) > 1 {
-					if baseIndex, ok := baseLayer[image.Layers[0].Digest.String()]; ok {
-						if baseIndex == 0 {
-							baseLayer[image.Layers[0].Digest.String()] = currentBase
-							baseIndex = currentBase
-							currentBase++
-						}
-						if len(baseLayer) <= 5 {
-							base = strings.Repeat(" ", baseIndex-1) + string(rune('A'+baseIndex-1))
-						} else {
-							base = strconv.Itoa(baseIndex)
-						}
-					}
-				}
-
-				// count the size of the image and the unique size of the image, to give a better
-				// idea of which images impact the payload the most
-				unshared := int64(0)
-				size := int64(0)
-				for _, layer := range image.Layers {
-					size += layer.Size
-					if layerCount[layer.Digest.String()] > 1 {
-						continue
-					}
-					unshared += layer.Size
-				}
-				// if this image has no unique layers, find the top-most layer and if this is the
-				// first time it has been shown print the top layer size (as a reasonable proxy
-				// for how much this image in particular contributes)
-				if unshared == 0 {
-					top := image.Layers[len(image.Layers)-1]
-					if _, ok := coveredLayer[top.Digest.String()]; !ok {
-						unshared = top.Size
-						coveredLayer[top.Digest.String()] = struct{}{}
-					}
-				}
-				age := ""
-				if image.Config != nil && !image.Config.Created.IsZero() {
-					age = duration.ShortHumanDuration(now.Sub(image.Config.Created))
-				}
-				fmt.Fprintf(w, "  %s\t%4s\t%7d\t%8.1f\t%10.1f\t %s\n", tag.Name, age, len(image.Layers), float64(size)/1024/1024, float64(unshared)/1024/1024, base)
-			}
-			fmt.Fprintln(w)
-			if len(baseLayer) > 1 {
-				fmt.Fprintf(w, "  %s across %d layers, %d different base images\n", units.HumanSize(float64(totalSize)), len(layerCount), len(baseLayer))
-			} else {
-				fmt.Fprintf(w, "  %s across %d layers\n", units.HumanSize(float64(totalSize)), len(layerCount))
-			}
-
-		case showCommit:
-			fmt.Fprintf(w, "  NAME\tREPO\tCOMMIT\t\n")
-			for _, tag := range release.References.Spec.Tags {
-				if tag.From == nil || tag.From.Kind != "DockerImage" {
-					continue
-				}
-				fmt.Fprintf(w, "  %s\t%s\t%s\n", tag.Name, tag.Annotations[annotationBuildSourceLocation], tag.Annotations[annotationBuildSourceCommit])
-			}
-
-		case pullSpec:
-			fmt.Fprintf(w, "  NAME\tPULL SPEC\n")
-			for _, tag := range release.References.Spec.Tags {
-				if tag.From == nil || tag.From.Kind != "DockerImage" {
-					continue
-				}
-				fmt.Fprintf(w, "  %s\t%s\n", tag.Name, tag.From.Name)
-			}
-
-		default:
-			fmt.Fprintf(w, "  NAME\tDIGEST\n")
-			for _, tag := range release.References.Spec.Tags {
-				if tag.From == nil || tag.From.Kind != "DockerImage" {
-					continue
-				}
-				var id string
-				if ref, err := imagereference.Parse(tag.From.Name); err == nil {
-					id = ref.ID
-				}
-				if len(id) == 0 {
-					id = tag.From.Name
-				}
-				fmt.Fprintf(w, "  %s\t%s\n", tag.Name, id)
-			}
-		}
-	})
-	if len(release.Warnings) > 0 {
-		writeTabSection(w, func(w io.Writer) {
-			fmt.Fprintln(w)
-			fmt.Fprintf(w, "Warnings:\n")
-			for _, warning := range release.Warnings {
-				fmt.Fprintf(w, "* %s\n", warning)
-			}
-		})
-	}
-	fmt.Fprintln(w)
-	return nil
-}
-
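-// writeTabSection runs fn against a nested tabwriter and flushes it, so each
-// section is column-aligned independently of the surrounding output.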
-func writeTabSection(out io.Writer, fn func(w io.Writer)) {
-	w := tabwriter.NewWriter(out, 0, 4, 1, ' ', 0)
-	fn(w)
-	w.Flush()
-}
-
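-// sortSemanticVersions sorts the provided versions in semantic version
-// order; if any entry fails to parse, the input is returned unsorted.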
-func sortSemanticVersions(versionStrings []string) []string {
-	var versions []semver.Version
-	for _, version := range versionStrings {
-		v, err := semver.Parse(version)
-		if err != nil {
-			return versionStrings
-		}
-		versions = append(versions, v)
-	}
-	semver.Sort(versions)
-	versionStrings = make([]string, 0, len(versions))
-	for _, v := range versions {
-		versionStrings = append(versionStrings, v.String())
-	}
-	return versionStrings
-}
-
-func digestOrRef(ref string) string {
-	if ref, err := imagereference.Parse(ref); err == nil && len(ref.ID) > 0 {
-		return ref.ID
-	}
-	return ref
-}
-
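-// describeChangelog writes a markdown changelog between two releases,
-// cloning the source repositories under dir to compute merge commits.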
-func describeChangelog(out, errOut io.Writer, diff *ReleaseDiff, dir string) error {
-	if diff.To.Digest == diff.From.Digest {
-		return fmt.Errorf("releases are identical")
-	}
-
-	fmt.Fprint(out, heredoc.Docf(`
-		# %s
-
-		Created: %s
-
-		Image Digest: %s
-
-	`, diff.To.PreferredName(), diff.To.References.CreationTimestamp.UTC(), "`"+diff.To.Digest+"`"))
-
-	if release, ok := diff.To.References.Annotations[annotationReleaseFromRelease]; ok {
-		fmt.Fprintf(out, "Promoted from %s\n\n", release)
-	}
-	fmt.Fprintln(out)
-	fmt.Fprintf(out, "## Changes from %s\n\n", diff.From.PreferredName())
-
-	if keys := orderedKeys(diff.To.ComponentVersions); len(keys) > 0 {
-		fmt.Fprintf(out, "### Components\n\n")
-		for _, key := range keys {
-			version := diff.To.ComponentVersions[key]
-			old, ok := diff.From.ComponentVersions[key]
-			if !ok || old == version {
-				fmt.Fprintf(out, "* %s %s\n", componentName(key), version)
-				continue
-			}
-			fmt.Fprintf(out, "* %s upgraded from %s to %s\n", componentName(key), old, version)
-		}
-		fmt.Fprintln(out)
-		fmt.Fprintln(out)
-	}
-
-	var hasError bool
-
-	var added, removed []string
-	for k, imageDiff := range diff.ChangedImages {
-		switch {
-		case imageDiff.From == nil:
-			added = append(added, k)
-		case imageDiff.To == nil:
-			removed = append(removed, k)
-		}
-	}
-	codeChanges, imageChanges, incorrectImageChanges := releaseDiffContentChanges(diff)
-
-	sort.Strings(added)
-	sort.Strings(removed)
-
-	if len(added) > 0 {
-		fmt.Fprintf(out, "### New images\n\n")
-		for _, k := range added {
-			fmt.Fprintf(out, "* %s\n", refToShortDescription(diff.ChangedImages[k].To))
-		}
-		fmt.Fprintln(out)
-		fmt.Fprintln(out)
-	}
-
-	if len(removed) > 0 {
-		fmt.Fprintf(out, "### Removed images\n\n")
-		for _, k := range removed {
-			fmt.Fprintf(out, "* %s\n", k)
-		}
-		fmt.Fprintln(out)
-		fmt.Fprintln(out)
-	}
-
-	if len(imageChanges) > 0 || len(incorrectImageChanges) > 0 {
-		fmt.Fprintf(out, "### Rebuilt images without code change\n\n")
-		for _, change := range imageChanges {
-			fmt.Fprintf(out, "* %s\n", refToShortDescription(diff.ChangedImages[change.Name].To))
-		}
-		for _, k := range incorrectImageChanges {
-			fmt.Fprintf(out, "* %s\n", k)
-		}
-		fmt.Fprintln(out)
-		fmt.Fprintln(out)
-	}
-
-	for _, change := range codeChanges {
-		u, commits, err := commitsForRepo(dir, change, out, errOut)
-		if err != nil {
-			fmt.Fprintf(errOut, "error: %v\n", err)
-			hasError = true
-			continue
-		}
-		if len(commits) > 0 {
-			if u.Host == "github.com" {
-				fmt.Fprintf(out, "### [%s](https://github.com%s/tree/%s)\n\n", strings.Join(change.ImagesAffected, ", "), u.Path, change.To)
-			} else {
-				fmt.Fprintf(out, "### %s\n\n", strings.Join(change.ImagesAffected, ", "))
-			}
-			for _, commit := range commits {
-				var suffix string
-				switch {
-				case commit.PullRequest > 0:
-					suffix = fmt.Sprintf("[#%d](%s)", commit.PullRequest, fmt.Sprintf("https://%s%s/pull/%d", u.Host, u.Path, commit.PullRequest))
-				case u.Host == "github.com":
-					commit := commit.Commit[:8]
-					suffix = fmt.Sprintf("[%s](%s)", commit, fmt.Sprintf("https://%s%s/commit/%s", u.Host, u.Path, commit))
-				default:
-					suffix = commit.Commit[:8]
-				}
-				switch {
-				case commit.Bug > 0:
-					fmt.Fprintf(out,
-						"* [Bug %d](%s): %s %s\n",
-						commit.Bug,
-						fmt.Sprintf("https://bugzilla.redhat.com/show_bug.cgi?id=%d", commit.Bug),
-						commit.Subject,
-						suffix,
-					)
-				default:
-					fmt.Fprintf(out,
-						"* %s %s\n",
-						commit.Subject,
-						suffix,
-					)
-				}
-			}
-			if u.Host == "github.com" {
-				fmt.Fprintf(out, "* [Full changelog](%s)\n\n", fmt.Sprintf("https://%s%s/compare/%s...%s", u.Host, u.Path, change.From, change.To))
-			} else {
-				fmt.Fprintf(out, "* %s from %s to %s\n\n", change.Repo, change.FromShort(), change.ToShort())
-			}
-			fmt.Fprintln(out)
-		}
-	}
-	if hasError {
-		return kcmdutil.ErrExit
-	}
-	return nil
-}
-
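-// describeBugs lists the Bugzilla bugs referenced by merge commits between
-// two releases, either in tabular form or as bare IDs when format is "name".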
-func describeBugs(out, errOut io.Writer, diff *ReleaseDiff, dir string, format string) error {
-	if diff.To.Digest == diff.From.Digest {
-		return fmt.Errorf("releases are identical")
-	}
-
-	var hasError bool
-	codeChanges, _, _ := releaseDiffContentChanges(diff)
-
-	bugIDs := sets.NewInt()
-	for _, change := range codeChanges {
-		_, commits, err := commitsForRepo(dir, change, out, errOut)
-		if err != nil {
-			fmt.Fprintf(errOut, "error: %v\n", err)
-			hasError = true
-			continue
-		}
-		for _, commit := range commits {
-			if commit.Bug == 0 {
-				continue
-			}
-			bugIDs.Insert(commit.Bug)
-		}
-	}
-
-	bugs := make(map[int]BugInfo)
-
-	u, err := url.Parse("https://bugzilla.redhat.com/rest/bug")
-	if err != nil {
-		return err
-	}
-	client := http.DefaultClient
-	allBugIDs := bugIDs.List()
-	for len(allBugIDs) > 0 {
-		var next []int
-		if len(allBugIDs) > 10 {
-			next = allBugIDs[:10]
-			allBugIDs = allBugIDs[10:]
-		} else {
-			next = allBugIDs
-			allBugIDs = nil
-		}
-
-		bugList, err := retrieveBugs(client, u, next, 2)
-		if err != nil {
-			fmt.Fprintf(errOut, "error: unable to retrieve bugs %v: %v\n", next, err)
-			hasError = true
-			continue
-		}
-		for _, bug := range bugList.Bugs {
-			bugs[bug.ID] = bug
-		}
-	}
-
-	var valid []int
-	for _, id := range bugIDs.List() {
-		if _, ok := bugs[id]; !ok {
-			fmt.Fprintf(errOut, "error: Bug %d was not retrieved\n", id)
-			hasError = true
-			continue
-		}
-		valid = append(valid, id)
-	}
-
-	if len(valid) > 0 {
-		switch format {
-		case "name":
-			for _, id := range valid {
-				fmt.Fprintln(out, id)
-			}
-		default:
-			tw := tabwriter.NewWriter(out, 0, 0, 1, ' ', 0)
-			fmt.Fprintln(tw, "ID\tSTATUS\tPRIORITY\tSUMMARY")
-			for _, id := range valid {
-				bug := bugs[id]
-				fmt.Fprintf(tw, "%d\t%s\t%s\t%s\n", id, bug.Status, bug.Priority, bug.Summary)
-			}
-			tw.Flush()
-		}
-	}
-
-	if hasError {
-		return kcmdutil.ErrExit
-	}
-	return nil
-}
-
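-// retrieveBugs fetches the given bug IDs from the Bugzilla REST API,
-// retrying up to retries times; callers chunk the IDs (see describeBugs)
-// to keep the query string short.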
-func retrieveBugs(client *http.Client, server *url.URL, bugs []int, retries int) (*BugList, error) {
-	q := url.Values{}
-	for _, id := range bugs {
-		q.Add("id", strconv.Itoa(id))
-	}
-	u := *server
-	u.RawQuery = q.Encode()
-	var lastErr error
-	for i := 0; i < retries; i++ {
-		resp, err := client.Get(u.String())
-		if err != nil {
-			lastErr = err
-			continue
-		}
-		if resp.StatusCode != 200 {
-			resp.Body.Close()
-			lastErr = fmt.Errorf("server responded with %d", resp.StatusCode)
-			continue
-		}
-		data, err := ioutil.ReadAll(resp.Body)
-		resp.Body.Close()
-		if err != nil {
-			lastErr = fmt.Errorf("unable to get body contents: %v", err)
-			continue
-		}
-		var bugList BugList
-		if err := json.Unmarshal(data, &bugList); err != nil {
-			lastErr = fmt.Errorf("unable to parse bug list: %v", err)
-			continue
-		}
-		return &bugList, nil
-	}
-	return nil, lastErr
-}
-
-type BugList struct {
-	Bugs []BugInfo `json:"bugs"`
-}
-
-type BugInfo struct {
-	ID       int    `json:"id"`
-	Status   string `json:"status"`
-	Priority string `json:"priority"`
-	Summary  string `json:"summary"`
-}
-
-type ImageChange struct {
-	Name     string
-	From, To imagereference.DockerImageReference
-}
-
-type CodeChange struct {
-	Repo     string
-	From, To string
-
-	AlternateRepos []string
-
-	ImagesAffected []string
-}
-
-func (c CodeChange) FromShort() string {
-	if len(c.From) > 8 {
-		return c.From[:8]
-	}
-	return c.From
-}
-
-func (c CodeChange) ToShort() string {
-	if len(c.To) > 8 {
-		return c.To[:8]
-	}
-	return c.To
-}
-
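-// commitsForRepo ensures a clone of the repository for a code change exists
-// under dir and returns the parsed source URL together with the merge
-// commits between the change's From and To commits.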
-func commitsForRepo(dir string, change CodeChange, out, errOut io.Writer) (*url.URL, []MergeCommit, error) {
-	u, err := sourceLocationAsURL(change.Repo)
-	if err != nil {
-		return nil, nil, fmt.Errorf("the source repository %s cannot be parsed: %v", change.Repo, err)
-	}
-	g, err := ensureCloneForRepo(dir, change.Repo, change.AlternateRepos, errOut, errOut)
-	if err != nil {
-		return nil, nil, err
-	}
-	commits, err := mergeLogForRepo(g, change.Repo, change.From, change.To)
-	if err != nil {
-		return nil, nil, fmt.Errorf("could not load commits for %s: %v", change.Repo, err)
-	}
-	return u, commits, nil
-}
-
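-// releaseDiffContentChanges partitions the changed images of a diff into
-// source code changes (grouped by repository and commit range so a shared
-// range is reported once), pure image rebuilds, and tags whose image
-// references could not be parsed.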
-func releaseDiffContentChanges(diff *ReleaseDiff) ([]CodeChange, []ImageChange, []string) {
-	var imageChanges []ImageChange
-	var unexpectedChanges []string
-	var keys []string
-	repoToCommitsToImages := make(map[string]map[string][]string)
-	for k, imageDiff := range diff.ChangedImages {
-		from, to := imageDiff.From, imageDiff.To
-		switch {
-		case from == nil, to == nil:
-		default:
-			newRepo := to.Annotations[annotationBuildSourceLocation]
-			oldCommit, newCommit := from.Annotations[annotationBuildSourceCommit], to.Annotations[annotationBuildSourceCommit]
-			if len(oldCommit) == 0 || oldCommit == newCommit {
-				if from.From != nil && to.From != nil {
-					if fromRef, err := imagereference.Parse(from.From.Name); err == nil {
-						if toRef, err := imagereference.Parse(to.From.Name); err == nil {
-							if len(fromRef.ID) > 0 && fromRef.ID == toRef.ID {
-								// no change or only location changed
-								break
-							}
-							imageChanges = append(imageChanges, ImageChange{
-								Name: imageDiff.Name,
-								From: fromRef,
-								To:   toRef,
-							})
-							break
-						}
-					}
-				}
-				// before or after tag did not have a valid image reference
-				unexpectedChanges = append(unexpectedChanges, k)
-				break
-			}
-			commitRange, ok := repoToCommitsToImages[newRepo]
-			if !ok {
-				commitRange = make(map[string][]string)
-				repoToCommitsToImages[newRepo] = commitRange
-			}
-			rangeID := fmt.Sprintf("%s..%s", oldCommit, newCommit)
-			commitRange[rangeID] = append(commitRange[rangeID], k)
-			keys = append(keys, k)
-		}
-	}
-	sort.Slice(imageChanges, func(i, j int) bool {
-		return imageChanges[i].Name < imageChanges[j].Name
-	})
-	sort.Strings(unexpectedChanges)
-	sort.Strings(keys)
-	var codeChanges []CodeChange
-	for _, key := range keys {
-		imageDiff := diff.ChangedImages[key]
-		from, to := imageDiff.From, imageDiff.To
-		oldRepo, newRepo := from.Annotations[annotationBuildSourceLocation], to.Annotations[annotationBuildSourceLocation]
-		oldCommit, newCommit := from.Annotations[annotationBuildSourceCommit], to.Annotations[annotationBuildSourceCommit]
-
-		var alternateRepos []string
-		if len(oldRepo) > 0 && oldRepo != newRepo {
-			alternateRepos = append(alternateRepos, oldRepo)
-		}
-
-		// only display a given chunk of changes once
-		commitRange := fmt.Sprintf("%s..%s", oldCommit, newCommit)
-		allKeys := repoToCommitsToImages[newRepo][commitRange]
-		if len(allKeys) == 0 {
-			continue
-		}
-		repoToCommitsToImages[newRepo][commitRange] = nil
-		sort.Strings(allKeys)
-
-		codeChanges = append(codeChanges, CodeChange{
-			Repo:           newRepo,
-			From:           oldCommit,
-			To:             newCommit,
-			AlternateRepos: alternateRepos,
-			ImagesAffected: allKeys,
-		})
-	}
-	return codeChanges, imageChanges, unexpectedChanges
-}
-
-func refToShortDescription(ref *imageapi.TagReference) string {
-	if from := ref.From; from != nil {
-		name := ref.Name
-		if u, err := sourceLocationAsURL(ref.Annotations[annotationBuildSourceLocation]); err == nil {
-			if u.Host == "github.com" {
-				if commit, ok := ref.Annotations[annotationBuildSourceCommit]; ok {
-					shortCommit := commit
-					if len(shortCommit) > 8 {
-						shortCommit = shortCommit[:8]
-					}
-					name = fmt.Sprintf("[%s](https://github.com%s) git [%s](https://github.com%s/commit/%s)", name, u.Path, shortCommit, u.Path, commit)
-				} else {
-					name = fmt.Sprintf("[%s](https://github.com%s)", name, u.Path)
-				}
-			}
-		}
-		imageRef, err := imagereference.Parse(from.Name)
-		if err == nil {
-			switch {
-			case len(imageRef.ID) > 0:
-				return fmt.Sprintf("%s `%s`", name, imageRef.ID)
-			case len(imageRef.Tag) > 0:
-				return fmt.Sprintf("%s `:%s`", name, imageRef.Tag)
-			default:
-				return fmt.Sprintf("%s `%s`", name, imageRef.Exact())
-			}
-		}
-		return fmt.Sprintf("%s `%s`", name, from.Name)
-	}
-	return ref.Name
-}
-
-func componentName(key string) string {
-	parts := strings.Split(key, "-")
-	for i, part := range parts {
-		if len(part) > 0 {
-			parts[i] = strings.ToUpper(part[:1]) + part[1:]
-		}
-	}
-	return strings.Join(parts, " ")
-}
-
-func orderedKeys(m map[string]string) []string {
-	keys := make([]string, 0, len(m))
-	for k := range m {
-		keys = append(keys, k)
-	}
-	sort.Strings(keys)
-	return keys
-}
-
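-// contentStream is an io.Reader that yields a sequence of byte slices in
-// order without first concatenating them into a single buffer.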
-type contentStream struct {
-	current []byte
-	parts   [][]byte
-}
-
-func (s *contentStream) Read(p []byte) (int, error) {
-	remaining := len(p)
-	count := 0
-	for remaining > 0 {
-		// find the next buffer, if we have nothing
-		if len(s.current) == 0 {
-			if len(s.parts) == 0 {
-				return count, io.EOF
-			}
-			s.current = s.parts[0]
-			s.parts = s.parts[1:]
-		}
-
-		have := len(s.current)
-
-		// fill the buffer completely
-		if have >= remaining {
-			copy(p, s.current[:remaining])
-			s.current = s.current[remaining:]
-			return count + remaining, nil
-		}
-
-		// fill the buffer with whatever we have left
-		copy(p, s.current[:have])
-		s.current = nil
-		p = p[have:]
-		count += have
-		remaining -= have
-	}
-	return count, nil
-}
-
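-// newContentStreamForRelease streams the raw metadata and manifest files of
-// a release in sorted order, each preceded by a "# <filename>" header line.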
-func newContentStreamForRelease(image *ReleaseInfo) io.Reader {
-	names := make([]string, 0, len(image.ManifestFiles))
-	for name := range image.ManifestFiles {
-		names = append(names, name)
-	}
-	sort.Strings(names)
-
-	rawNames := make([]string, 0, len(image.RawMetadata))
-	for name := range image.RawMetadata {
-		rawNames = append(rawNames, name)
-	}
-	sort.Strings(rawNames)
-
-	data := make([][]byte, 0, (len(names)+len(rawNames))*3)
-
-	for _, name := range rawNames {
-		content := image.RawMetadata[name]
-		data = append(data, []byte(fmt.Sprintf("# %s\n", name)), content)
-		if len(content) > 0 && !bytes.HasSuffix(content, []byte("\n")) {
-			data = append(data, []byte("\n"))
-		}
-	}
-	for _, name := range names {
-		content := image.ManifestFiles[name]
-		data = append(data, []byte(fmt.Sprintf("# %s\n", name)), content)
-		if len(content) > 0 && !bytes.HasSuffix(content, []byte("\n")) {
-			data = append(data, []byte("\n"))
-		}
-	}
-	return &contentStream{parts: data}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/release/info_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/release/info_test.go
deleted file mode 100644
index 5868097c0275..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/release/info_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package release
-
-import (
-	"bytes"
-	"encoding/hex"
-	"io"
-	"strings"
-	"testing"
-)
-
-func Test_contentStream_Read(t *testing.T) {
-	tests := []struct {
-		name    string
-		parts   [][]byte
-		want    string
-		wantN   int64
-		wantErr bool
-	}{
-		{
-			parts: [][]byte{[]byte("test"), []byte("other"), []byte("a")},
-			want:  "testothera",
-			wantN: 10,
-		},
-		{
-			parts: [][]byte{[]byte("test"), []byte(strings.Repeat("a", 4096))},
-			want:  "test" + strings.Repeat("a", 4096),
-			wantN: 4100,
-		},
-		{
-			parts: nil,
-			want:  "",
-			wantN: 0,
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			buf := &bytes.Buffer{}
-			s := &contentStream{
-				parts: tt.parts,
-			}
-			gotN, err := io.Copy(buf, s)
-			if (err != nil) != tt.wantErr {
-				t.Errorf("contentStream.Read() error = %v, wantErr %v", err, tt.wantErr)
-				return
-			}
-			if gotN != tt.wantN {
-				t.Errorf("expected %d but got %d", tt.wantN, gotN)
-			}
-			if !bytes.Equal([]byte(tt.want), buf.Bytes()) {
-				t.Errorf("contentStream.Read():\n%s\n%s", hex.Dump(buf.Bytes()), hex.Dump([]byte(tt.want)))
-			}
-		})
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/release/mirror.go b/vendor/github.com/openshift/oc/pkg/cli/admin/release/mirror.go
deleted file mode 100644
index db45400846c0..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/release/mirror.go
+++ /dev/null
@@ -1,533 +0,0 @@
-package release
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-	"os"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-
-	digest "github.com/opencontainers/go-digest"
-	"github.com/spf13/cobra"
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/client-go/util/retry"
-	"k8s.io/klog"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-	"sigs.k8s.io/yaml"
-
-	imagev1 "github.com/openshift/api/image/v1"
-	operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1"
-	imageclient "github.com/openshift/client-go/image/clientset/versioned"
-	"github.com/openshift/library-go/pkg/image/dockerv1client"
-	imagereference "github.com/openshift/library-go/pkg/image/reference"
-	"github.com/openshift/oc/pkg/cli/image/extract"
-	imagemanifest "github.com/openshift/oc/pkg/cli/image/manifest"
-	"github.com/openshift/oc/pkg/cli/image/mirror"
-)
-
-// NewMirrorOptions creates the options for mirroring a release.
-func NewMirrorOptions(streams genericclioptions.IOStreams) *MirrorOptions {
-	return &MirrorOptions{
-		IOStreams:       streams,
-		ParallelOptions: imagemanifest.ParallelOptions{MaxPerRegistry: 6},
-	}
-}
-
-// NewMirror creates a command to mirror an existing release.
-//
-// Example command to mirror a release to a local repository to work offline
-//
-// $ oc adm release mirror \
-//     --from=registry.svc.ci.openshift.org/openshift/v4.0 \
-//     --to=mycompany.com/myrepository/repo
-//
-func NewMirror(f kcmdutil.Factory, parentName string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewMirrorOptions(streams)
-	cmd := &cobra.Command{
-		Use:   "mirror",
-		Short: "Mirror a release to a different image registry location",
-		Long: templates.LongDesc(`
-			Mirror an OpenShift release image to another registry
-
-			Copies the images and update payload for a given release from one registry to another.
-			By default this command will not alter the payload and will print out the configuration
-			that must be applied to a cluster to use the mirror, but you may opt to rewrite the
-			update to point to the new location and lose the cryptographic integrity of the update.
-
-			The common use for this command is to mirror a specific OpenShift release version to
-			a private registry for use in a disconnected or offline context. The command copies all
-			images that are part of a release into the target repository and then prints the
-			correct information to give to OpenShift to use that content offline. An alternate mode
-			is to specify --to-image-stream, which imports the images directly into an OpenShift
-			image stream.
-		`),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(cmd, f, args))
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-	flags := cmd.Flags()
-	o.SecurityOptions.Bind(flags)
-	o.ParallelOptions.Bind(flags)
-
-	flags.StringVar(&o.From, "from", o.From, "Image containing the release payload.")
-	flags.StringVar(&o.To, "to", o.To, "An image repository to push to.")
-	flags.StringVar(&o.ToImageStream, "to-image-stream", o.ToImageStream, "An image stream to tag images into.")
-	flags.BoolVar(&o.DryRun, "dry-run", o.DryRun, "Display information about the mirror without actually executing it.")
-
-	flags.BoolVar(&o.SkipRelease, "skip-release-image", o.SkipRelease, "Do not push the release image.")
-	flags.StringVar(&o.ToRelease, "to-release-image", o.ToRelease, "Specify an alternate location for the release image instead of the 'release' tag in --to.")
-	return cmd
-}
-
-type MirrorOptions struct {
-	genericclioptions.IOStreams
-
-	SecurityOptions imagemanifest.SecurityOptions
-	ParallelOptions imagemanifest.ParallelOptions
-
-	From string
-
-	To            string
-	ToImageStream string
-
-	ToRelease   string
-	SkipRelease bool
-
-	DryRun bool
-
-	ClientFn func() (imageclient.Interface, string, error)
-
-	ImageStream *imagev1.ImageStream
-	TargetFn    func(component string) imagereference.DockerImageReference
-}
-
-func (o *MirrorOptions) Complete(cmd *cobra.Command, f kcmdutil.Factory, args []string) error {
-	switch {
-	case len(args) == 0 && len(o.From) == 0:
-		return fmt.Errorf("must specify a release image with --from")
-	case len(args) == 1 && len(o.From) == 0:
-		o.From = args[0]
-	case len(args) == 1 && len(o.From) > 0:
-		return fmt.Errorf("you may not specify an argument and --from")
-	case len(args) > 1:
-		return fmt.Errorf("only one argument is accepted")
-	}
-	o.ClientFn = func() (imageclient.Interface, string, error) {
-		cfg, err := f.ToRESTConfig()
-		if err != nil {
-			return nil, "", err
-		}
-		client, err := imageclient.NewForConfig(cfg)
-		if err != nil {
-			return nil, "", err
-		}
-		ns, _, err := f.ToRawKubeConfigLoader().Namespace()
-		if err != nil {
-			return nil, "", err
-		}
-		return client, ns, nil
-	}
-	return nil
-}
-
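-// replaceComponentMarker is substituted for ${component} when validating a
-// templated --to value (e.g. --to=registry.example.com/ocp/${component},
-// an illustrative value), since ${component} itself is not a valid image
-// reference.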
-const replaceComponentMarker = "X-X-X-X-X-X-X"
-
-func (o *MirrorOptions) Run() error {
-	if len(o.From) == 0 && o.ImageStream == nil {
-		return fmt.Errorf("must specify a release image with --from")
-	}
-
-	if (len(o.To) == 0) == (len(o.ToImageStream) == 0) {
-		return fmt.Errorf("exactly one of --to or --to-image-stream must be specified to mirror the release to")
-	}
-
-	if o.SkipRelease && len(o.ToRelease) > 0 {
-		return fmt.Errorf("--skip-release-image and --to-release-image may not both be specified")
-	}
-
-	var recreateRequired bool
-	var hasPrefix bool
-	var targetFn func(name string) mirror.MirrorReference
-	var dst string
-	if len(o.ToImageStream) > 0 {
-		dst = imagereference.DockerImageReference{
-			Registry:  "example.com",
-			Namespace: "somenamespace",
-			Name:      "mirror",
-		}.Exact()
-	} else {
-		dst = o.To
-	}
-
-	if strings.Contains(dst, "${component}") {
-		format := strings.Replace(dst, "${component}", replaceComponentMarker, -1)
-		dstRef, err := mirror.ParseMirrorReference(format)
-		if err != nil {
-			return fmt.Errorf("--to must be a valid image reference: %v", err)
-		}
-		targetFn = func(name string) mirror.MirrorReference {
-			value := strings.Replace(dst, "${component}", name, -1)
-			ref, err := mirror.ParseMirrorReference(value)
-			if err != nil {
-				klog.Fatalf("requested component %q could not be injected into %s: %v", name, dst, err)
-			}
-			return ref
-		}
-		replaceCount := strings.Count(dst, "${component}")
-		recreateRequired = replaceCount > 1 || (replaceCount == 1 && !strings.Contains(dstRef.Tag, replaceComponentMarker))
-
-	} else {
-		ref, err := mirror.ParseMirrorReference(dst)
-		if err != nil {
-			return fmt.Errorf("--to must be a valid image repository: %v", err)
-		}
-		if len(ref.ID) > 0 || len(ref.Tag) > 0 {
-			return fmt.Errorf("--to must be to an image repository and may not contain a tag or digest")
-		}
-		targetFn = func(name string) mirror.MirrorReference {
-			copied := ref
-			copied.Tag = name
-			return copied
-		}
-		hasPrefix = true
-	}
-
-	o.TargetFn = func(name string) imagereference.DockerImageReference {
-		ref := targetFn(name)
-		return ref.DockerImageReference
-	}
-
-	if recreateRequired {
-		return fmt.Errorf("when mirroring to multiple repositories, use the new release command with --from-release and --mirror")
-	}
-
-	verifier := imagemanifest.NewVerifier()
-	is := o.ImageStream
-	if is == nil {
-		o.ImageStream = &imagev1.ImageStream{}
-		is = o.ImageStream
-		// load image references
-		buf := &bytes.Buffer{}
-		extractOpts := NewExtractOptions(genericclioptions.IOStreams{Out: buf, ErrOut: o.ErrOut})
-		extractOpts.SecurityOptions = o.SecurityOptions
-		extractOpts.ImageMetadataCallback = func(m *extract.Mapping, dgst, contentDigest digest.Digest, config *dockerv1client.DockerImageConfig) {
-			verifier.Verify(dgst, contentDigest)
-		}
-		extractOpts.From = o.From
-		extractOpts.File = "image-references"
-		if err := extractOpts.Run(); err != nil {
-			return fmt.Errorf("unable to retrieve release image info: %v", err)
-		}
-		if err := json.Unmarshal(buf.Bytes(), &is); err != nil {
-			return fmt.Errorf("unable to load image-references from release payload: %v", err)
-		}
-		if is.Kind != "ImageStream" || is.APIVersion != "image.openshift.io/v1" {
-			return fmt.Errorf("unrecognized image-references in release payload")
-		}
-		if !verifier.Verified() {
-			err := fmt.Errorf("the release image failed content verification and may have been tampered with")
-			if !o.SecurityOptions.SkipVerification {
-				return err
-			}
-			fmt.Fprintf(o.ErrOut, "warning: %v\n", err)
-		}
-	}
-
-	var mappings []mirror.Mapping
-	if len(o.From) > 0 && !o.SkipRelease {
-		src := o.From
-		srcRef, err := imagereference.Parse(src)
-		if err != nil {
-			return err
-		}
-		if len(o.ToRelease) > 0 {
-			dstRef, err := imagereference.Parse(o.ToRelease)
-			if err != nil {
-				return fmt.Errorf("invalid --to-release-image: %v", err)
-			}
-			mappings = append(mappings, mirror.Mapping{
-				Type:        mirror.DestinationRegistry,
-				Source:      srcRef,
-				Destination: dstRef,
-				Name:        o.ToRelease,
-			})
-		} else if !o.SkipRelease {
-			dstRef := targetFn("release")
-			mappings = append(mappings, mirror.Mapping{
-				Source:      srcRef,
-				Type:        dstRef.Type(),
-				Destination: dstRef.Combined(),
-				Name:        "release",
-			})
-		}
-	}
-
-	repositories := make(map[string]struct{})
-
-	// build the mapping list for mirroring and rewrite if necessary
-	for i := range is.Spec.Tags {
-		tag := &is.Spec.Tags[i]
-		if tag.From == nil || tag.From.Kind != "DockerImage" {
-			continue
-		}
-		from, err := imagereference.Parse(tag.From.Name)
-		if err != nil {
-			return fmt.Errorf("release tag %q is not valid: %v", tag.Name, err)
-		}
-		if len(from.Tag) > 0 || len(from.ID) == 0 {
-			return fmt.Errorf("image-references should only contain pointers to images by digest: %s", tag.From.Name)
-		}
-
-		// Create a unique map of repos as keys
-		currentRepo := from.AsRepository().String()
-		repositories[currentRepo] = struct{}{}
-
-		dstMirrorRef := targetFn(tag.Name)
-		mappings = append(mappings, mirror.Mapping{
-			Source:      from,
-			Type:        dstMirrorRef.Type(),
-			Destination: dstMirrorRef.Combined(),
-			Name:        tag.Name,
-		})
-		klog.V(2).Infof("Mapping %#v", mappings[len(mappings)-1])
-
-		dstRef := targetFn(tag.Name)
-		dstRef.Tag = ""
-		dstRef.ID = from.ID
-		tag.From.Name = dstRef.Exact()
-	}
-
-	if len(mappings) == 0 {
-		fmt.Fprintf(o.ErrOut, "warning: Release image contains no image references - is this a valid release?\n")
-	}
-
-	if len(o.ToImageStream) > 0 {
-		remaining := make(map[string]mirror.Mapping)
-		for _, mapping := range mappings {
-			remaining[mapping.Name] = mapping
-		}
-		client, ns, err := o.ClientFn()
-		if err != nil {
-			return err
-		}
-		hasErrors := make(map[string]error)
-		maxPerIteration := 12
-
-		for retries := 4; (len(remaining) > 0 || len(hasErrors) > 0) && retries > 0; {
-			if len(remaining) == 0 {
-				for _, mapping := range mappings {
-					if _, ok := hasErrors[mapping.Name]; ok {
-						remaining[mapping.Name] = mapping
-						delete(hasErrors, mapping.Name)
-					}
-				}
-				retries--
-			}
-			err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
-				isi := &imagev1.ImageStreamImport{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: o.ToImageStream,
-					},
-					Spec: imagev1.ImageStreamImportSpec{
-						Import: !o.DryRun,
-					},
-				}
-				for _, mapping := range remaining {
-					isi.Spec.Images = append(isi.Spec.Images, imagev1.ImageImportSpec{
-						From: corev1.ObjectReference{
-							Kind: "DockerImage",
-							Name: mapping.Source.Exact(),
-						},
-						To: &corev1.LocalObjectReference{
-							Name: mapping.Name,
-						},
-					})
-					if len(isi.Spec.Images) >= maxPerIteration {
-						break
-					}
-				}
-
-				// use RESTClient directly here to be able to extend request timeout
-				result := &imagev1.ImageStreamImport{}
-				if err := client.ImageV1().RESTClient().Post().
-					Namespace(ns).
-					Resource(imagev1.Resource("imagestreamimports").Resource).
-					Body(isi).
-				// this instructs the api server to allow our request to take up to three minutes - chosen as a high boundary
-				Timeout(3 * time.Minute).
-					Do().
-					Into(result); err != nil {
-					return err
-				}
-
-				for i, image := range result.Status.Images {
-					name := result.Spec.Images[i].To.Name
-					klog.V(4).Infof("Import result for %s: %#v", name, image.Status)
-					if image.Status.Status == metav1.StatusSuccess {
-						delete(remaining, name)
-						delete(hasErrors, name)
-					} else {
-						source := remaining[name].Source
-						delete(remaining, name)
-						err := errors.FromObject(&image.Status)
-						hasErrors[name] = err
-						klog.V(2).Infof("Failed to import %s as tag %s: %v", source, name, err)
-					}
-				}
-				return nil
-			})
-			if err != nil {
-				return err
-			}
-		}
-
-		if len(hasErrors) > 0 {
-			var messages []string
-			for k, v := range hasErrors {
-				messages = append(messages, fmt.Sprintf("%s: %v", k, v))
-			}
-			sort.Strings(messages)
-			if len(messages) == 1 {
-				return fmt.Errorf("unable to import a release image: %s", messages[0])
-			}
-			return fmt.Errorf("unable to import some release images:\n* %s", strings.Join(messages, "\n* "))
-		}
-
-		fmt.Fprintf(os.Stderr, "Mirrored %d images to %s/%s\n", len(mappings), ns, o.ToImageStream)
-		return nil
-	}
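The import loop above is dense: it batches at most maxPerIteration images per ImageStreamImport request and re-queues failures up to four times. A reduced sketch of that control flow, with plain strings instead of mirror.Mapping and a stubbed submitBatch in place of the REST call (both are assumptions for brevity):

```go
package main

import "fmt"

// submitBatch stands in for one ImageStreamImport POST; it reports
// which names failed this round (stubbed for the sketch).
func submitBatch(names []string) map[string]error {
	return nil // pretend everything imported cleanly
}

func importAll(all []string, maxPerIteration int) map[string]error {
	remaining := map[string]struct{}{}
	for _, n := range all {
		remaining[n] = struct{}{}
	}
	hasErrors := map[string]error{}
	for retries := 4; (len(remaining) > 0 || len(hasErrors) > 0) && retries > 0; {
		if len(remaining) == 0 { // re-queue previous failures and burn a retry
			for n := range hasErrors {
				remaining[n] = struct{}{}
				delete(hasErrors, n)
			}
			retries--
		}
		// take at most one batch worth of names per request
		batch := make([]string, 0, maxPerIteration)
		for n := range remaining {
			batch = append(batch, n)
			if len(batch) == maxPerIteration {
				break
			}
		}
		failed := submitBatch(batch)
		for _, n := range batch {
			delete(remaining, n)
			if err, ok := failed[n]; ok {
				hasErrors[n] = err
			}
		}
	}
	return hasErrors
}

func main() {
	fmt.Println(importAll([]string{"cli", "installer", "release"}, 12))
}
```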
-
-	fmt.Fprintf(os.Stderr, "info: Mirroring %d images to %s ...\n", len(mappings), dst)
-	var lock sync.Mutex
-	opts := mirror.NewMirrorImageOptions(genericclioptions.IOStreams{Out: o.Out, ErrOut: o.ErrOut})
-	opts.SecurityOptions = o.SecurityOptions
-	opts.ParallelOptions = o.ParallelOptions
-	opts.Mappings = mappings
-	opts.DryRun = o.DryRun
-	opts.ManifestUpdateCallback = func(registry string, manifests map[digest.Digest]digest.Digest) error {
-		lock.Lock()
-		defer lock.Unlock()
-
-		// when uploading to a schema1 registry, manifest ids change and we must remap them
-		for i := range is.Spec.Tags {
-			tag := &is.Spec.Tags[i]
-			if tag.From == nil || tag.From.Kind != "DockerImage" {
-				continue
-			}
-			ref, err := imagereference.Parse(tag.From.Name)
-			if err != nil {
-				return fmt.Errorf("unable to parse image reference %s (%s): %v", tag.Name, tag.From.Name, err)
-			}
-			if ref.Registry != registry {
-				continue
-			}
-			if changed, ok := manifests[digest.Digest(ref.ID)]; ok {
-				ref.ID = changed.String()
-				klog.V(4).Infof("During mirroring, image %s was updated to digest %s", tag.From.Name, changed)
-				tag.From.Name = ref.Exact()
-			}
-		}
-		return nil
-	}
-	if err := opts.Run(); err != nil {
-		return err
-	}
-
-	to := o.ToRelease
-	if len(to) == 0 {
-		to = targetFn("release").String()
-	}
-	if hasPrefix {
-		fmt.Fprintf(o.Out, "\nSuccess\nUpdate image:  %s\nMirror prefix: %s\n", to, o.To)
-	} else {
-		fmt.Fprintf(o.Out, "\nSuccess\nUpdate image:  %s\nMirrored to: %s\n", to, o.To)
-	}
-
-	if err := printImageContentInstructions(o.Out, o.From, o.To, repositories); err != nil {
-		return fmt.Errorf("Error creating mirror usage instructions: %v", err)
-	}
-	return nil
-}
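For context on the ManifestUpdateCallback wired up above: a registry that only accepts Docker schema1 re-serializes each manifest on upload, so the stored digest differs from the source digest and every digest-pinned reference must be rewritten afterward. A minimal sketch of that remapping, assuming plain strings in place of digest.Digest and the image stream types:

```go
package main

import "fmt"

// remap rewrites digest-pinned references when the upload produced a
// new manifest digest (as happens with schema1 conversion).
func remap(refs map[string]string, changed map[string]string) {
	for tag, dgst := range refs {
		if updated, ok := changed[dgst]; ok {
			refs[tag] = updated
		}
	}
}

func main() {
	refs := map[string]string{"cli": "sha256:aaa", "installer": "sha256:bbb"}
	changed := map[string]string{"sha256:aaa": "sha256:ccc"} // reported by the push
	remap(refs, changed)
	fmt.Println(refs) // map[cli:sha256:ccc installer:sha256:bbb]
}
```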
-
-// printImageContentInstructions provides examples to the user for using the new repository mirror
-// https://github.com/openshift/installer/blob/master/docs/dev/alternative_release_image_sources.md
-func printImageContentInstructions(out io.Writer, from, to string, repositories map[string]struct{}) error {
-	type installConfigSubsection struct {
-		ImageContentSources []operatorv1alpha1.RepositoryDigestMirrors `json:"imageContentSources"`
-	}
-
-	var sources []operatorv1alpha1.RepositoryDigestMirrors
-
-	mirrorRef, err := imagereference.Parse(to)
-	if err != nil {
-		return fmt.Errorf("Unable to parse image reference '%s': %v", to, err)
-	}
-	mirrorRepo := mirrorRef.AsRepository().String()
-
-	sourceRef, err := imagereference.Parse(from)
-	if err != nil {
-		return fmt.Errorf("Unable to parse image reference '%s': %v", from, err)
-	}
-	sourceRepo := sourceRef.AsRepository().String()
-	repositories[sourceRepo] = struct{}{}
-
-	for repository := range repositories {
-		sources = append(sources, operatorv1alpha1.RepositoryDigestMirrors{
-			Source:  repository,
-			Mirrors: []string{mirrorRepo},
-		})
-	}
-	sort.Slice(sources, func(i, j int) bool {
-		return sources[i].Source < sources[j].Source
-	})
-
-	// Create and display install-config.yaml example
-	imageContentSources := installConfigSubsection{
-		ImageContentSources: sources}
-	installConfigExample, err := yaml.Marshal(imageContentSources)
-	if err != nil {
-		return fmt.Errorf("Unable to marshal install-config.yaml example yaml: %v", err)
-	}
-	fmt.Fprintf(out, "\nTo use the new mirrored repository to install, add the following section to the install-config.yaml:\n\n")
-	fmt.Fprint(out, string(installConfigExample))
-
-	// Create and display ImageContentSourcePolicy example
-	icsp := operatorv1alpha1.ImageContentSourcePolicy{
-		TypeMeta: metav1.TypeMeta{
-			APIVersion: operatorv1alpha1.GroupVersion.String(),
-			Kind:       "ImageContentSourcePolicy"},
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "example",
-		},
-		Spec: operatorv1alpha1.ImageContentSourcePolicySpec{
-			RepositoryDigestMirrors: sources,
-		},
-	}
-
-	// Create an unstructured object for removing creationTimestamp
-	unstructuredObj := unstructured.Unstructured{}
-	unstructuredObj.Object, err = runtime.DefaultUnstructuredConverter.ToUnstructured(&icsp)
-	if err != nil {
-		return fmt.Errorf("ToUnstructured error: %v", err)
-	}
-	delete(unstructuredObj.Object["metadata"].(map[string]interface{}), "creationTimestamp")
-
-	icspExample, err := yaml.Marshal(unstructuredObj.Object)
-	if err != nil {
-		return fmt.Errorf("Unable to marshal ImageContentSourcePolicy example yaml: %v", err)
-	}
-	fmt.Fprintf(out, "\n\nTo use the new mirrored repository for upgrades, use the following to create an ImageContentSourcePolicy:\n\n")
-	fmt.Fprint(out, string(icspExample))
-
-	return nil
-}
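To show the shape printImageContentInstructions emits, here is a small sketch that marshals the same install-config subsection with a locally defined stand-in for operatorv1alpha1.RepositoryDigestMirrors (the repository names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

// repositoryDigestMirrors is a local stand-in for
// operatorv1alpha1.RepositoryDigestMirrors.
type repositoryDigestMirrors struct {
	Source  string   `json:"source"`
	Mirrors []string `json:"mirrors"`
}

func main() {
	section := struct {
		ImageContentSources []repositoryDigestMirrors `json:"imageContentSources"`
	}{
		ImageContentSources: []repositoryDigestMirrors{
			{
				Source:  "quay.io/openshift-release-dev/ocp-release",
				Mirrors: []string{"mirror.example.com/ocp/release"},
			},
		},
	}
	data, err := yaml.Marshal(section)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(data))
}
```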
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/release/new.go b/vendor/github.com/openshift/oc/pkg/cli/admin/release/new.go
deleted file mode 100644
index 1998f5580908..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/release/new.go
+++ /dev/null
@@ -1,1597 +0,0 @@
-package release
-
-import (
-	"archive/tar"
-	"bufio"
-	"bytes"
-	"compress/gzip"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"os"
-	"path"
-	"path/filepath"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/blang/semver"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/ghodss/yaml"
-	digest "github.com/opencontainers/go-digest"
-	"github.com/spf13/cobra"
-	"k8s.io/klog"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/client-go/pkg/version"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	"github.com/openshift/api/image/docker10"
-	imageapi "github.com/openshift/api/image/v1"
-	imageclient "github.com/openshift/client-go/image/clientset/versioned"
-	"github.com/openshift/library-go/pkg/image/dockerv1client"
-	imagereference "github.com/openshift/library-go/pkg/image/reference"
-	imageappend "github.com/openshift/oc/pkg/cli/image/append"
-	"github.com/openshift/oc/pkg/cli/image/extract"
-	imagemanifest "github.com/openshift/oc/pkg/cli/image/manifest"
-)
-
-func NewNewOptions(streams genericclioptions.IOStreams) *NewOptions {
-	return &NewOptions{
-		IOStreams:       streams,
-		ParallelOptions: imagemanifest.ParallelOptions{MaxPerRegistry: 4},
-		// TODO: only cluster-version-operator and maybe CLI should be in this list,
-		//   the others should always be referenced by the cluster-bootstrap or
-		//   another operator.
-		AlwaysInclude:  []string{"cluster-version-operator", "cli", "installer"},
-		ToImageBaseTag: "cluster-version-operator",
-		// We strongly control the set of allowed component versions to prevent confusion
-		// about what component versions may be used for. Changing this list requires
-		// approval from the release architects.
-		AllowedComponents: []string{"kubernetes"},
-	}
-}
-
-func NewRelease(f kcmdutil.Factory, parentName string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewNewOptions(streams)
-	cmd := &cobra.Command{
-		Use:   "new [SRC=DST ...]",
-		Short: "Create a new OpenShift release",
-		Long: templates.LongDesc(`
-			Build a new OpenShift release image that will update a cluster
-
-			OpenShift uses long-running active management processes called "operators" to
-			keep the cluster running and manage component lifecycle. This command
-			composes a set of images with operator definitions into a single update payload
-			that can be used to install or update a cluster.
-
-			Operators are expected to host the config they need to be installed to a cluster
-			in the '/manifests' directory in their image. This command iterates over a set of
-			operator images and extracts those manifests into a single, ordered list of
-			Kubernetes objects that can then be iteratively updated on a cluster by the
-			cluster version operator when it is time to perform an update. Manifest files are
-			renamed to '0000_70_<component>_<filename>' by default, and an operator author that
-			needs to provide a global-ordered file (before or after other operators) should
-			prepend '0000_NN_<component>_' to their filename, which instructs the release builder
-			to not assign a component prefix. Only images in the input that have the image label
-			'io.openshift.release.operator=true' will have manifests loaded.
-
-			If an image is in the input but is not referenced by an operator's image-references
-			file, the image will not be included in the final release image unless
-			--include=NAME is provided.
-
-			Mappings specified via SRC=DST positional arguments allow overriding particular
-			operators with a specific image.  For example:
-
-			cluster-version-operator=registry.example.com/openshift/cluster-version-operator:test-123
-
-			will override the default cluster-version-operator image with one pulled from
-			registry.example.com.
-		`),
-		Example: templates.Examples(fmt.Sprintf(`
-			# Create a release from the latest origin images and push to a DockerHub repo
-			%[1]s new --from-image-stream=4.1 -n origin --to-image docker.io/mycompany/myrepo:latest
-
-			# Create a new release with updated metadata from a previous release
-			%[1]s new --from-release registry.svc.ci.openshift.org/origin/release:v4.1 --name 4.1.1 \
-				--previous 4.1.0 --metadata ... --to-image docker.io/mycompany/myrepo:latest
-
-			# Create a new release and override a single image
-			%[1]s new --from-release registry.svc.ci.openshift.org/origin/release:v4.1 \
-				cli=docker.io/mycompany/cli:latest --to-image docker.io/mycompany/myrepo:latest
-
-			# Run a verification pass to ensure the release can be reproduced
-			%[1]s new --from-release registry.svc.ci.openshift.org/origin/release:v4.1
-				`, parentName)),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Validate())
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-	flags := cmd.Flags()
-	o.SecurityOptions.Bind(flags)
-	o.ParallelOptions.Bind(flags)
-
-	// image inputs
-	flags.StringSliceVar(&o.MappingFilenames, "mapping-file", o.MappingFilenames, "A file defining a mapping of input images to use to build the release")
-	flags.StringVar(&o.FromImageStream, "from-image-stream", o.FromImageStream, "Look at all tags in the provided image stream and build a release payload from them.")
-	flags.StringVarP(&o.FromImageStreamFile, "from-image-stream-file", "f", o.FromImageStreamFile, "Take the provided image stream on disk and build a release payload from it.")
-	flags.StringVar(&o.FromDirectory, "from-dir", o.FromDirectory, "Use this directory as the source for the release payload.")
-	flags.StringVar(&o.FromReleaseImage, "from-release", o.FromReleaseImage, "Use an existing release image as input.")
-	flags.StringVar(&o.ReferenceMode, "reference-mode", o.ReferenceMode, "By default, the image reference from an image stream points to the public registry for the stream and the image digest. Pass 'source' to build references to the originating image.")
-	flags.StringVar(&o.ExtraComponentVersions, "component-versions", o.ExtraComponentVersions, "Supply additional version strings to the release in key=value[,key=value] form.")
-
-	// properties of the release
-	flags.StringVar(&o.Name, "name", o.Name, "The name of the release. Will default to the current time.")
-	flags.StringSliceVar(&o.PreviousVersions, "previous", o.PreviousVersions, "A list of semantic versions that should precede this version in the release manifest.")
-	flags.StringVar(&o.ReleaseMetadata, "metadata", o.ReleaseMetadata, "A JSON object to attach as the metadata for the release manifest.")
-	flags.BoolVar(&o.ForceManifest, "release-manifest", o.ForceManifest, "If true, a release manifest will be created using --name as the semantic version.")
-
-	// validation
-	flags.BoolVar(&o.AllowMissingImages, "allow-missing-images", o.AllowMissingImages, "Ignore errors when an operator references a release image that is not included.")
-	flags.BoolVar(&o.SkipManifestCheck, "skip-manifest-check", o.SkipManifestCheck, "Ignore errors when an operator includes a yaml/yml/json file that is not parseable.")
-
-	flags.StringSliceVar(&o.Exclude, "exclude", o.Exclude, "A list of image names or tags to exclude. It is applied after all inputs. Comma separated or individual arguments.")
-	flags.StringSliceVar(&o.AlwaysInclude, "include", o.AlwaysInclude, "A list of image tags that should not be pruned. Excluding a tag takes precedence. Comma separated or individual arguments.")
-
-	// destination
-	flags.BoolVar(&o.DryRun, "dry-run", o.DryRun, "Skips changes to external registries via mirroring or pushing images.")
-	flags.StringVar(&o.Mirror, "mirror", o.Mirror, "Mirror the contents of the release to this repository.")
-	flags.StringVar(&o.ToDir, "to-dir", o.ToDir, "Output the release manifests to a directory instead of creating an image.")
-	flags.StringVar(&o.ToFile, "to-file", o.ToFile, "Output the release to a tar file instead of creating an image.")
-	flags.StringVar(&o.ToImage, "to-image", o.ToImage, "The location to upload the release image to.")
-	flags.StringVar(&o.ToImageBase, "to-image-base", o.ToImageBase, "If specified, the image to add the release layer on top of.")
-	flags.StringVar(&o.ToImageBaseTag, "to-image-base-tag", o.ToImageBaseTag, "If specified, the image tag in the input to add the release layer on top of. Defaults to cluster-version-operator.")
-	flags.StringVar(&o.ToSignature, "to-signature", o.ToSignature, "If specified, output a message that can be signed that describes this release. Requires --to-image.")
-
-	// misc
-	flags.StringVarP(&o.Output, "output", "o", o.Output, "Output the mapping definition in this format.")
-	flags.StringVar(&o.Directory, "dir", o.Directory, "Directory to write release contents to, will default to a temporary directory.")
-
-	return cmd
-}
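The manifest naming convention described in the command help can be reduced to a few lines. A sketch of the rule, using the '0000_50' bucket that writePayload later in this file applies (the component and file names are illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// payloadName applies the naming rule from the command help: files that
// opt into the global order keep their name, everything else is placed
// in the per-component bucket.
func payloadName(component, filename string) string {
	if strings.HasPrefix(filename, "0000_") {
		return filename // author chose a global position, e.g. 0000_10_...
	}
	return fmt.Sprintf("0000_50_%s_%s", component, filename)
}

func main() {
	fmt.Println(payloadName("cluster-version-operator", "deployment.yaml"))
	fmt.Println(payloadName("etcd", "0000_10_etcd_crd.yaml"))
}
```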
-
-type NewOptions struct {
-	genericclioptions.IOStreams
-
-	SecurityOptions imagemanifest.SecurityOptions
-	ParallelOptions imagemanifest.ParallelOptions
-
-	FromDirectory    string
-	Directory        string
-	MappingFilenames []string
-	Output           string
-	Name             string
-
-	FromReleaseImage string
-
-	FromImageStream     string
-	FromImageStreamFile string
-	Namespace           string
-	ReferenceMode       string
-
-	ExtraComponentVersions string
-	AllowedComponents      []string
-
-	Exclude       []string
-	AlwaysInclude []string
-
-	ForceManifest    bool
-	ReleaseMetadata  string
-	PreviousVersions []string
-
-	DryRun bool
-
-	ToFile         string
-	ToDir          string
-	ToImage        string
-	ToImageBase    string
-	ToImageBaseTag string
-	ToSignature    string
-
-	Mirror string
-
-	AllowMissingImages bool
-	SkipManifestCheck  bool
-
-	Mappings []Mapping
-
-	ImageClient imageclient.Interface
-
-	VerifyOutputFn func(dgst digest.Digest) error
-
-	cleanupFns []func()
-}
-
-func (o *NewOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	overlap := make(map[string]string)
-	var mappings []Mapping
-	for _, filename := range o.MappingFilenames {
-		fileMappings, err := parseFile(filename, overlap)
-		if err != nil {
-			return err
-		}
-		mappings = append(mappings, fileMappings...)
-	}
-	argMappings, err := parseArgs(args, overlap)
-	if err != nil {
-		return err
-	}
-	mappings = append(mappings, argMappings...)
-	o.Mappings = mappings
-
-	if len(o.FromImageStream) > 0 {
-		cfg, err := f.ToRESTConfig()
-		if err != nil {
-			return err
-		}
-		client, err := imageclient.NewForConfig(cfg)
-		if err != nil {
-			return err
-		}
-		o.ImageClient = client
-		if len(o.Namespace) == 0 {
-			namespace, _, err := f.ToRawKubeConfigLoader().Namespace()
-			if err != nil {
-				return err
-			}
-			o.Namespace = namespace
-		}
-	}
-	return nil
-}
-
-func (o *NewOptions) Validate() error {
-	sources := 0
-	if len(o.FromImageStream) > 0 {
-		sources++
-	}
-	if len(o.FromImageStreamFile) > 0 {
-		sources++
-	}
-	if len(o.FromReleaseImage) > 0 {
-		sources++
-	}
-	if len(o.FromDirectory) > 0 {
-		sources++
-	}
-	if sources > 1 {
-		return fmt.Errorf("only one of --from-image-stream, --from-image-stream-file, --from-release, or --from-dir may be specified")
-	}
-	if sources == 0 {
-		if len(o.Mappings) == 0 {
-			return fmt.Errorf("must specify image mappings when no other source is defined")
-		}
-	}
-	if len(o.ToSignature) > 0 && len(o.ToImage) == 0 {
-		return fmt.Errorf("--to-signature requires --to-image")
-	}
-	if len(o.Mirror) > 0 && o.ReferenceMode != "" && o.ReferenceMode != "public" {
-		return fmt.Errorf("--reference-mode must be public or empty when using --mirror")
-	}
-	return nil
-}
-
-type imageData struct {
-	Ref           imagereference.DockerImageReference
-	Config        *dockerv1client.DockerImageConfig
-	Digest        digest.Digest
-	ContentDigest digest.Digest
-	Directory     string
-}
-
-func findStatusTagEvents(tags []imageapi.NamedTagEventList, name string) *imageapi.NamedTagEventList {
-	for i := range tags {
-		tag := &tags[i]
-		if tag.Tag != name {
-			continue
-		}
-		return tag
-	}
-	return nil
-}
-
-func findStatusTagEvent(tags []imageapi.NamedTagEventList, name string) *imageapi.TagEvent {
-	events := findStatusTagEvents(tags, name)
-	if events == nil || len(events.Items) == 0 {
-		return nil
-	}
-	return &events.Items[0]
-}
-
-func findSpecTag(tags []imageapi.TagReference, name string) *imageapi.TagReference {
-	for i, tag := range tags {
-		if tag.Name != name {
-			continue
-		}
-		return &tags[i]
-	}
-	return nil
-}
-
-type CincinnatiMetadata struct {
-	Kind string `json:"kind"`
-
-	Version  string   `json:"version"`
-	Previous []string `json:"previous"`
-
-	Metadata map[string]interface{} `json:"metadata,omitempty"`
-}
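As a reference for what --metadata accepts and what release-metadata contains, this sketch serializes a CincinnatiMetadata-shaped value (the struct is redeclared locally so the example runs standalone; the field values are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// cincinnatiMetadata has the same shape as CincinnatiMetadata above.
type cincinnatiMetadata struct {
	Kind     string                 `json:"kind"`
	Version  string                 `json:"version"`
	Previous []string               `json:"previous"`
	Metadata map[string]interface{} `json:"metadata,omitempty"`
}

func main() {
	cm := cincinnatiMetadata{
		Kind:     "cincinnati-metadata-v0",
		Version:  "4.1.1",
		Previous: []string{"4.1.0"},
		Metadata: map[string]interface{}{"url": "https://example.com/errata"}, // illustrative
	}
	data, _ := json.MarshalIndent(cm, "", "  ")
	fmt.Println(string(data))
}
```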
-
-func (o *NewOptions) cleanup() {
-	for _, fn := range o.cleanupFns {
-		fn()
-	}
-	o.cleanupFns = nil
-}
-
-func (o *NewOptions) Run() error {
-	defer o.cleanup()
-
-	// check parameters
-	extraComponentVersions, err := parseComponentVersionsLabel(o.ExtraComponentVersions)
-	if err != nil {
-		return fmt.Errorf("--component-versions is invalid: %v", err)
-	}
-	if len(o.Name) > 0 {
-		if _, err := semver.Parse(o.Name); err != nil {
-			return fmt.Errorf("--name must be a semantic version: %v", err)
-		}
-	}
-	if len(o.ReleaseMetadata) > 0 {
-		if err := json.Unmarshal([]byte(o.ReleaseMetadata), &CincinnatiMetadata{}); err != nil {
-			return fmt.Errorf("invalid --metadata: %v", err)
-		}
-	}
-
-	hasMetadataOverrides := len(o.Name) > 0 ||
-		len(o.ReleaseMetadata) > 0 ||
-		len(o.PreviousVersions) > 0 ||
-		len(o.ToImageBase) > 0 ||
-		len(o.ExtraComponentVersions) > 0
-
-	exclude := sets.NewString()
-	for _, s := range o.Exclude {
-		exclude.Insert(s)
-	}
-
-	metadata := make(map[string]imageData)
-	var ordered []string
-	var is *imageapi.ImageStream
-	now := time.Now().UTC().Truncate(time.Second)
-
-	switch {
-	case len(o.FromReleaseImage) > 0:
-		ref, err := imagereference.Parse(o.FromReleaseImage)
-		if err != nil {
-			return fmt.Errorf("--from-release was not a valid pullspec: %v", err)
-		}
-
-		verifier := imagemanifest.NewVerifier()
-		var releaseDigest digest.Digest
-		var baseDigest string
-		var imageReferencesData, releaseMetadata []byte
-
-		buf := &bytes.Buffer{}
-		extractOpts := extract.NewOptions(genericclioptions.IOStreams{Out: buf, ErrOut: o.ErrOut})
-		extractOpts.SecurityOptions = o.SecurityOptions
-		extractOpts.OnlyFiles = true
-		extractOpts.Mappings = []extract.Mapping{
-			{
-				ImageRef: ref,
-				From:     "release-manifests/",
-			},
-		}
-		extractOpts.ImageMetadataCallback = func(m *extract.Mapping, dgst, contentDigest digest.Digest, config *dockerv1client.DockerImageConfig) {
-			verifier.Verify(dgst, contentDigest)
-			releaseDigest = contentDigest
-			if config.Config != nil {
-				baseDigest = config.Config.Labels[annotationReleaseBaseImageDigest]
-				klog.V(4).Infof("Release image was built on top of %s", baseDigest)
-			}
-		}
-		extractOpts.TarEntryCallback = func(hdr *tar.Header, _ extract.LayerInfo, r io.Reader) (bool, error) {
-			var err error
-			if hdr.Name == "image-references" {
-				imageReferencesData, err = ioutil.ReadAll(r)
-				if err != nil {
-					return false, err
-				}
-			}
-			if hdr.Name == "release-metadata" {
-				releaseMetadata, err = ioutil.ReadAll(r)
-				if err != nil {
-					return false, err
-				}
-			}
-			if len(imageReferencesData) > 0 && len(releaseMetadata) > 0 {
-				return false, nil
-			}
-			return true, nil
-		}
-		if err := extractOpts.Run(); err != nil {
-			return err
-		}
-		if len(imageReferencesData) == 0 {
-			return fmt.Errorf("release image did not contain any image-references content")
-		}
-		if !verifier.Verified() {
-			err := fmt.Errorf("the input release image failed content verification and may have been tampered with")
-			if !o.SecurityOptions.SkipVerification {
-				return err
-			}
-			fmt.Fprintf(o.ErrOut, "warning: %v\n", err)
-		}
-
-		inputIS, err := readReleaseImageReferences(imageReferencesData)
-		if err != nil {
-			return fmt.Errorf("unable to load image-references from release contents: %v", err)
-		}
-		var cm CincinnatiMetadata
-		if err := json.Unmarshal(releaseMetadata, &cm); err != nil {
-			return fmt.Errorf("unable to load release-metadata from release contents: %v", err)
-		}
-
-		is = inputIS.DeepCopy()
-
-		for _, tag := range is.Spec.Tags {
-			ordered = append(ordered, tag.Name)
-		}
-
-		// default the base image to a matching release payload digest or error
-		if len(o.ToImageBase) == 0 && len(baseDigest) > 0 {
-			for _, tag := range is.Spec.Tags {
-				if tag.From == nil || tag.From.Kind != "DockerImage" {
-					continue
-				}
-				ref, err := imagereference.Parse(tag.From.Name)
-				if err != nil {
-					return fmt.Errorf("release image contains unparseable reference for %q: %v", tag.Name, err)
-				}
-				if ref.ID == baseDigest {
-					o.ToImageBase = tag.From.Name
-					break
-				}
-			}
-			if len(o.ToImageBase) == 0 {
-				return fmt.Errorf("unable to find an image within the release that matches the base image manifest %q, please specify --to-image-base", baseDigest)
-			}
-		}
-
-		if len(o.Name) == 0 {
-			o.Name = is.Name
-		}
-		if len(o.ReleaseMetadata) == 0 && cm.Metadata != nil {
-			data, err := json.Marshal(cm.Metadata)
-			if err != nil {
-				return fmt.Errorf("unable to marshal release metadata: %v", err)
-			}
-			o.ReleaseMetadata = string(data)
-		}
-		if o.PreviousVersions == nil {
-			o.PreviousVersions = cm.Previous
-		}
-
-		if hasMetadataOverrides {
-			if is.Annotations == nil {
-				is.Annotations = map[string]string{}
-			}
-			is.Annotations[annotationReleaseFromRelease] = o.FromReleaseImage
-			fmt.Fprintf(o.ErrOut, "info: Found %d images in release\n", len(is.Spec.Tags))
-
-		} else {
-			klog.V(2).Infof("No metadata changes, building canonical release")
-			now = is.CreationTimestamp.Time.UTC()
-			if o.VerifyOutputFn == nil {
-				o.VerifyOutputFn = func(actual digest.Digest) error {
-					// TODO: check contents, digests, image stream, the layers, and the manifest
-					if actual != releaseDigest {
-						return fmt.Errorf("the release could not be reproduced from its inputs")
-					}
-					return nil
-				}
-			}
-			if len(ref.Tag) > 0 {
-				fmt.Fprintf(o.ErrOut, "info: Release %s built from %d images\n", releaseDigest, len(is.Spec.Tags))
-			} else {
-				fmt.Fprintf(o.ErrOut, "info: Release built from %d images\n", len(is.Spec.Tags))
-			}
-		}
-
-	case len(o.FromImageStream) > 0, len(o.FromImageStreamFile) > 0:
-		is = &imageapi.ImageStream{}
-		is.Annotations = map[string]string{}
-		if len(o.FromImageStream) > 0 && len(o.Namespace) > 0 {
-			is.Annotations[annotationReleaseFromImageStream] = fmt.Sprintf("%s/%s", o.Namespace, o.FromImageStream)
-		}
-
-		var inputIS *imageapi.ImageStream
-		if len(o.FromImageStreamFile) > 0 {
-			data, err := filenameContents(o.FromImageStreamFile, o.IOStreams.In)
-			if os.IsNotExist(err) {
-				return err
-			}
-			if err != nil {
-				return fmt.Errorf("unable to read input image stream file: %v", err)
-			}
-			is := &imageapi.ImageStream{}
-			if err := yaml.Unmarshal(data, &is); err != nil {
-				return fmt.Errorf("unable to load input image stream file: %v", err)
-			}
-			if is.Kind != "ImageStream" || is.APIVersion != "image.openshift.io/v1" {
-				return fmt.Errorf("unrecognized input image stream file, must be an ImageStream in image.openshift.io/v1")
-			}
-			inputIS = is
-
-		} else {
-			is, err := o.ImageClient.ImageV1().ImageStreams(o.Namespace).Get(o.FromImageStream, metav1.GetOptions{})
-			if err != nil {
-				return err
-			}
-			inputIS = is
-		}
-
-		if inputIS.Annotations == nil {
-			inputIS.Annotations = make(map[string]string)
-		}
-		inputIS.Annotations[annotationBuildVersions] = extraComponentVersions.String()
-		if err := resolveImageStreamTagsToReferenceMode(inputIS, is, o.ReferenceMode, exclude); err != nil {
-			return err
-		}
-
-		for _, tag := range is.Spec.Tags {
-			ordered = append(ordered, tag.Name)
-		}
-
-		fmt.Fprintf(o.ErrOut, "info: Found %d images in image stream\n", len(is.Spec.Tags))
-
-	case len(o.FromDirectory) > 0:
-		fmt.Fprintf(o.ErrOut, "info: Using %s as the input to the release\n", o.FromDirectory)
-		files, err := ioutil.ReadDir(o.FromDirectory)
-		if err != nil {
-			return err
-		}
-		for _, f := range files {
-			if f.IsDir() {
-				name := f.Name()
-				if exclude.Has(name) {
-					klog.V(2).Infof("Excluded directory %#v", f)
-					continue
-				}
-				metadata[name] = imageData{Directory: filepath.Join(o.FromDirectory, f.Name())}
-				ordered = append(ordered, name)
-			}
-			if f.Name() == "image-references" {
-				data, err := ioutil.ReadFile(filepath.Join(o.FromDirectory, "image-references"))
-				if err != nil {
-					return err
-				}
-				overrideIS := &imageapi.ImageStream{}
-				if err := json.Unmarshal(data, overrideIS); err != nil {
-					return fmt.Errorf("unable to load image data from release directory: %v", err)
-				}
-				if overrideIS.TypeMeta.Kind != "ImageStream" || overrideIS.APIVersion != "image.openshift.io/v1" {
-					return fmt.Errorf("could not parse images: invalid kind/apiVersion")
-				}
-				is = overrideIS
-				continue
-			}
-		}
-		fmt.Fprintf(o.ErrOut, "info: Found %d operator manifest directories on disk\n", len(ordered))
-
-	default:
-		for _, m := range o.Mappings {
-			if exclude.Has(m.Source) {
-				klog.V(2).Infof("Excluded mapping %s", m.Source)
-				continue
-			}
-			ordered = append(ordered, m.Source)
-		}
-	}
-
-	name := o.Name
-	if len(name) == 0 {
-		name = "0.0.1-" + now.Format("2006-01-02-150405")
-	}
-
-	cm := &CincinnatiMetadata{Kind: "cincinnati-metadata-v0"}
-	semverName, err := semver.Parse(name)
-	if err != nil {
-		return fmt.Errorf("--name must be a semantic version: %v", err)
-	}
-	cm.Version = semverName.String()
-	if len(o.ReleaseMetadata) > 0 {
-		if err := json.Unmarshal([]byte(o.ReleaseMetadata), &cm.Metadata); err != nil {
-			return fmt.Errorf("invalid --metadata: %v", err)
-		}
-	}
-	for _, previous := range o.PreviousVersions {
-		if len(previous) == 0 {
-			continue
-		}
-		v, err := semver.Parse(previous)
-		if err != nil {
-			return fmt.Errorf("%q is not a valid semantic version: %v", previous, err)
-		}
-		cm.Previous = append(cm.Previous, v.String())
-	}
-	sort.Strings(cm.Previous)
-	if cm.Previous == nil {
-		cm.Previous = []string{}
-	}
-	klog.V(4).Infof("Release metadata:\n%s", toJSONString(cm))
-
-	if is == nil {
-		is = &imageapi.ImageStream{
-			ObjectMeta: metav1.ObjectMeta{},
-		}
-	}
-
-	is.TypeMeta = metav1.TypeMeta{APIVersion: "image.openshift.io/v1", Kind: "ImageStream"}
-	is.CreationTimestamp = metav1.Time{Time: now}
-	is.Name = name
-	if is.Annotations == nil {
-		is.Annotations = make(map[string]string)
-	}
-
-	// update any custom mappings and then sort the spec tags
-	for _, m := range o.Mappings {
-		if exclude.Has(m.Source) {
-			klog.V(2).Infof("Excluded mapping %s", m.Source)
-			continue
-		}
-		tag := hasTag(is.Spec.Tags, m.Source)
-		if tag == nil {
-			is.Spec.Tags = append(is.Spec.Tags, imageapi.TagReference{
-				Name: m.Source,
-			})
-			tag = &is.Spec.Tags[len(is.Spec.Tags)-1]
-		} else {
-			// when we override the spec, we have to reset any annotations
-			tag.Annotations = nil
-		}
-		if tag.Annotations == nil {
-			tag.Annotations = make(map[string]string)
-		}
-		tag.Annotations[annotationReleaseOverride] = "true"
-		tag.From = &corev1.ObjectReference{
-			Name: m.Destination,
-			Kind: "DockerImage",
-		}
-	}
-	sort.Slice(is.Spec.Tags, func(i, j int) bool {
-		return is.Spec.Tags[i].Name < is.Spec.Tags[j].Name
-	})
-
-	if o.Output == "json" {
-		data, err := json.MarshalIndent(is, "", "  ")
-		if err != nil {
-			return err
-		}
-		fmt.Fprintf(o.Out, "%s\n", string(data))
-		return nil
-	}
-
-	if len(o.FromDirectory) == 0 {
-		if err := o.extractManifests(is, name, metadata); err != nil {
-			return err
-		}
-
-		var filteredNames []string
-		for _, s := range ordered {
-			if _, ok := metadata[s]; ok {
-				filteredNames = append(filteredNames, s)
-			}
-		}
-		ordered = filteredNames
-	}
-
-	if len(o.Mirror) > 0 {
-		if err := o.mirrorImages(is); err != nil {
-			return err
-		}
-	}
-
-	var verifiers []PayloadVerifier
-	if !o.SkipManifestCheck {
-		verifiers = append(verifiers, func(filename string, data []byte) error {
-			for _, suffix := range []string{".json", ".yml", ".yaml"} {
-				if !strings.HasSuffix(filename, suffix) {
-					continue
-				}
-				var obj interface{}
-				if err := yaml.Unmarshal(data, &obj); err != nil {
-					// strip the slightly verbose prefix for the error message
-					msg := err.Error()
-					for _, s := range []string{"error converting YAML to JSON: ", "error unmarshaling JSON: ", "yaml: "} {
-						msg = strings.TrimPrefix(msg, s)
-					}
-					return fmt.Errorf("%s: invalid YAML/JSON: %s", filename, msg)
-				}
-				s := string(data)
-				if len(s) > 30 {
-					s = s[:30] + "..."
-				}
-				m, ok := obj.(map[string]interface{})
-				if !ok {
-					return fmt.Errorf("%s: not a valid YAML/JSON object, got: %s", filename, s)
-				}
-				if kind, ok := m["kind"].(string); !ok || kind == "" {
-					return fmt.Errorf("%s: manifests must contain Kubernetes API objects with 'kind' and 'apiVersion' set: %s", filename, s)
-				}
-				if apiVersion, ok := m["apiVersion"].(string); !ok || apiVersion == "" {
-					return fmt.Errorf("%s: manifests must contain Kubernetes API objects with 'kind' and 'apiVersion' set: %s", filename, s)
-				}
-				break
-			}
-			return nil
-		})
-	}
-
-	// any input image with content, referenced in AlwaysInclude, or referenced from image-references is
-	// included, which guarantees the content of a payload can be reproduced
-	forceInclude := append(append([]string{}, o.AlwaysInclude...), ordered...)
-	if err := pruneUnreferencedImageStreams(o.ErrOut, is, metadata, forceInclude); err != nil {
-		return err
-	}
-
-	// use a stable ordering for operators
-	sort.Strings(ordered)
-
-	var operators []string
-	pr, pw := io.Pipe()
-	go func() {
-		var err error
-		operators, err = writePayload(pw, is, cm, ordered, metadata, o.AllowMissingImages, verifiers)
-		pw.CloseWithError(err)
-	}()
-
-	br := bufio.NewReaderSize(pr, 500*1024)
-	if _, err := br.Peek(br.Size()); err != nil && err != io.EOF {
-		return fmt.Errorf("unable to create a release: %v", err)
-	}
-
-	if err := o.write(br, is, now); err != nil {
-		return err
-	}
-
-	sort.Strings(operators)
-	switch {
-	case operators == nil:
-	case len(operators) == 0:
-		fmt.Fprintf(o.ErrOut, "warning: No operator metadata was found, no operators will be part of the release.\n")
-	}
-
-	return nil
-}
-
-func resolveImageStreamTagsToReferenceMode(inputIS, is *imageapi.ImageStream, referenceMode string, exclude sets.String) error {
-	switch referenceMode {
-	case "public", "", "source":
-		forceExternal := referenceMode == "public" || referenceMode == ""
-		internal := inputIS.Status.DockerImageRepository
-		external := inputIS.Status.PublicDockerImageRepository
-		if forceExternal && len(external) == 0 {
-			return fmt.Errorf("only image streams or releases with public image repositories can be the source for releases when using the default --reference-mode")
-		}
-
-		externalFn := func(source, image string) string {
-			// filter source URLs
-			if len(source) > 0 && len(internal) > 0 && strings.HasPrefix(source, internal) {
-				klog.V(2).Infof("Can't use source %s because it points to the internal registry", source)
-				source = ""
-			}
-			// default to the external registry name
-			if (forceExternal || len(source) == 0) && len(external) > 0 {
-				return external + "@" + image
-			}
-			return source
-		}
-
-		covered := sets.NewString()
-		for _, ref := range inputIS.Spec.Tags {
-			if exclude.Has(ref.Name) {
-				klog.V(2).Infof("Excluded spec tag %s", ref.Name)
-				continue
-			}
-
-			if ref.From != nil && ref.From.Kind == "DockerImage" {
-				switch from, err := imagereference.Parse(ref.From.Name); {
-				case err != nil:
-					return err
-
-				case len(from.ID) > 0:
-					source := externalFn(ref.From.Name, from.ID)
-					if len(source) == 0 {
-						klog.V(2).Infof("Can't use spec tag %q because we cannot locate or calculate a source location", ref.Name)
-						continue
-					}
-
-					ref := ref.DeepCopy()
-					ref.From = &corev1.ObjectReference{Kind: "DockerImage", Name: source}
-					is.Spec.Tags = append(is.Spec.Tags, *ref)
-					covered.Insert(ref.Name)
-
-				case len(from.Tag) > 0:
-					tag := findStatusTagEvents(inputIS.Status.Tags, ref.Name)
-					if tag == nil {
-						continue
-					}
-					if len(tag.Items) == 0 {
-						for _, condition := range tag.Conditions {
-							if condition.Type == imageapi.ImportSuccess && condition.Status != metav1.StatusSuccess {
-								return fmt.Errorf("the tag %q in the source input stream has not been imported yet", tag.Tag)
-							}
-						}
-						continue
-					}
-					if ref.Generation != nil && *ref.Generation != tag.Items[0].Generation {
-						return fmt.Errorf("the tag %q in the source input stream has not been imported yet", tag.Tag)
-					}
-					if len(tag.Items[0].Image) == 0 {
-						return fmt.Errorf("the tag %q in the source input stream has no image id", tag.Tag)
-					}
-
-					source := externalFn(tag.Items[0].DockerImageReference, tag.Items[0].Image)
-					ref := ref.DeepCopy()
-					ref.From = &corev1.ObjectReference{Kind: "DockerImage", Name: source}
-					is.Spec.Tags = append(is.Spec.Tags, *ref)
-					covered.Insert(ref.Name)
-				}
-				continue
-			}
-			// TODO: support ImageStreamTag and ImageStreamImage
-		}
-
-		for _, tag := range inputIS.Status.Tags {
-			if covered.Has(tag.Tag) {
-				continue
-			}
-			if exclude.Has(tag.Tag) {
-				klog.V(2).Infof("Excluded status tag %s", tag.Tag)
-				continue
-			}
-
-			// error if we haven't imported anything to this tag, or skip otherwise
-			if len(tag.Items) == 0 {
-				for _, condition := range tag.Conditions {
-					if condition.Type == imageapi.ImportSuccess && condition.Status != metav1.StatusSuccess {
-						return fmt.Errorf("the tag %q in the source input stream has not been imported yet", tag.Tag)
-					}
-				}
-				continue
-			}
-			// skip rather than error (user created a reference spec tag, then deleted it)
-			if len(tag.Items[0].Image) == 0 {
-				klog.V(2).Infof("the tag %q in the source input stream has no image id", tag.Tag)
-				continue
-			}
-
-			// attempt to identify the source image
-			source := externalFn(tag.Items[0].DockerImageReference, tag.Items[0].Image)
-			if len(source) == 0 {
-				klog.V(2).Infof("Can't use tag %q because we cannot locate or calculate a source location", tag.Tag)
-				continue
-			}
-			sourceRef, err := imagereference.Parse(source)
-			if err != nil {
-				return fmt.Errorf("the tag %q points to source %q which is not valid", tag.Tag, source)
-			}
-			sourceRef.Tag = ""
-			sourceRef.ID = tag.Items[0].Image
-			source = sourceRef.Exact()
-
-			ref := &imageapi.TagReference{Name: tag.Tag}
-			ref.From = &corev1.ObjectReference{Kind: "DockerImage", Name: source}
-			is.Spec.Tags = append(is.Spec.Tags, *ref)
-		}
-		return nil
-	default:
-		return fmt.Errorf("supported reference modes are: \"public\" (default) and \"source\"")
-	}
-}
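The externalFn logic above is easier to follow with concrete values. A standalone sketch of the same decision, assuming hypothetical internal and public repository names:

```go
package main

import (
	"fmt"
	"strings"
)

// resolve mimics externalFn: drop sources that point at the internal
// registry, and prefer the public repository plus digest when forced
// (reference-mode "public") or when no usable source remains.
func resolve(source, image, internal, external string, forceExternal bool) string {
	if len(source) > 0 && len(internal) > 0 && strings.HasPrefix(source, internal) {
		source = ""
	}
	if (forceExternal || len(source) == 0) && len(external) > 0 {
		return external + "@" + image
	}
	return source
}

func main() {
	internal := "image-registry.openshift-image-registry.svc:5000/ocp" // assumed value
	external := "quay.io/ocp"                                          // assumed value

	// An internal pullspec is rewritten to the public repository.
	fmt.Println(resolve(internal+"/cli@sha256:abc", "sha256:abc", internal, external, false))
	// With reference-mode "source", an external origin is preserved.
	fmt.Println(resolve("registry.example.com/cli@sha256:abc", "sha256:abc", internal, external, false))
}
```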
-
-func (o *NewOptions) extractManifests(is *imageapi.ImageStream, name string, metadata map[string]imageData) error {
-	if len(is.Spec.Tags) == 0 {
-		return fmt.Errorf("no component images defined, unable to build a release payload")
-	}
-
-	klog.V(4).Infof("Extracting manifests for release from input images")
-
-	dir := o.Directory
-	if len(dir) == 0 {
-		var err error
-		dir, err = ioutil.TempDir("", fmt.Sprintf("release-image-%s", name))
-		if err != nil {
-			return err
-		}
-		o.cleanupFns = append(o.cleanupFns, func() { os.RemoveAll(dir) })
-		klog.V(2).Infof("Manifests will be extracted to %s\n", dir)
-	}
-
-	verifier := imagemanifest.NewVerifier()
-	var lock sync.Mutex
-	opts := extract.NewOptions(genericclioptions.IOStreams{Out: o.Out, ErrOut: o.ErrOut})
-	opts.SecurityOptions = o.SecurityOptions
-	opts.OnlyFiles = true
-	opts.ParallelOptions = o.ParallelOptions
-	opts.ImageMetadataCallback = func(m *extract.Mapping, dgst, contentDigest digest.Digest, config *dockerv1client.DockerImageConfig) {
-		verifier.Verify(dgst, contentDigest)
-
-		lock.Lock()
-		defer lock.Unlock()
-		metadata[m.Name] = imageData{
-			Directory:     m.To,
-			Ref:           m.ImageRef,
-			Config:        config,
-			Digest:        dgst,
-			ContentDigest: contentDigest,
-		}
-	}
-
-	for i := range is.Spec.Tags {
-		tag := &is.Spec.Tags[i]
-		dstDir := filepath.Join(dir, tag.Name)
-		if tag.From.Kind != "DockerImage" {
-			continue
-		}
-		src := tag.From.Name
-		ref, err := imagereference.Parse(src)
-		if err != nil {
-			return err
-		}
-
-		// when the user provides an override, look at all layers for manifests
-		// in case the user did a layered build and overrode only one. This is
-		// an unsupported release configuration
-		var custom bool
-		filter := extract.NewPositionLayerFilter(-1)
-		if tag.Annotations[annotationReleaseOverride] == "true" {
-			custom = true
-			filter = nil
-		}
-
-		opts.Mappings = append(opts.Mappings, extract.Mapping{
-			Name:     tag.Name,
-			ImageRef: ref,
-
-			From: "manifests/",
-			To:   dstDir,
-
-			LayerFilter: filter,
-
-			ConditionFn: func(m *extract.Mapping, dgst digest.Digest, imageConfig *dockerv1client.DockerImageConfig) (bool, error) {
-				var labels map[string]string
-				if imageConfig.Config != nil {
-					labels = imageConfig.Config.Labels
-				}
-				if tag.Annotations == nil {
-					tag.Annotations = make(map[string]string)
-				}
-				tag.Annotations[annotationBuildSourceCommit] = labels[annotationBuildSourceCommit]
-				tag.Annotations[annotationBuildSourceRef] = labels[annotationBuildSourceRef]
-				tag.Annotations[annotationBuildSourceLocation] = labels[annotationBuildSourceLocation]
-
-				if versions := labels[annotationBuildVersions]; len(versions) > 0 {
-					components, err := parseComponentVersionsLabel(versions)
-					if err != nil {
-						return false, fmt.Errorf("tag %q has an invalid %s label: %v", tag.Name, annotationBuildVersions, err)
-					}
-					// TODO: eventually this can be relaxed
-					for component := range components {
-						if !stringArrContains(o.AllowedComponents, component) {
-							return false, fmt.Errorf("tag %q references a component version %q which is not in the allowed list", tag.Name, component)
-						}
-					}
-					tag.Annotations[annotationBuildVersions] = versions
-				}
-
-				if len(labels[annotationReleaseOperator]) == 0 {
-					klog.V(2).Infof("Image %s has no %s label, skipping", m.ImageRef, annotationReleaseOperator)
-					return false, nil
-				}
-				if err := os.MkdirAll(dstDir, 0777); err != nil {
-					return false, err
-				}
-				if custom {
-					fmt.Fprintf(o.ErrOut, "info: Loading override %s %s\n", m.ImageRef.Exact(), tag.Name)
-				} else {
-					fmt.Fprintf(o.ErrOut, "info: Loading %s %s\n", m.ImageRef.ID, tag.Name)
-				}
-				return true, nil
-			},
-		})
-	}
-	klog.V(4).Infof("Manifests will be extracted from:\n%#v", opts.Mappings)
-	if err := opts.Run(); err != nil {
-		return err
-	}
-
-	if !verifier.Verified() {
-		err := fmt.Errorf("one or more input images failed content verification and may have been tampered with")
-		if !o.SecurityOptions.SkipVerification {
-			return err
-		}
-		fmt.Fprintf(o.ErrOut, "warning: %v\n", err)
-	}
-
-	if len(is.Spec.Tags) > 0 {
-		if err := os.MkdirAll(dir, 0777); err != nil {
-			return err
-		}
-		data, err := json.MarshalIndent(is, "", "  ")
-		if err != nil {
-			return err
-		}
-		if err := ioutil.WriteFile(filepath.Join(dir, "image-references"), data, 0644); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (o *NewOptions) mirrorImages(is *imageapi.ImageStream) error {
-	klog.V(4).Infof("Mirroring release contents to %s", o.Mirror)
-	copied := is.DeepCopy()
-	opts := NewMirrorOptions(genericclioptions.IOStreams{Out: o.Out, ErrOut: o.ErrOut})
-	opts.DryRun = o.DryRun
-	opts.ImageStream = copied
-	opts.To = o.Mirror
-	opts.SkipRelease = true
-	opts.SecurityOptions = o.SecurityOptions
-
-	if err := opts.Run(); err != nil {
-		return err
-	}
-
-	targetFn, err := ComponentReferencesForImageStream(copied)
-	if err != nil {
-		return err
-	}
-
-	replacements, err := ReplacementsForImageStream(is, false, targetFn)
-	if err != nil {
-		return err
-	}
-	for i := range is.Spec.Tags {
-		tag := &is.Spec.Tags[i]
-		if tag.From == nil || tag.From.Kind != "DockerImage" {
-			continue
-		}
-		if value, ok := replacements[tag.From.Name]; ok {
-			tag.From.Name = value
-		}
-	}
-	if klog.V(4) {
-		data, _ := json.MarshalIndent(is, "", "  ")
-		klog.Infof("Image references updated to:\n%s", string(data))
-	}
-
-	return nil
-}
-
-func (o *NewOptions) write(r io.Reader, is *imageapi.ImageStream, now time.Time) error {
-	var exitErr error
-	switch {
-	case len(o.ToDir) > 0:
-		klog.V(4).Infof("Writing release contents to directory %s", o.ToDir)
-		if err := os.MkdirAll(o.ToDir, 0777); err != nil {
-			return err
-		}
-		r, err := archive.DecompressStream(r)
-		if err != nil {
-			return err
-		}
-		tr := tar.NewReader(r)
-		for {
-			hdr, err := tr.Next()
-			if err == io.EOF {
-				break
-			}
-			if err != nil {
-				return err
-			}
-			if !strings.HasPrefix(hdr.Name, "release-manifests/") || hdr.Typeflag&tar.TypeReg != tar.TypeReg {
-				continue
-			}
-			name := strings.TrimPrefix(hdr.Name, "release-manifests/")
-			if strings.Count(name, "/") > 0 || name == "." || name == ".." || len(name) == 0 {
-				continue
-			}
-			itemPath := filepath.Join(o.ToDir, name)
-			f, err := os.OpenFile(itemPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
-			if err != nil {
-				return err
-			}
-			if _, err := io.Copy(f, tr); err != nil {
-				f.Close()
-				return err
-			}
-			if err := f.Close(); err != nil {
-				return err
-			}
-			if err := os.Chtimes(itemPath, hdr.ModTime, hdr.ModTime); err != nil {
-				klog.V(2).Infof("Unable to update extracted file time: %v", err)
-			}
-		}
-	case len(o.ToFile) > 0:
-		klog.V(4).Infof("Writing release contents to file %s", o.ToFile)
-		var w io.WriteCloser
-		if o.ToFile == "-" {
-			w = nopCloser{o.Out}
-		} else {
-			f, err := os.OpenFile(o.ToFile, os.O_CREATE|os.O_TRUNC|os.O_APPEND|os.O_WRONLY, 0750)
-			if err != nil {
-				return err
-			}
-			w = f
-		}
-		if _, err := io.Copy(w, r); err != nil {
-			w.Close()
-			return err
-		}
-		if err := w.Close(); err != nil {
-			return err
-		}
-		if o.ToFile != "-" {
-			if err := os.Chtimes(o.ToFile, is.CreationTimestamp.Time, is.CreationTimestamp.Time); err != nil {
-				klog.V(2).Infof("Unable to set timestamps on output file: %v", err)
-			}
-		}
-	default:
-		if len(o.ToImage) == 0 {
-			o.DryRun = true
-			o.ToImage = "release:latest"
-		}
-		klog.V(4).Infof("Writing release contents to image %s", o.ToImage)
-		toRef, err := imagereference.Parse(o.ToImage)
-		if err != nil {
-			return fmt.Errorf("--to-image was not valid: %v", err)
-		}
-		if len(toRef.ID) > 0 {
-			return fmt.Errorf("--to-image may only point to a repository or tag, not a digest")
-		}
-		if len(toRef.Tag) == 0 {
-			toRef.Tag = o.Name
-		}
-		toImageBase := o.ToImageBase
-		if len(toImageBase) == 0 && len(o.ToImageBaseTag) > 0 {
-			for _, tag := range is.Spec.Tags {
-				if tag.From != nil && tag.From.Kind == "DockerImage" && tag.Name == o.ToImageBaseTag {
-					toImageBase = tag.From.Name
-				}
-			}
-			if len(toImageBase) == 0 {
-				return fmt.Errorf("--to-image-base-tag did not point to a tag in the input")
-			}
-		}
-
-		verifier := imagemanifest.NewVerifier()
-		options := imageappend.NewAppendImageOptions(genericclioptions.IOStreams{Out: ioutil.Discard, ErrOut: o.ErrOut})
-		options.SecurityOptions = o.SecurityOptions
-		options.DryRun = o.DryRun
-		options.From = toImageBase
-		options.ConfigurationCallback = func(dgst, contentDigest digest.Digest, config *dockerv1client.DockerImageConfig) error {
-			verifier.Verify(dgst, contentDigest)
-			// reset any base image info
-			if len(config.OS) == 0 {
-				config.OS = "linux"
-			}
-			if len(config.Architecture) == 0 {
-				config.Architecture = "amd64"
-			}
-			config.Container = ""
-			config.Parent = ""
-			config.Created = now
-			config.ContainerConfig = docker10.DockerConfig{}
-			config.Config.Labels = make(map[string]string)
-
-			// explicitly set release info
-			config.Config.Labels["io.openshift.release"] = is.Name
-			config.History = []dockerv1client.DockerConfigHistory{
-				{Comment: "Release image for OpenShift", Created: now},
-			}
-			if len(dgst) > 0 {
-				config.Config.Labels[annotationReleaseBaseImageDigest] = dgst.String()
-			}
-			return nil
-		}
-
-		options.LayerStream = r
-		options.To = toRef.Exact()
-		if err := options.Run(); err != nil {
-			return err
-		}
-		if !verifier.Verified() {
-			err := fmt.Errorf("the base image failed content verification and may have been tampered with")
-			if !o.SecurityOptions.SkipVerification {
-				return err
-			}
-			fmt.Fprintf(o.ErrOut, "warning: %v\n", err)
-		}
-		if !o.DryRun {
-			fmt.Fprintf(o.ErrOut, "info: Pushed to %s\n", o.ToImage)
-		}
-
-		if o.VerifyOutputFn != nil {
-			if err := o.VerifyOutputFn(options.ToDigest); err != nil {
-				if o.DryRun {
-					return err
-				}
-				exitErr = err
-			}
-		}
-
-		toRefWithDigest := toRef
-		toRefWithDigest.Tag = ""
-		toRefWithDigest.ID = options.ToDigest.String()
-		msg, err := createReleaseSignatureMessage(fmt.Sprintf("oc-adm-release-new/%s", version.Get().GitCommit), now, options.ToDigest.String(), toRefWithDigest.Exact())
-		if err != nil {
-			return err
-		}
-		if len(o.ToSignature) > 0 {
-			if err := ioutil.WriteFile(o.ToSignature, msg, 0644); err != nil {
-				return fmt.Errorf("unable to write signature file: %v", err)
-			}
-		} else {
-			klog.V(2).Infof("Signature for output:\n%s", string(msg))
-		}
-
-		fmt.Fprintf(o.Out, "%s %s %s\n", options.ToDigest.String(), is.Name, is.CreationTimestamp.Format(time.RFC3339))
-	}
-	return exitErr
-}
-
-func toJSONString(obj interface{}) string {
-	data, err := json.Marshal(obj)
-	if err != nil {
-		panic(err)
-	}
-	return string(data)
-}
-
-type nopCloser struct {
-	io.Writer
-}
-
-func (_ nopCloser) Close() error { return nil }
-
-// writeNestedTarHeader writes a series of nested tar headers, starting with parts[0] and joining each
-// successive part, but only if the path does not exist already.
-func writeNestedTarHeader(tw *tar.Writer, parts []string, existing map[string]struct{}, hdr tar.Header) error {
-	for i := range parts {
-		componentDir := path.Join(parts[:i+1]...)
-		if _, ok := existing[componentDir]; ok {
-			continue
-		}
-		existing[componentDir] = struct{}{}
-		hdr.Name = componentDir
-		if err := tw.WriteHeader(&hdr); err != nil {
-			return err
-		}
-	}
-	return nil
-}
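A short usage sketch for the pattern above: the helper is inlined (same behavior) so the example runs standalone, and the shared 'existing' map is what keeps repeated calls from emitting duplicate directory headers:

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"path"
	"time"
)

// writeNested behaves like writeNestedTarHeader above, inlined so this
// sketch compiles on its own.
func writeNested(tw *tar.Writer, parts []string, existing map[string]struct{}, hdr tar.Header) error {
	for i := range parts {
		dir := path.Join(parts[:i+1]...)
		if _, ok := existing[dir]; ok {
			continue
		}
		existing[dir] = struct{}{}
		hdr.Name = dir
		if err := tw.WriteHeader(&hdr); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	buf := &bytes.Buffer{}
	tw := tar.NewWriter(buf)
	existing := map[string]struct{}{}
	hdr := tar.Header{Mode: 0777, ModTime: time.Now(), Typeflag: tar.TypeDir}

	// Both calls share the map, so "release-manifests" is written once.
	_ = writeNested(tw, []string{"release-manifests"}, existing, hdr)
	_ = writeNested(tw, []string{"release-manifests"}, existing, hdr)
	_ = tw.Close()

	tr := tar.NewReader(buf)
	for {
		h, err := tr.Next()
		if err != nil {
			break
		}
		fmt.Println(h.Name) // release-manifests, printed once
	}
}
```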
-
-func writePayload(w io.Writer, is *imageapi.ImageStream, cm *CincinnatiMetadata, ordered []string, metadata map[string]imageData, allowMissingImages bool, verifiers []PayloadVerifier) ([]string, error) {
-	var operators []string
-	directories := make(map[string]struct{})
-	files := make(map[string]int)
-
-	parts := []string{"release-manifests"}
-
-	// find the newest content date in the input
-	var newest time.Time
-	if err := iterateExtractedManifests(ordered, metadata, func(contents []os.FileInfo, name string, image imageData) error {
-		for _, fi := range contents {
-			if fi.IsDir() {
-				continue
-			}
-			if fi.ModTime().After(newest) {
-				newest = fi.ModTime()
-			}
-		}
-		return nil
-	}); err != nil {
-		return nil, err
-	}
-	newest = newest.UTC().Truncate(time.Second)
-	klog.V(4).Infof("Most recent content has date %s", newest.Format(time.RFC3339))
-
-	gw := gzip.NewWriter(w)
-	tw := tar.NewWriter(gw)
-
-	// ensure the directory exists in the tar bundle
-	if err := writeNestedTarHeader(tw, parts, directories, tar.Header{Mode: 0777, ModTime: newest, Typeflag: tar.TypeDir}); err != nil {
-		return nil, err
-	}
-
-	// write image metadata to release-manifests/image-references
-	data, err := json.MarshalIndent(is, "", "  ")
-	if err != nil {
-		return nil, err
-	}
-	if err := tw.WriteHeader(&tar.Header{Mode: 0444, ModTime: newest, Typeflag: tar.TypeReg, Name: path.Join(append(append([]string{}, parts...), "image-references")...), Size: int64(len(data))}); err != nil {
-		return nil, err
-	}
-	if _, err := tw.Write(data); err != nil {
-		return nil, err
-	}
-
-	// write cincinnati metadata to release-manifests/release-metadata
-	if cm != nil {
-		data, err := json.MarshalIndent(cm, "", "  ")
-		if err != nil {
-			return nil, err
-		}
-		if err := tw.WriteHeader(&tar.Header{Mode: 0444, ModTime: newest, Typeflag: tar.TypeReg, Name: path.Join(append(append([]string{}, parts...), "release-metadata")...), Size: int64(len(data))}); err != nil {
-			return nil, err
-		}
-		if _, err := tw.Write(data); err != nil {
-			return nil, err
-		}
-	}
-
-	// read each directory, processing the manifests in order and updating the contents into the tar output
-	if err := iterateExtractedManifests(ordered, metadata, func(contents []os.FileInfo, name string, image imageData) error {
-		transform := NopManifestMapper
-
-		if fi := takeFileByName(&contents, "image-references"); fi != nil {
-			path := filepath.Join(image.Directory, fi.Name())
-			klog.V(2).Infof("Perform image replacement based on inclusion of %s", path)
-			transform, err = NewTransformFromImageStreamFile(path, is, allowMissingImages)
-			if err != nil {
-				return fmt.Errorf("operator %q contained an invalid image-references file: %s", name, err)
-			}
-		}
-
-		for _, fi := range contents {
-			if fi.IsDir() {
-				continue
-			}
-			filename := fi.Name()
-
-			// components that don't declare that they need to be part of the global order
-			// get put in a scoped bucket at the end. Only a few components should need to
-			// be in the global order.
-			if !strings.HasPrefix(filename, "0000_") {
-				filename = fmt.Sprintf("0000_50_%s_%s", name, filename)
-			}
-			if count, ok := files[filename]; ok {
-				ext := path.Ext(path.Base(filename))
-				files[filename] = count + 1
-				filename = fmt.Sprintf("%s_%d%s", strings.TrimSuffix(filename, ext), count+1, ext)
-				files[filename] = 1
-			} else {
-				files[filename] = 1
-			}
-			src := filepath.Join(image.Directory, fi.Name())
-			dst := path.Join(append(append([]string{}, parts...), filename)...)
-			klog.V(4).Infof("Copying %s to %s", src, dst)
-
-			data, err := ioutil.ReadFile(src)
-			if err != nil {
-				return err
-			}
-
-			for _, fn := range verifiers {
-				if err := fn(filepath.Join(filepath.Base(image.Directory), fi.Name()), data); err != nil {
-					return err
-				}
-			}
-
-			modified, err := transform(data)
-			if err != nil {
-				return err
-			}
-			if err := tw.WriteHeader(&tar.Header{Mode: 0444, ModTime: fi.ModTime(), Typeflag: tar.TypeReg, Name: dst, Size: int64(len(modified))}); err != nil {
-				return err
-			}
-			klog.V(6).Infof("Writing payload to %s\n%s", dst, string(modified))
-			if _, err := tw.Write(modified); err != nil {
-				return err
-			}
-		}
-		operators = append(operators, name)
-		return nil
-	}); err != nil {
-		return nil, err
-	}
-
-	if err := tw.Close(); err != nil {
-		return nil, err
-	}
-	if err := gw.Close(); err != nil {
-		return nil, err
-	}
-	return operators, nil
-}
-
-func iterateExtractedManifests(ordered []string, metadata map[string]imageData, fn func(contents []os.FileInfo, name string, image imageData) error) error {
-	for _, name := range ordered {
-		image, ok := metadata[name]
-		if !ok {
-			return fmt.Errorf("missing image data for %s", name)
-		}
-
-		// process each manifest in the given directory
-		contents, err := ioutil.ReadDir(image.Directory)
-		if err != nil {
-			return err
-		}
-		if len(contents) == 0 {
-			continue
-		}
-
-		if err := fn(contents, name, image); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func hasTag(tags []imageapi.TagReference, tag string) *imageapi.TagReference {
-	for i := range tags {
-		if tag == tags[i].Name {
-			return &tags[i]
-		}
-	}
-	return nil
-}
-
-func pruneEmptyDirectories(dir string) error {
-	return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-		if !info.IsDir() {
-			return nil
-		}
-		names, err := ioutil.ReadDir(path)
-		if err != nil {
-			return err
-		}
-		if len(names) > 0 {
-			return nil
-		}
-		klog.V(4).Infof("Component %s does not have any manifests", path)
-		return os.Remove(path)
-	})
-}
-
-type Mapping struct {
-	Source      string
-	Destination string
-}
-
-func parseArgs(args []string, overlap map[string]string) ([]Mapping, error) {
-	var mappings []Mapping
-	for _, s := range args {
-		parts := strings.SplitN(s, "=", 2)
-		if len(parts) != 2 {
-			return nil, fmt.Errorf("all arguments must be valid SRC=DST mappings")
-		}
-		if len(parts[0]) == 0 || len(parts[1]) == 0 {
-			return nil, fmt.Errorf("all arguments must be valid SRC=DST mappings")
-		}
-		src := parts[0]
-		dst := parts[1]
-		if _, ok := overlap[dst]; ok {
-			return nil, fmt.Errorf("each destination tag may only be specified once: %s", dst)
-		}
-		overlap[dst] = src
-
-		mappings = append(mappings, Mapping{Source: src, Destination: dst})
-	}
-	return mappings, nil
-}
-
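-// Illustrative sketch (not part of the original file): parseArgs turns
-// SRC=DST arguments into Mapping values, using the shared overlap map to
-// reject a destination that is specified more than once. The references
-// below are hypothetical placeholders:
-//
-//	overlap := map[string]string{}
-//	mappings, err := parseArgs([]string{"quay.io/src/a:1=registry.example/dst/a:1"}, overlap)
-//	if err != nil {
-//		// malformed argument or duplicate destination
-//	}
-//	_ = mappings // []Mapping{{Source: "quay.io/src/a:1", Destination: "registry.example/dst/a:1"}}
-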
-func parseFile(filename string, overlap map[string]string) ([]Mapping, error) {
-	var fileMappings []Mapping
-	f, err := os.Open(filename)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	s := bufio.NewScanner(f)
-	lineNumber := 0
-	for s.Scan() {
-		line := s.Text()
-		lineNumber++
-
-		// remove comments and whitespace
-		if i := strings.Index(line, "#"); i != -1 {
-			line = line[0:i]
-		}
-		line = strings.TrimSpace(line)
-		if len(line) == 0 {
-			continue
-		}
-
-		args := strings.Split(line, " ")
-		mappings, err := parseArgs(args, overlap)
-		if err != nil {
-			return nil, fmt.Errorf("file %s, line %d: %v", filename, lineNumber, err)
-		}
-		fileMappings = append(fileMappings, mappings...)
-	}
-	if err := s.Err(); err != nil {
-		return nil, err
-	}
-	return fileMappings, nil
-}
-
-func takeFileByName(files *[]os.FileInfo, name string) os.FileInfo {
-	for i, fi := range *files {
-		if fi.IsDir() || fi.Name() != name {
-			continue
-		}
-		*files = append((*files)[:i], (*files)[i+1:]...)
-		return fi
-	}
-	return nil
-}
-
-type PayloadVerifier func(filename string, data []byte) error
-
-func pruneUnreferencedImageStreams(out io.Writer, is *imageapi.ImageStream, metadata map[string]imageData, include []string) error {
-	referenced := make(map[string]struct{})
-	for _, v := range metadata {
-		is, err := parseImageStream(filepath.Join(v.Directory, "image-references"))
-		if os.IsNotExist(err) {
-			continue
-		}
-		if err != nil {
-			return err
-		}
-		for _, tag := range is.Spec.Tags {
-			referenced[tag.Name] = struct{}{}
-		}
-	}
-	for _, name := range include {
-		referenced[name] = struct{}{}
-	}
-	var updated []imageapi.TagReference
-	for _, tag := range is.Spec.Tags {
-		_, ok := referenced[tag.Name]
-		if !ok {
-			klog.V(3).Infof("Excluding tag %s which is not referenced by an operator", tag.Name)
-			continue
-		}
-		updated = append(updated, tag)
-	}
-	if len(updated) != len(is.Spec.Tags) {
-		fmt.Fprintf(out, "info: Included %d images from %d input operators into the release\n", len(updated), len(metadata))
-		is.Spec.Tags = updated
-	}
-	return nil
-}
-
-func filenameContents(s string, in io.Reader) ([]byte, error) {
-	switch {
-	case s == "-":
-		return ioutil.ReadAll(in)
-	case strings.HasPrefix(s, "http://") || strings.HasPrefix(s, "https://"):
-		resp, err := http.Get(s)
-		if err != nil {
-			return nil, err
-		}
-		defer resp.Body.Close()
-		switch {
-		case resp.StatusCode >= 200 && resp.StatusCode < 300:
-			return ioutil.ReadAll(resp.Body)
-		default:
-			return nil, fmt.Errorf("unable to load URL: server returned %d: %s", resp.StatusCode, resp.Status)
-		}
-	default:
-		return ioutil.ReadFile(s)
-	}
-}
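-
-// Illustrative sketch (not part of the original file): filenameContents
-// accepts "-" for stdin, an http(s) URL, or a local file path. A
-// hypothetical call fetching content from a URL:
-//
-//	data, err := filenameContents("https://example.com/release.txt", os.Stdin)
-//	if err != nil {
-//		// network failure or non-2xx response
-//	}
-//	_ = data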
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/release/release.go b/vendor/github.com/openshift/oc/pkg/cli/admin/release/release.go
deleted file mode 100644
index 65e59e01b96e..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/release/release.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package release
-
-import (
-	"github.com/spf13/cobra"
-
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-)
-
-func NewCmd(f kcmdutil.Factory, parentName string, streams genericclioptions.IOStreams) *cobra.Command {
-	cmd := &cobra.Command{
-		Use:   "release",
-		Short: "Tools for managing the OpenShift release process",
-		Long: templates.LongDesc(`
-			This tool is used by the OpenShift release process to build images that can update a cluster.
-
-			The subcommands allow you to see information about releases, perform administrative
-			actions, inspect the content of a release, and mirror release content across image
-			registries.
-			`),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return cmd.Help()
-		},
-	}
-	cmd.AddCommand(NewInfo(f, parentName+" release", streams))
-	cmd.AddCommand(NewRelease(f, parentName+" release", streams))
-	cmd.AddCommand(NewExtract(f, parentName+" release", streams))
-	cmd.AddCommand(NewMirror(f, parentName+" release", streams))
-	return cmd
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/release/signature.go b/vendor/github.com/openshift/oc/pkg/cli/admin/release/signature.go
deleted file mode 100644
index d26f52fd5384..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/release/signature.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package release
-
-import (
-	"encoding/json"
-	"fmt"
-	"time"
-)
-
-// createReleaseSignatureMessage creates the core message to sign the release payload.
-func createReleaseSignatureMessage(signer string, now time.Time, releaseDigest, pullSpec string) ([]byte, error) {
-	if len(signer) == 0 || now.IsZero() || len(releaseDigest) == 0 || len(pullSpec) == 0 {
-		return nil, fmt.Errorf("you must specify a signer, current timestamp, release digest, and pull spec to sign")
-	}
-
-	sig := &signature{
-		Critical: criticalSignature{
-			Type: "atomic container signature",
-			Image: criticalImage{
-				DockerManifestDigest: releaseDigest,
-			},
-			Identity: criticalIdentity{
-				DockerReference: pullSpec,
-			},
-		},
-		Optional: optionalSignature{
-			Creator:   signer,
-			Timestamp: now.Unix(),
-		},
-	}
-	return json.MarshalIndent(sig, "", "  ")
-}
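-
-// Illustrative sketch (not part of the original file): building a signing
-// message for a release. The signer and pull spec are hypothetical
-// placeholders; the digest reuses the example from the schema below:
-//
-//	msg, err := createReleaseSignatureMessage(
-//		"example signer v1.0.0",
-//		time.Now(),
-//		"sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e",
-//		"quay.io/example/release:latest",
-//	)
-//	if err != nil {
-//		// a required field was empty
-//	}
-//	_ = msg // indented JSON matching the schema documented below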
-
-// An atomic container signature has the following schema:
-//
-// {
-// 	"critical": {
-// 			"type": "atomic container signature",
-// 			"image": {
-// 					"docker-manifest-digest": "sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e"
-// 			},
-// 			"identity": {
-// 					"docker-reference": "docker.io/library/busybox:latest"
-// 			}
-// 	},
-// 	"optional": {
-// 			"creator": "some software package v1.0.1-35",
-// 			"timestamp": 1483228800
-// 	}
-// }
-type signature struct {
-	Critical criticalSignature `json:"critical"`
-	Optional optionalSignature `json:"optional"`
-}
-
-type criticalSignature struct {
-	Type     string           `json:"type"`
-	Image    criticalImage    `json:"image"`
-	Identity criticalIdentity `json:"identity"`
-}
-
-type criticalImage struct {
-	DockerManifestDigest string `json:"docker-manifest-digest"`
-}
-
-type criticalIdentity struct {
-	DockerReference string `json:"docker-reference"`
-}
-
-type optionalSignature struct {
-	Creator   string `json:"creator"`
-	Timestamp int64  `json:"timestamp"`
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/release/test/main.go b/vendor/github.com/openshift/oc/pkg/cli/admin/release/test/main.go
deleted file mode 100644
index ef92b8dbbd06..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/release/test/main.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"io/ioutil"
-	"log"
-	"os"
-	"regexp"
-)
-
-func main() {
-	re := regexp.MustCompile(`([\W])(quay\.io/coreos[/\w\-]*)(\:[a-zA-Z\d][a-zA-Z\d\-_]*[a-zA-Z\d]|@\w+:\w+)?`)
-	data, err := ioutil.ReadFile(os.Args[1])
-	if err != nil {
-		log.Fatal(err)
-	}
-	out := re.ReplaceAllFunc(data, func(data []byte) []byte {
-		fmt.Fprintf(os.Stderr, "found: %s\n", string(data))
-		return data
-	})
-	fmt.Println(string(out))
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/top/graph.go b/vendor/github.com/openshift/oc/pkg/cli/admin/top/graph.go
deleted file mode 100644
index 14ba13c2f13c..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/top/graph.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package top
-
-import (
-	gonum "github.com/gonum/graph"
-	"k8s.io/klog"
-
-	corev1 "k8s.io/api/core/v1"
-
-	imagev1 "github.com/openshift/api/image/v1"
-	"github.com/openshift/library-go/pkg/image/reference"
-	"github.com/openshift/oc/pkg/helpers/graph/genericgraph"
-	imagegraph "github.com/openshift/oc/pkg/helpers/graph/imagegraph/nodes"
-	kubegraph "github.com/openshift/oc/pkg/helpers/graph/kubegraph/nodes"
-	"github.com/openshift/oc/pkg/helpers/image/dockerlayer"
-)
-
-const (
-	ImageLayerEdgeKind               = "ImageLayer"
-	ImageTopLayerEdgeKind            = "ImageTopLayer"
-	ImageStreamImageEdgeKind         = "ImageStreamImage"
-	HistoricImageStreamImageEdgeKind = "HistoricImageStreamImage"
-	PodImageEdgeKind                 = "PodImage"
-	ParentImageEdgeKind              = "ParentImage"
-)
-
-func getImageNodes(nodes []gonum.Node) []*imagegraph.ImageNode {
-	ret := []*imagegraph.ImageNode{}
-	for i := range nodes {
-		if node, ok := nodes[i].(*imagegraph.ImageNode); ok {
-			ret = append(ret, node)
-		}
-	}
-	return ret
-}
-
-func addImagesToGraph(g genericgraph.Graph, images *imagev1.ImageList) {
-	for i := range images.Items {
-		image := &images.Items[i]
-
-		klog.V(4).Infof("Adding image %q to graph", image.Name)
-		imageNode := imagegraph.EnsureImageNode(g, image)
-
-		topLayerAdded := false
-		// We iterate over the layers in reverse order because we need to find
-		// the first layer from the top that is not an empty layer. Empty layers
-		// are omitted because every image has them and they produce false
-		// positives when detecting parents. This applies only to schema v1
-		// images; schema v2 does not have that problem.
-		for i := len(image.DockerImageLayers) - 1; i >= 0; i-- {
-			layer := image.DockerImageLayers[i]
-			layerNode := imagegraph.EnsureImageComponentLayerNode(g, layer.Name)
-			edgeKind := ImageLayerEdgeKind
-			if !topLayerAdded && layer.Name != dockerlayer.DigestSha256EmptyTar && layer.Name != dockerlayer.GzippedEmptyLayerDigest {
-				edgeKind = ImageTopLayerEdgeKind
-				topLayerAdded = true
-			}
-			g.AddEdge(imageNode, layerNode, edgeKind)
-			klog.V(4).Infof("Adding image layer %q to graph (%q)", layer.Name, edgeKind)
-		}
-	}
-}
-
-func addImageStreamsToGraph(g genericgraph.Graph, streams *imagev1.ImageStreamList) {
-	for i := range streams.Items {
-		stream := &streams.Items[i]
-		klog.V(4).Infof("Adding ImageStream %s/%s to graph", stream.Namespace, stream.Name)
-		isNode := imagegraph.EnsureImageStreamNode(g, stream)
-		imageStreamNode := isNode.(*imagegraph.ImageStreamNode)
-
-		// connect IS with underlying images
-		for tag, history := range stream.Status.Tags {
-			for i := range history.Items {
-				image := history.Items[i]
-				imageNode := imagegraph.FindImage(g, image.Image)
-				if imageNode == nil {
-					klog.V(2).Infof("Unable to find image %q in graph (from tag=%q, dockerImageReference=%s)",
-						history.Items[i].Image, tag, image.DockerImageReference)
-					continue
-				}
-				klog.V(4).Infof("Adding edge from %q to %q", imageStreamNode.UniqueName(), imageNode.UniqueName())
-				edgeKind := ImageStreamImageEdgeKind
-				if i > 0 {
-					edgeKind = HistoricImageStreamImageEdgeKind
-				}
-				g.AddEdge(imageStreamNode, imageNode, edgeKind)
-			}
-		}
-	}
-}
-
-func addPodsToGraph(g genericgraph.Graph, pods *corev1.PodList) {
-	for i := range pods.Items {
-		pod := &pods.Items[i]
-		if pod.Status.Phase != corev1.PodRunning && pod.Status.Phase != corev1.PodPending {
-			klog.V(4).Infof("Pod %s/%s is neither running nor pending - skipping", pod.Namespace, pod.Name)
-			continue
-		}
-
-		klog.V(4).Infof("Adding pod %s/%s to graph", pod.Namespace, pod.Name)
-		podNode := kubegraph.EnsurePodNode(g, pod)
-		addPodSpecToGraph(g, &pod.Spec, podNode)
-	}
-}
-
-func addPodSpecToGraph(g genericgraph.Graph, spec *corev1.PodSpec, predecessor gonum.Node) {
-	for j := range spec.Containers {
-		container := spec.Containers[j]
-
-		klog.V(4).Infof("Examining container image %q", container.Image)
-		ref, err := reference.Parse(container.Image)
-		if err != nil {
-			klog.V(2).Infof("Unable to parse DockerImageReference %q: %v - skipping", container.Image, err)
-			continue
-		}
-
-		if len(ref.ID) == 0 {
-			// ignore images that are not managed
-			continue
-		}
-
-		imageNode := imagegraph.FindImage(g, ref.ID)
-		if imageNode == nil {
-			klog.V(1).Infof("Unable to find image %q in the graph", ref.ID)
-			continue
-		}
-
-		klog.V(4).Infof("Adding edge from %v to %v", predecessor, imageNode)
-		g.AddEdge(predecessor, imageNode, PodImageEdgeKind)
-	}
-}
-
-func markParentsInGraph(g genericgraph.Graph) {
-	imageNodes := getImageNodes(g.Nodes())
-	for _, in := range imageNodes {
-		// find the image's top layer; there should be just one
-		for _, e := range g.OutboundEdges(in, ImageTopLayerEdgeKind) {
-			layerNode, _ := e.To().(*imagegraph.ImageComponentNode)
-			// find images that contain this layer but do not have it as their top layer
-			for _, ed := range g.InboundEdges(layerNode, ImageLayerEdgeKind) {
-				childNode, _ := ed.From().(*imagegraph.ImageNode)
-				if in.ID() == childNode.ID() {
-					// don't add self edge, otherwise gonum/graph will panic
-					continue
-				}
-				g.AddEdge(in, childNode, ParentImageEdgeKind)
-			}
-			// TODO: find images that contain THIS layer as their top layer;
-			// this happens when the image contents are unchanged.
-
-			// TODO: if two layers have exactly the same contents, the current
-			// mechanism might trip over that as well. We should check for
-			// a series of layers when checking for parents.
-		}
-	}
-}
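-
-// Illustrative note (not part of the original file): parent detection works
-// purely on layer sharing. If image A's top layer also appears as a
-// non-top layer of image B, A is marked as a parent of B:
-//
-//	A: [base, mid]        top layer is "mid"
-//	B: [base, mid, app]   contains "mid" below its top layer "app"
-//	=> g.AddEdge(A, B, ParentImageEdgeKind)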
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/top/images.go b/vendor/github.com/openshift/oc/pkg/cli/admin/top/images.go
deleted file mode 100644
index d6ce762404d8..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/top/images.go
+++ /dev/null
@@ -1,297 +0,0 @@
-package top
-
-import (
-	"fmt"
-	"io"
-	"sort"
-	"strings"
-
-	"github.com/docker/go-units"
-	"github.com/spf13/cobra"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-	buildv1 "github.com/openshift/api/build/v1"
-	dockerv10 "github.com/openshift/api/image/docker10"
-	imagev1 "github.com/openshift/api/image/v1"
-	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
-	"github.com/openshift/library-go/pkg/image/imageutil"
-	"github.com/openshift/oc/pkg/helpers/graph/genericgraph"
-	imagegraph "github.com/openshift/oc/pkg/helpers/graph/imagegraph/nodes"
-	kubegraph "github.com/openshift/oc/pkg/helpers/graph/kubegraph/nodes"
-)
-
-const (
-	TopImagesRecommendedName = "images"
-	maxImageIDLength         = 20
-)
-
-var (
-	topImagesLong = templates.LongDesc(`
-		Show usage statistics for Images
-
-		This command analyzes all the Images managed by the platform and presents current
-		usage statistics.`)
-
-	topImagesExample = templates.Examples(`
-		# Show usage statistics for Images
-  	%[1]s %[2]s`)
-)
-
-type TopImagesOptions struct {
-	// internal values
-	Images  *imagev1.ImageList
-	Streams *imagev1.ImageStreamList
-	Pods    *corev1.PodList
-
-	genericclioptions.IOStreams
-}
-
-func NewTopImagesOptions(streams genericclioptions.IOStreams) *TopImagesOptions {
-	return &TopImagesOptions{
-		IOStreams: streams,
-	}
-}
-
-// NewCmdTopImages implements the OpenShift cli top images command.
-func NewCmdTopImages(f kcmdutil.Factory, parentName, name string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewTopImagesOptions(streams)
-	cmd := &cobra.Command{
-		Use:     name,
-		Short:   "Show usage statistics for Images",
-		Long:    topImagesLong,
-		Example: fmt.Sprintf(topImagesExample, parentName, name),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Validate(cmd))
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	return cmd
-}
-
-// Complete turns a partially defined TopImagesOptions into a solvent structure
-// which can be validated and used for showing image usage.
-func (o *TopImagesOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	kClient, err := corev1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	imageClient, err := imagev1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	namespace := cmd.Flag("namespace").Value.String()
-	if len(namespace) == 0 {
-		namespace = metav1.NamespaceAll
-	}
-
-	allImages, err := imageClient.Images().List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	o.Images = allImages
-
-	allStreams, err := imageClient.ImageStreams(namespace).List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	o.Streams = allStreams
-
-	allPods, err := kClient.Pods(namespace).List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	o.Pods = allPods
-
-	return nil
-}
-
-// Validate ensures that a TopImagesOptions is valid and can be used to execute the command.
-func (o TopImagesOptions) Validate(cmd *cobra.Command) error {
-	return nil
-}
-
-// Run contains all the necessary functionality to show current image references.
-func (o TopImagesOptions) Run() error {
-	infos := o.imagesTop()
-	Print(o.Out, ImageColumns, infos)
-	return nil
-}
-
-var ImageColumns = []string{"NAME", "IMAGESTREAMTAG", "PARENTS", "USAGE", "METADATA", "STORAGE"}
-
-// imageInfo contains statistical information about Image usage.
-type imageInfo struct {
-	Image           string
-	ImageStreamTags []string
-	Parents         []string
-	Usage           []string
-	Metadata        bool
-	Storage         int64
-}
-
-var _ Info = &imageInfo{}
-
-func (i imageInfo) PrintLine(out io.Writer) {
-	printValue(out, i.Image)
-	printArray(out, i.ImageStreamTags)
-	shortParents := make([]string, len(i.Parents))
-	for j, p := range i.Parents {
-		if len(p) > maxImageIDLength {
-			shortParents[j] = p[:maxImageIDLength-3] + "..."
-		} else {
-			shortParents[j] = p
-		}
-	}
-	printArray(out, shortParents)
-	printArray(out, i.Usage)
-	printBool(out, i.Metadata)
-	printValue(out, units.BytesSize(float64(i.Storage)))
-}
-
-// imagesTop generates Image information from a graph and returns it as a
-// list of imageInfo values.
-func (o TopImagesOptions) imagesTop() []Info {
-	g := genericgraph.New()
-	addImagesToGraph(g, o.Images)
-	addImageStreamsToGraph(g, o.Streams)
-	addPodsToGraph(g, o.Pods)
-	markParentsInGraph(g)
-
-	infos := []Info{}
-	imageNodes := getImageNodes(g.Nodes())
-	for _, in := range imageNodes {
-		image := in.Image
-		istags := getImageStreamTags(g, in)
-		parents := getImageParents(g, in)
-		usage := getImageUsage(g, in)
-		metadata := len(image.DockerImageManifest) != 0 && len(image.DockerImageLayers) != 0
-		storage := getStorage(image)
-		infos = append(infos, imageInfo{
-			Image:           image.Name,
-			ImageStreamTags: istags,
-			Parents:         parents,
-			Usage:           usage,
-			Metadata:        metadata,
-			Storage:         storage,
-		})
-	}
-	sort.Slice(infos, func(i, j int) bool {
-		a, b := infos[i].(imageInfo), infos[j].(imageInfo)
-		if len(a.ImageStreamTags) < len(b.ImageStreamTags) {
-			return false
-		}
-		if len(a.ImageStreamTags) > len(b.ImageStreamTags) {
-			return true
-		}
-		return a.Storage > b.Storage
-	})
-
-	return infos
-}
-
-func getStorage(image *imagev1.Image) int64 {
-	storage := int64(0)
-	blobSet := sets.NewString()
-	for _, layer := range image.DockerImageLayers {
-		if blobSet.Has(layer.Name) {
-			continue
-		}
-		blobSet.Insert(layer.Name)
-		storage += layer.LayerSize
-	}
-	if err := imageutil.ImageWithMetadata(image); err != nil {
-		return storage
-	}
-	dockerImage, ok := image.DockerImageMetadata.Object.(*dockerv10.DockerImage)
-	if !ok {
-		return storage
-	}
-	if len(image.DockerImageConfig) > 0 && !blobSet.Has(dockerImage.ID) {
-		blobSet.Insert(dockerImage.ID)
-		storage += int64(len(image.DockerImageConfig))
-	}
-	return storage
-}
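-
-// Illustrative note (not part of the original file): getStorage counts each
-// blob once. For a hypothetical image with layers [a:512, b:256, a:512] and
-// a 17-byte image config whose metadata parses, it returns 512 + 256 + 17;
-// the duplicate "a" layer contributes nothing the second time.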
-
-func getImageStreamTags(g genericgraph.Graph, node *imagegraph.ImageNode) []string {
-	istags := []string{}
-	for _, e := range g.InboundEdges(node, ImageStreamImageEdgeKind) {
-		streamNode, ok := e.From().(*imagegraph.ImageStreamNode)
-		if !ok {
-			continue
-		}
-		stream := streamNode.ImageStream
-		tags := getTags(stream, node.Image)
-		istags = append(istags, fmt.Sprintf("%s/%s (%s)", stream.Namespace, stream.Name, strings.Join(tags, ",")))
-	}
-	return istags
-}
-
-func getTags(stream *imagev1.ImageStream, image *imagev1.Image) []string {
-	tags := []string{}
-	for _, tag := range stream.Status.Tags {
-		if len(tag.Items) > 0 && tag.Items[0].Image == image.Name {
-			tags = append(tags, tag.Tag)
-		}
-	}
-	imageutil.PrioritizeTags(tags)
-	return tags
-}
-
-func getImageParents(g genericgraph.Graph, node *imagegraph.ImageNode) []string {
-	parents := []string{}
-	for _, e := range g.InboundEdges(node, ParentImageEdgeKind) {
-		imageNode, ok := e.From().(*imagegraph.ImageNode)
-		if !ok {
-			continue
-		}
-		parents = append(parents, imageNode.Image.Name)
-	}
-	return parents
-}
-
-func getImageUsage(g genericgraph.Graph, node *imagegraph.ImageNode) []string {
-	usage := []string{}
-	for _, e := range g.InboundEdges(node, PodImageEdgeKind) {
-		podNode, ok := e.From().(*kubegraph.PodNode)
-		if !ok {
-			continue
-		}
-		usage = append(usage, getController(podNode.Pod))
-	}
-	return usage
-}
-
-func getController(pod *corev1.Pod) string {
-	controller := ""
-	if pod.Annotations == nil {
-		return controller
-	}
-
-	if bc, ok := pod.Annotations[buildv1.BuildAnnotation]; ok {
-		return fmt.Sprintf("Build: %s/%s", pod.Namespace, bc)
-	}
-	if dc, ok := pod.Annotations[appsv1.DeploymentAnnotation]; ok {
-		return fmt.Sprintf("Deployment: %s/%s", pod.Namespace, dc)
-	}
-	if dc, ok := pod.Annotations[appsv1.DeploymentPodAnnotation]; ok {
-		return fmt.Sprintf("Deployer: %s/%s", pod.Namespace, dc)
-	}
-
-	return controller
-}
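-
-// Illustrative note (not part of the original file): getController maps the
-// first matching pod annotation to a human-readable owner. For example, a
-// pod in namespace "ns1" annotated with buildv1.BuildAnnotation "build1"
-// yields "Build: ns1/build1"; a pod with no known annotation yields "".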
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/top/images_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/top/images_test.go
deleted file mode 100644
index 13f91c217ff3..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/top/images_test.go
+++ /dev/null
@@ -1,755 +0,0 @@
-package top
-
-import (
-	"testing"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-	buildv1 "github.com/openshift/api/build/v1"
-	imagev1 "github.com/openshift/api/image/v1"
-	"github.com/openshift/oc/pkg/helpers/image/dockerlayer"
-)
-
-func TestImagesTop(t *testing.T) {
-	testCases := map[string]struct {
-		images   *imagev1.ImageList
-		streams  *imagev1.ImageStreamList
-		pods     *corev1.PodList
-		expected []Info
-	}{
-		"no metadata": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			pods: &corev1.PodList{},
-			expected: []Info{
-				imageInfo{
-					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					ImageStreamTags: []string{"ns1/stream1 (tag1)"},
-					Metadata:        false,
-					Parents:         []string{},
-					Usage:           []string{},
-				},
-			},
-		},
-		"with metadata": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-						DockerImageLayers: []imagev1.ImageLayer{
-							{Name: "layer1", LayerSize: int64(512)},
-							{Name: "layer2", LayerSize: int64(512)},
-						},
-						DockerImageManifest: "non empty metadata",
-					},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			pods: &corev1.PodList{},
-			expected: []Info{
-				imageInfo{
-					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					ImageStreamTags: []string{"ns1/stream1 (tag1)"},
-					Metadata:        true,
-					Parents:         []string{},
-					Usage:           []string{},
-					Storage:         int64(1024),
-				},
-			},
-		},
-		"with metadata and image config": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-						DockerImageLayers: []imagev1.ImageLayer{
-							{Name: "layer1", LayerSize: int64(512)},
-							{Name: "layer2", LayerSize: int64(512)},
-						},
-						DockerImageManifest: `{"schemaVersion": 1, "history": [{"v1Compatibility": "{\"id\":\"2d24f826cb16146e2016ff349a8a33ed5830f3b938d45c0f82943f4ab8c097e7\",\"parent\":\"117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c\",\"created\":\"2015-02-21T02:11:06.735146646Z\",\"container\":\"c9a3eda5951d28aa8dbe5933be94c523790721e4f80886d0a8e7a710132a38ec\",\"container_config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/bin/bash]\"],\"Image\":\"117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"architecture\":\"amd64\",\"os\":\"linux\",\"checksum\":\"tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\",\"Size\":0}\n"}]}`,
-						DockerImageConfig:   "raw image config",
-						DockerImageMetadata: runtime.RawExtension{
-							Raw: []byte(`{"Id":"manifestConfigID"}`),
-						},
-					},
-				},
-			},
-			streams: &imagev1.ImageStreamList{},
-			pods:    &corev1.PodList{},
-			expected: []Info{
-				imageInfo{
-					Image:    "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					Metadata: true,
-					Parents:  []string{},
-					Usage:    []string{},
-					Storage:  int64(1024 + len("raw image config")),
-				},
-			},
-		},
-		"with metadata and image config and some layers duplicated": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-						DockerImageLayers: []imagev1.ImageLayer{
-							{Name: "layer1", LayerSize: int64(512)},
-							{Name: "layer2", LayerSize: int64(256)},
-							{Name: "layer1", LayerSize: int64(512)},
-						},
-						DockerImageManifest: "non empty metadata",
-						DockerImageConfig:   "raw image config",
-					},
-				},
-			},
-			streams: &imagev1.ImageStreamList{},
-			pods:    &corev1.PodList{},
-			expected: []Info{
-				imageInfo{
-					Image:    "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					Metadata: true,
-					Parents:  []string{},
-					Usage:    []string{},
-					Storage:  int64(512 + 256),
-				},
-			},
-		},
-		"multiple tags": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{
-						ObjectMeta:        metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-						DockerImageLayers: []imagev1.ImageLayer{{Name: "layer1"}, {Name: "layer2"}},
-					},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-								},
-								{
-									Tag:   "tag2",
-									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			pods: &corev1.PodList{},
-			expected: []Info{
-				imageInfo{
-					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					ImageStreamTags: []string{"ns1/stream1 (tag1,tag2)"},
-					Metadata:        false,
-					Parents:         []string{},
-					Usage:           []string{},
-				},
-			},
-		},
-		"multiple streams": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{
-						ObjectMeta:        metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-						DockerImageLayers: []imagev1.ImageLayer{{Name: "layer1"}, {Name: "layer2"}},
-					},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-								},
-								{
-									Tag:   "tag2",
-									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-								},
-							},
-						},
-					},
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream2", Namespace: "ns2"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			pods: &corev1.PodList{},
-			expected: []Info{
-				imageInfo{
-					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					ImageStreamTags: []string{"ns1/stream1 (tag1,tag2)", "ns2/stream2 (tag1)"},
-					Metadata:        false,
-					Parents:         []string{},
-					Usage:           []string{},
-				},
-			},
-		},
-		"image without a stream": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-				},
-			},
-			streams: &imagev1.ImageStreamList{},
-			pods:    &corev1.PodList{},
-			expected: []Info{
-				imageInfo{
-					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					ImageStreamTags: []string{},
-					Metadata:        false,
-					Parents:         []string{},
-					Usage:           []string{},
-				},
-			},
-		},
-		"image parents": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{
-						ObjectMeta:          metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-						DockerImageLayers:   []imagev1.ImageLayer{{Name: "layer1"}},
-						DockerImageManifest: "non empty metadata",
-					},
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "image2"},
-						DockerImageLayers: []imagev1.ImageLayer{
-							{Name: "layer1"},
-							{Name: "layer2"},
-						},
-						DockerImageManifest: "non empty metadata",
-					},
-				},
-			},
-			streams: &imagev1.ImageStreamList{},
-			pods:    &corev1.PodList{},
-			expected: []Info{
-				imageInfo{
-					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					ImageStreamTags: []string{},
-					Metadata:        true,
-					Parents:         []string{},
-					Usage:           []string{},
-				},
-				imageInfo{
-					Image:           "image2",
-					ImageStreamTags: []string{},
-					Metadata:        true,
-					Parents:         []string{"sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-					Usage:           []string{},
-				},
-			},
-		},
-		"image parents with empty layer": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{
-						ObjectMeta:          metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-						DockerImageLayers:   []imagev1.ImageLayer{{Name: "layer1"}},
-						DockerImageManifest: "non empty metadata",
-					},
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "image2"},
-						DockerImageLayers: []imagev1.ImageLayer{
-							{Name: "layer1"},
-							{Name: dockerlayer.DigestSha256EmptyTar},
-							{Name: "layer2"},
-						},
-						DockerImageManifest: "non empty metadata",
-					},
-				},
-			},
-			streams: &imagev1.ImageStreamList{},
-			pods:    &corev1.PodList{},
-			expected: []Info{
-				imageInfo{
-					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					ImageStreamTags: []string{},
-					Metadata:        true,
-					Parents:         []string{},
-					Usage:           []string{},
-				},
-				imageInfo{
-					Image:           "image2",
-					ImageStreamTags: []string{},
-					Metadata:        true,
-					Parents:         []string{"sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-					Usage:           []string{},
-				},
-			},
-		},
-		"image parents with gzipped empty layer": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{
-						ObjectMeta:          metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-						DockerImageLayers:   []imagev1.ImageLayer{{Name: "layer1"}},
-						DockerImageManifest: "non empty metadata",
-					},
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "image2"},
-						DockerImageLayers: []imagev1.ImageLayer{
-							{Name: "layer1"},
-							{Name: dockerlayer.GzippedEmptyLayerDigest},
-							{Name: "layer2"},
-						},
-						DockerImageManifest: "non empty metadata",
-					},
-				},
-			},
-			streams: &imagev1.ImageStreamList{},
-			pods:    &corev1.PodList{},
-			expected: []Info{
-				imageInfo{
-					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					ImageStreamTags: []string{},
-					Metadata:        true,
-					Parents:         []string{},
-					Usage:           []string{},
-				},
-				imageInfo{
-					Image:           "image2",
-					ImageStreamTags: []string{},
-					Metadata:        true,
-					Parents:         []string{"sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-					Usage:           []string{},
-				},
-			},
-		},
-		"build pending": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			pods: &corev1.PodList{
-				Items: []corev1.Pod{
-					{
-						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{buildv1.BuildAnnotation: "build1"}},
-						Spec:       corev1.PodSpec{Containers: []corev1.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
-						Status:     corev1.PodStatus{Phase: corev1.PodPending},
-					},
-				},
-			},
-			expected: []Info{
-				imageInfo{
-					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					ImageStreamTags: []string{"ns1/stream1 (tag1)"},
-					Metadata:        false,
-					Parents:         []string{},
-					Usage:           []string{"Build: ns1/build1"},
-				},
-			},
-		},
-		"build running": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			pods: &corev1.PodList{
-				Items: []corev1.Pod{
-					{
-						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{buildv1.BuildAnnotation: "build1"}},
-						Spec:       corev1.PodSpec{Containers: []corev1.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
-						Status:     corev1.PodStatus{Phase: corev1.PodRunning},
-					},
-				},
-			},
-			expected: []Info{
-				imageInfo{
-					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					ImageStreamTags: []string{"ns1/stream1 (tag1)"},
-					Metadata:        false,
-					Parents:         []string{},
-					Usage:           []string{"Build: ns1/build1"},
-				},
-			},
-		},
-		"deployer pending": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			pods: &corev1.PodList{
-				Items: []corev1.Pod{
-					{
-						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{appsv1.DeploymentPodAnnotation: "deployer1"}},
-						Spec:       corev1.PodSpec{Containers: []corev1.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
-						Status:     corev1.PodStatus{Phase: corev1.PodPending},
-					},
-				},
-			},
-			expected: []Info{
-				imageInfo{
-					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					ImageStreamTags: []string{"ns1/stream1 (tag1)"},
-					Metadata:        false,
-					Parents:         []string{},
-					Usage:           []string{"Deployer: ns1/deployer1"},
-				},
-			},
-		},
-		"deployer running": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			pods: &corev1.PodList{
-				Items: []corev1.Pod{
-					{
-						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{appsv1.DeploymentPodAnnotation: "deployer1"}},
-						Spec:       corev1.PodSpec{Containers: []corev1.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
-						Status:     corev1.PodStatus{Phase: corev1.PodRunning},
-					},
-				},
-			},
-			expected: []Info{
-				imageInfo{
-					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					ImageStreamTags: []string{"ns1/stream1 (tag1)"},
-					Metadata:        false,
-					Parents:         []string{},
-					Usage:           []string{"Deployer: ns1/deployer1"},
-				},
-			},
-		},
-		"deployment pending": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			pods: &corev1.PodList{
-				Items: []corev1.Pod{
-					{
-						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{appsv1.DeploymentAnnotation: "deployment1"}},
-						Spec:       corev1.PodSpec{Containers: []corev1.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
-						Status:     corev1.PodStatus{Phase: corev1.PodPending},
-					},
-				},
-			},
-			expected: []Info{
-				imageInfo{
-					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					ImageStreamTags: []string{"ns1/stream1 (tag1)"},
-					Metadata:        false,
-					Parents:         []string{},
-					Usage:           []string{"Deployment: ns1/deployment1"},
-				},
-			},
-		},
-		"deployment running": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			pods: &corev1.PodList{
-				Items: []corev1.Pod{
-					{
-						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{appsv1.DeploymentAnnotation: "deployment1"}},
-						Spec:       corev1.PodSpec{Containers: []corev1.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
-						Status:     corev1.PodStatus{Phase: corev1.PodRunning},
-					},
-				},
-			},
-			expected: []Info{
-				imageInfo{
-					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					ImageStreamTags: []string{"ns1/stream1 (tag1)"},
-					Metadata:        false,
-					Parents:         []string{},
-					Usage:           []string{"Deployment: ns1/deployment1"},
-				},
-			},
-		},
-		"unknown controller 1": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			pods: &corev1.PodList{
-				Items: []corev1.Pod{
-					{
-						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"},
-						Spec:       corev1.PodSpec{Containers: []corev1.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
-						Status:     corev1.PodStatus{Phase: corev1.PodRunning},
-					},
-				},
-			},
-			expected: []Info{
-				imageInfo{
-					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					ImageStreamTags: []string{"ns1/stream1 (tag1)"},
-					Metadata:        false,
-					Parents:         []string{},
-					Usage:           []string{""},
-				},
-			},
-		},
-		"unknown controller 2": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			pods: &corev1.PodList{
-				Items: []corev1.Pod{
-					{
-						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{"unknown controller": "unknown"}},
-						Spec:       corev1.PodSpec{Containers: []corev1.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
-						Status:     corev1.PodStatus{Phase: corev1.PodRunning},
-					},
-				},
-			},
-			expected: []Info{
-				imageInfo{
-					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
-					ImageStreamTags: []string{"ns1/stream1 (tag1)"},
-					Metadata:        false,
-					Parents:         []string{},
-					Usage:           []string{""},
-				},
-			},
-		},
-	}
-
-	for name, test := range testCases {
-		o := TopImagesOptions{
-			Images:  test.images,
-			Streams: test.streams,
-			Pods:    test.pods,
-		}
-		infos := o.imagesTop()
-		if !imageInfosEqual(infos, test.expected) {
-			t.Errorf("%s: unexpected infos, expected %#v, got %#v", name, test.expected, infos)
-		}
-	}
-}
-
-func imageInfosEqual(actual, expected []Info) bool {
-	if len(actual) != len(expected) {
-		return false
-	}
-
-	for _, a := range actual {
-		aii, ok := a.(imageInfo)
-		if !ok {
-			continue
-		}
-		for _, e := range expected {
-			eii, ok := e.(imageInfo)
-			if !ok {
-				continue
-			}
-			if aii.Image != eii.Image {
-				continue
-			}
-			if !stringsEqual(aii.ImageStreamTags, eii.ImageStreamTags) ||
-				!stringsEqual(aii.Parents, eii.Parents) ||
-				!stringsEqual(aii.Usage, eii.Usage) ||
-				aii.Metadata != eii.Metadata ||
-				aii.Storage != eii.Storage {
-				return false
-			}
-			return true
-		}
-	}
-	return false
-}
-
-func stringsEqual(actual, expected []string) bool {
-	if len(actual) != len(expected) {
-		return false
-	}
-
-	for _, a := range actual {
-		found := false
-		for _, e := range expected {
-			if a == e {
-				found = true
-				break
-			}
-		}
-		if !found {
-			return false
-		}
-	}
-	return true
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/top/imagestreams.go b/vendor/github.com/openshift/oc/pkg/cli/admin/top/imagestreams.go
deleted file mode 100644
index b440c419758a..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/top/imagestreams.go
+++ /dev/null
@@ -1,211 +0,0 @@
-package top
-
-import (
-	"fmt"
-	"io"
-	"sort"
-
-	units "github.com/docker/go-units"
-	gonum "github.com/gonum/graph"
-	"github.com/spf13/cobra"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	dockerv10 "github.com/openshift/api/image/docker10"
-	imagev1 "github.com/openshift/api/image/v1"
-	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
-	"github.com/openshift/library-go/pkg/image/imageutil"
-	"github.com/openshift/oc/pkg/helpers/graph/genericgraph"
-	imagegraph "github.com/openshift/oc/pkg/helpers/graph/imagegraph/nodes"
-)
-
-const TopImageStreamsRecommendedName = "imagestreams"
-
-var (
-	topImageStreamsLong = templates.LongDesc(`
-		Show usage statistics for ImageStreams
-
-		This command analyzes all the ImageStreams managed by the platform and presents current
-		usage statistics.`)
-
-	topImageStreamsExample = templates.Examples(`
-		# Show usage statistics for ImageStreams
-  	%[1]s %[2]s`)
-)
-
-type TopImageStreamsOptions struct {
-	// internal values
-	Images  *imagev1.ImageList
-	Streams *imagev1.ImageStreamList
-
-	genericclioptions.IOStreams
-}
-
-func NewTopImageStreamsOptions(streams genericclioptions.IOStreams) *TopImageStreamsOptions {
-	return &TopImageStreamsOptions{
-		IOStreams: streams,
-	}
-}
-
-// NewCmdTopImageStreams implements the OpenShift cli top imagestreams command.
-func NewCmdTopImageStreams(f kcmdutil.Factory, parentName, name string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewTopImageStreamsOptions(streams)
-	cmd := &cobra.Command{
-		Use:     name,
-		Short:   "Show usage statistics for ImageStreams",
-		Long:    topImageStreamsLong,
-		Example: fmt.Sprintf(topImageStreamsExample, parentName, name),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Validate(cmd))
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	return cmd
-}
-
-// Complete turns a partially defined TopImageStreamsOptions into a solvent structure
-// which can be validated and used for showing image stream usage.
-func (o *TopImageStreamsOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	namespace := cmd.Flag("namespace").Value.String()
-	if len(namespace) == 0 {
-		namespace = metav1.NamespaceAll
-	}
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	imageClient, err := imagev1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	allImages, err := imageClient.Images().List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	o.Images = allImages
-
-	allStreams, err := imageClient.ImageStreams(namespace).List(metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	o.Streams = allStreams
-
-	return nil
-}
-
-// Validate ensures that a TopImageStreamsOptions is valid and can be used to execute the command.
-func (o TopImageStreamsOptions) Validate(cmd *cobra.Command) error {
-	return nil
-}
-
-// Run contains all the necessary functionality to show current image references.
-func (o TopImageStreamsOptions) Run() error {
-	infos := o.imageStreamsTop()
-	Print(o.Out, ImageStreamColumns, infos)
-	return nil
-}
-
-var ImageStreamColumns = []string{"NAME", "STORAGE", "IMAGES", "LAYERS"}
-
-// imageStreamInfo contains statistical information about ImageStream usage.
-type imageStreamInfo struct {
-	ImageStream string
-	Storage     int64
-	Images      int
-	Layers      int
-}
-
-var _ Info = &imageStreamInfo{}
-
-func (i imageStreamInfo) PrintLine(out io.Writer) {
-	printValue(out, i.ImageStream)
-	printValue(out, units.BytesSize(float64(i.Storage)))
-	printValue(out, i.Images)
-	printValue(out, i.Layers)
-}
-
-// imageStreamsTop generates ImageStream information from a graph and
-// returns it as a list of imageStreamInfo values.
-func (o TopImageStreamsOptions) imageStreamsTop() []Info {
-	g := genericgraph.New()
-	addImagesToGraph(g, o.Images)
-	addImageStreamsToGraph(g, o.Streams)
-
-	infos := []Info{}
-	streamNodes := getImageStreamNodes(g.Nodes())
-	for _, sn := range streamNodes {
-		storage, images, layers := getImageStreamSize(g, sn)
-		infos = append(infos, imageStreamInfo{
-			ImageStream: fmt.Sprintf("%s/%s", sn.ImageStream.Namespace, sn.ImageStream.Name),
-			Storage:     storage,
-			Images:      images,
-			Layers:      layers,
-		})
-	}
-	sort.Slice(infos, func(i, j int) bool {
-		a, b := infos[i].(imageStreamInfo), infos[j].(imageStreamInfo)
-		if a.Storage < b.Storage {
-			return false
-		}
-		if a.Storage > b.Storage {
-			return true
-		}
-		return a.Images > b.Images
-	})
-
-	return infos
-}
-
-func getImageStreamSize(g genericgraph.Graph, node *imagegraph.ImageStreamNode) (int64, int, int) {
-	imageEdges := g.OutboundEdges(node, ImageStreamImageEdgeKind)
-	storage := int64(0)
-	images := len(imageEdges)
-	layers := 0
-	blobSet := sets.NewString()
-	for _, e := range imageEdges {
-		imageNode, ok := e.To().(*imagegraph.ImageNode)
-		if !ok {
-			continue
-		}
-		image := imageNode.Image
-		layers += len(image.DockerImageLayers)
-		// count only unique layers across the entire stream
-		for _, layer := range image.DockerImageLayers {
-			if blobSet.Has(layer.Name) {
-				continue
-			}
-			blobSet.Insert(layer.Name)
-			storage += layer.LayerSize
-		}
-		if err := imageutil.ImageWithMetadata(image); err != nil {
-			continue
-		}
-		dockerImage, ok := image.DockerImageMetadata.Object.(*dockerv10.DockerImage)
-		if !ok {
-			continue
-		}
-		if len(image.DockerImageConfig) > 0 && !blobSet.Has(dockerImage.ID) {
-			blobSet.Insert(dockerImage.ID)
-			storage += int64(len(image.DockerImageConfig))
-		}
-	}
-
-	return storage, images, layers
-}
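-
-// Illustrative note (not part of the original file): storage deduplicates
-// blobs across the whole stream via blobSet, while the layer count sums
-// every image's layer list. Two images sharing one layer therefore report
-// layers=2 but count that layer's size only once in storage.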
-
-func getImageStreamNodes(nodes []gonum.Node) []*imagegraph.ImageStreamNode {
-	ret := []*imagegraph.ImageStreamNode{}
-	for i := range nodes {
-		if node, ok := nodes[i].(*imagegraph.ImageStreamNode); ok {
-			ret = append(ret, node)
-		}
-	}
-	return ret
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/top/imagestreams_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/top/imagestreams_test.go
deleted file mode 100644
index c8f6b7ab8897..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/top/imagestreams_test.go
+++ /dev/null
@@ -1,318 +0,0 @@
-package top
-
-import (
-	"testing"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	kapihelper "k8s.io/kubernetes/pkg/apis/core/helper"
-
-	dockerv10 "github.com/openshift/api/image/docker10"
-	imagev1 "github.com/openshift/api/image/v1"
-)
-
-func TestImageStreamsTop(t *testing.T) {
-	testCases := map[string]struct {
-		images   *imagev1.ImageList
-		streams  *imagev1.ImageStreamList
-		expected []Info
-	}{
-		"empty image stream": {
-			images: &imagev1.ImageList{},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "image1"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			expected: []Info{
-				imageStreamInfo{
-					ImageStream: "ns1/stream1",
-					Images:      0,
-					Layers:      0,
-				},
-			},
-		},
-		"no storage": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{
-						ObjectMeta:        metav1.ObjectMeta{Name: "image1"},
-						DockerImageLayers: []imagev1.ImageLayer{{Name: "layer1"}},
-					},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "image1"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			expected: []Info{
-				imageStreamInfo{
-					ImageStream: "ns1/stream1",
-					Images:      1,
-					Layers:      1,
-				},
-			},
-		},
-		"with storage": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{
-						ObjectMeta:        metav1.ObjectMeta{Name: "image1"},
-						DockerImageLayers: []imagev1.ImageLayer{{Name: "layer1", LayerSize: int64(1024)}},
-					},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "image1"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			expected: []Info{
-				imageStreamInfo{
-					ImageStream: "ns1/stream1",
-					Storage:     int64(1024),
-					Images:      1,
-					Layers:      1,
-				},
-			},
-		},
-		"multiple layers": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "image1"},
-						DockerImageLayers: []imagev1.ImageLayer{
-							{Name: "layer1", LayerSize: 1024},
-							{Name: "layer2", LayerSize: 512},
-						},
-					},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "image1"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			expected: []Info{
-				imageStreamInfo{
-					ImageStream: "ns1/stream1",
-					Storage:     int64(1536),
-					Images:      1,
-					Layers:      2,
-				},
-			},
-		},
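-		// In the multi-image cases below, "layer1" is shared by both images:
-		// Storage charges it once (1024 + 128 = 1152), while Layers still
-		// counts every reference (3).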
-		"multiple images": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{
-						ObjectMeta:        metav1.ObjectMeta{Name: "image1"},
-						DockerImageLayers: []imagev1.ImageLayer{{Name: "layer1", LayerSize: int64(1024)}},
-					},
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "image2"},
-						DockerImageLayers: []imagev1.ImageLayer{
-							{Name: "layer1", LayerSize: int64(1024)},
-							{Name: "layer2", LayerSize: int64(128)},
-						},
-					},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "image1"}},
-								},
-								{
-									Tag:   "tag2",
-									Items: []imagev1.TagEvent{{Image: "image2"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			expected: []Info{
-				imageStreamInfo{
-					ImageStream: "ns1/stream1",
-					Storage:     int64(1152),
-					Images:      2,
-					Layers:      3,
-				},
-			},
-		},
-		"multiple images with manifest config": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{
-						ObjectMeta:        metav1.ObjectMeta{Name: "image1"},
-						DockerImageLayers: []imagev1.ImageLayer{{Name: "layer1", LayerSize: int64(1024)}},
-						DockerImageConfig: "raw image config",
-						DockerImageMetadata: runtime.RawExtension{
-							Object: &dockerv10.DockerImage{
-								ID: "manifestConfigID",
-							},
-						},
-					},
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "image2"},
-						DockerImageLayers: []imagev1.ImageLayer{
-							{Name: "layer1", LayerSize: int64(1024)},
-							{Name: "layer2", LayerSize: int64(128)},
-						},
-						DockerImageConfig: "raw image config",
-						DockerImageMetadata: runtime.RawExtension{
-							Object: &dockerv10.DockerImage{
-								ID: "manifestConfigID",
-							},
-						},
-					},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "image1"}},
-								},
-								{
-									Tag:   "tag2",
-									Items: []imagev1.TagEvent{{Image: "image2"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			expected: []Info{
-				imageStreamInfo{
-					ImageStream: "ns1/stream1",
-					Storage:     int64(1152 + len("raw image config")),
-					Images:      2,
-					Layers:      3,
-				},
-			},
-		},
-		"multiple unreferenced images": {
-			images: &imagev1.ImageList{
-				Items: []imagev1.Image{
-					{
-						ObjectMeta:        metav1.ObjectMeta{Name: "image1"},
-						DockerImageLayers: []imagev1.ImageLayer{{Name: "layer1", LayerSize: int64(1024)}},
-					},
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "image2"},
-						DockerImageLayers: []imagev1.ImageLayer{
-							{Name: "layer1", LayerSize: int64(1024)},
-							{Name: "layer2", LayerSize: int64(128)},
-						},
-					},
-				},
-			},
-			streams: &imagev1.ImageStreamList{
-				Items: []imagev1.ImageStream{
-					{
-						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imagev1.ImageStreamStatus{
-							Tags: []imagev1.NamedTagEventList{
-								{
-									Tag:   "tag1",
-									Items: []imagev1.TagEvent{{Image: "image1"}},
-								},
-							},
-						},
-					},
-				},
-			},
-			expected: []Info{
-				imageStreamInfo{
-					ImageStream: "ns1/stream1",
-					Storage:     int64(1024),
-					Images:      1,
-					Layers:      1,
-				},
-			},
-		},
-	}
-
-	for name, test := range testCases {
-		o := TopImageStreamsOptions{
-			Images:  test.images,
-			Streams: test.streams,
-		}
-		infos := o.imageStreamsTop()
-		if !infosEqual(infos, test.expected) {
-			t.Errorf("%s: unexpected infos, expected %#v, got %#v", name, test.expected, infos)
-		}
-	}
-}
-
-func infosEqual(actual, expected []Info) bool {
-	if len(actual) != len(expected) {
-		return false
-	}
-
-	for _, a := range actual {
-		found := false
-		for _, e := range expected {
-			if kapihelper.Semantic.DeepEqual(a, e) {
-				found = true
-				break
-			}
-		}
-		if !found {
-			return false
-		}
-	}
-	return true
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/top/printer.go b/vendor/github.com/openshift/oc/pkg/cli/admin/top/printer.go
deleted file mode 100644
index 514390c4fad8..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/top/printer.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package top
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"strings"
-	"text/tabwriter"
-)
-
-type Info interface {
-	PrintLine(out io.Writer)
-}
-
-func Print(out io.Writer, headers []string, infos []Info) {
-	s := tabbedString(func(out *tabwriter.Writer) {
-		printHeader(out, headers)
-		for _, info := range infos {
-			info.PrintLine(out)
-			fmt.Fprintf(out, "\n")
-		}
-	})
-	fmt.Fprintf(out, "%s", s)
-}
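-// A minimal illustrative use (assuming a populated infos slice):
-//
-//	Print(os.Stdout, ImageStreamColumns, infos)
-//
-// renders the infos as a tab-aligned table under a header row.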
-
-func printHeader(out io.Writer, columns []string) {
-	for _, col := range columns {
-		printValue(out, col)
-	}
-	fmt.Fprintf(out, "\n")
-}
-
-func printArray(out io.Writer, values []string) {
-	if len(values) == 0 {
-		printValue(out, "")
-	} else {
-		printValue(out, strings.Join(values, ", "))
-	}
-}
-
-func printValue(out io.Writer, value interface{}) {
-	fmt.Fprintf(out, "%v\t", value)
-}
-
-func printBool(out io.Writer, value bool) {
-	if value {
-		printValue(out, "yes")
-	} else {
-		printValue(out, "no")
-	}
-}
-
-func tabbedString(f func(*tabwriter.Writer)) string {
-	out := new(tabwriter.Writer)
-	buf := &bytes.Buffer{}
-	out.Init(buf, 0, 8, 1, ' ', 0)
-	f(out)
-	out.Flush()
-	return buf.String()
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/top/top.go b/vendor/github.com/openshift/oc/pkg/cli/admin/top/top.go
deleted file mode 100644
index adcd30ce84c7..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/top/top.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package top
-
-import (
-	"github.com/spf13/cobra"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/top"
-
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	cmdutil "github.com/openshift/oc/pkg/helpers/cmd"
-)
-
-const (
-	TopRecommendedName = "top"
-)
-
-var topLong = templates.LongDesc(`
-	Show usage statistics of resources on the server
-
-	This command analyzes resources managed by the platform and presents current
-	usage statistics.`)
-
-func NewCommandTop(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	// Parent command to which all subcommands are added.
-	cmds := &cobra.Command{
-		Use:   name,
-		Short: "Show usage statistics of resources on the server",
-		Long:  topLong,
-		Run:   kcmdutil.DefaultSubCommandRun(streams.ErrOut),
-	}
-
-	cmdTopNode := cmdutil.ReplaceCommandName("kubectl", fullName, top.NewCmdTopNode(f, nil, streams))
-	cmdTopPod := cmdutil.ReplaceCommandName("kubectl", fullName, top.NewCmdTopPod(f, nil, streams))
-
-	cmds.AddCommand(NewCmdTopImages(f, fullName, TopImagesRecommendedName, streams))
-	cmds.AddCommand(NewCmdTopImageStreams(f, fullName, TopImageStreamsRecommendedName, streams))
-	cmdTopNode.Long = templates.LongDesc(cmdTopNode.Long)
-	cmdTopNode.Example = templates.Examples(cmdTopNode.Example)
-	cmdTopPod.Long = templates.LongDesc(cmdTopPod.Long)
-	cmdTopPod.Example = templates.Examples(cmdTopPod.Example)
-	cmds.AddCommand(cmdTopNode)
-	cmds.AddCommand(cmdTopPod)
-	return cmds
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/upgrade/upgrade.go b/vendor/github.com/openshift/oc/pkg/cli/admin/upgrade/upgrade.go
deleted file mode 100644
index 2f570c2caf8e..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/upgrade/upgrade.go
+++ /dev/null
@@ -1,385 +0,0 @@
-package upgrade
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"sort"
-	"strings"
-	"text/tabwriter"
-
-	"github.com/blang/semver"
-	"github.com/spf13/cobra"
-
-	"k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	configv1 "github.com/openshift/api/config/v1"
-	configv1client "github.com/openshift/client-go/config/clientset/versioned"
-	imagereference "github.com/openshift/library-go/pkg/image/reference"
-)
-
-func NewOptions(streams genericclioptions.IOStreams) *Options {
-	return &Options{
-		IOStreams: streams,
-	}
-}
-
-func New(f kcmdutil.Factory, parentName string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewOptions(streams)
-	cmd := &cobra.Command{
-		Use:   "upgrade --to=VERSION",
-		Short: "Upgrade a cluster",
-		Long: templates.LongDesc(`
-			Upgrade the cluster to a newer version
-
-			This command will request that the cluster begin an upgrade. If no arguments are passed
-			the command will retrieve the current version info and display whether an upgrade is
-			in progress or whether any errors might prevent an upgrade, as well as show the suggested
-			updates available to the cluster. Information about compatible updates is periodically
-			retrieved from the update server and cached on the cluster - these are updates that are
-			known to be supported as upgrades from the current version.
-
-			Passing --to=VERSION will upgrade the cluster to one of the available updates or report
-			an error if no such version exists. The cluster will then upgrade itself and report
-			status that is available via "oc get clusterversion" and "oc describe clusterversion".
-
-			If the cluster is already being upgraded, or the cluster version has a failing or invalid
-			state, you may pass --force to continue the upgrade anyway.
-
-			If there are no versions available, or a bug in the cluster version operator prevents
-			updates from being retrieved, the more powerful and dangerous --to-image=IMAGE option
-			may be used. This forces the cluster to upgrade to the contents of the specified release
-			image, regardless of whether that upgrade is safe to apply to the current version. While
-			rolling back to a previous micro version (4.0.2 -> 4.0.1) may be safe, upgrading more
-			than one minor version ahead (4.0 -> 4.2) or downgrading one minor version (4.1 -> 4.0)
-			is likely to cause data corruption or to completely break a cluster.
-		`),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-	flags := cmd.Flags()
-	flags.StringVar(&o.To, "to", o.To, "Specify the version to upgrade to. The version must be on the list of previous or available updates.")
-	flags.StringVar(&o.ToImage, "to-image", o.ToImage, "Provide a release image to upgrade to. WARNING: This option does not check for upgrade compatibility and may break your cluster.")
-	flags.BoolVar(&o.ToLatestAvailable, "to-latest", o.ToLatestAvailable, "Upgrade to the latest available version")
-	flags.BoolVar(&o.Clear, "clear", o.Clear, "If an upgrade has been requested but not yet downloaded, cancel the update. This has no effect once the update has started.")
-	flags.BoolVar(&o.Force, "force", o.Force, "Upgrade even if an upgrade is in process or other error is blocking update.")
-	return cmd
-}
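-// Typical invocations (illustrative; the versions are placeholders):
-//
-//	oc adm upgrade                # show status and available updates
-//	oc adm upgrade --to=4.1.2     # request an update to a listed version
-//	oc adm upgrade --clear        # cancel an update that has not started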
-
-type Options struct {
-	genericclioptions.IOStreams
-
-	To                string
-	ToImage           string
-	ToLatestAvailable bool
-
-	Force bool
-	Clear bool
-
-	Client configv1client.Interface
-}
-
-func (o *Options) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	if o.Clear && (len(o.ToImage) > 0 || len(o.To) > 0 || o.ToLatestAvailable) {
-		return fmt.Errorf("--clear may not be specified with any other flags")
-	}
-	if len(o.To) > 0 && len(o.ToImage) > 0 {
-		return fmt.Errorf("only one of --to or --to-image may be provided")
-	}
-
-	if len(o.To) > 0 {
-		if _, err := semver.Parse(o.To); err != nil {
-			return fmt.Errorf("--to must be a semantic version (e.g. 4.0.1 or 4.1.0-nightly-20181104): %v", err)
-		}
-	}
-	// defend against simple mistakes (4.0.1 is a valid container image)
-	if len(o.ToImage) > 0 {
-		ref, err := imagereference.Parse(o.ToImage)
-		if err != nil {
-			return fmt.Errorf("--to-image must be a valid image pull spec: %v", err)
-		}
-		if len(ref.Registry) == 0 && len(ref.Namespace) == 0 {
-			return fmt.Errorf("--to-image must be a valid image pull spec: no registry or repository specified")
-		}
-		if len(ref.ID) == 0 && len(ref.Tag) == 0 {
-			return fmt.Errorf("--to-image must be a valid image pull spec: no tag or digest specified")
-		}
-	}
-
-	cfg, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	client, err := configv1client.NewForConfig(cfg)
-	if err != nil {
-		return err
-	}
-	o.Client = client
-	return nil
-}
-
-func (o *Options) Run() error {
-	cv, err := o.Client.ConfigV1().ClusterVersions().Get("version", metav1.GetOptions{})
-	if err != nil {
-		if errors.IsNotFound(err) {
-			return fmt.Errorf("No cluster version information available - you must be connected to a v4.0 OpenShift server to fetch the current version")
-		}
-		return err
-	}
-
-	switch {
-	case o.Clear:
-		if cv.Spec.DesiredUpdate == nil {
-			fmt.Fprintf(o.Out, "info: No update in progress\n")
-			return nil
-		}
-		original := cv.Spec.DesiredUpdate
-		cv.Spec.DesiredUpdate = nil
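-		// A JSON merge patch that nulls only spec.desiredUpdate is used here
-		// (rather than a full Update) so the cancellation cannot clobber
-		// concurrent changes to other fields.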
-		updated, err := o.Client.ConfigV1().ClusterVersions().Patch(cv.Name, types.MergePatchType, []byte(`{"spec":{"desiredUpdate":null}}`))
-		if err != nil {
-			return fmt.Errorf("Unable to cancel current rollout: %v", err)
-		}
-		if updateIsEquivalent(*original, updated.Status.Desired) {
-			fmt.Fprintf(o.Out, "Cleared the update field, still at %s\n", updateVersionString(updated.Status.Desired))
-		} else {
-			fmt.Fprintf(o.Out, "Cancelled requested upgrade to %s\n", updateVersionString(*original))
-		}
-		return nil
-
-	case o.ToLatestAvailable:
-		if len(cv.Status.AvailableUpdates) == 0 {
-			fmt.Fprintf(o.Out, "info: Cluster is already at the latest available version %s\n", cv.Status.Desired.Version)
-			return nil
-		}
-
-		if !o.Force {
-			if err := checkForUpgrade(cv); err != nil {
-				return err
-			}
-		}
-
-		sortSemanticVersions(cv.Status.AvailableUpdates)
-		update := cv.Status.AvailableUpdates[len(cv.Status.AvailableUpdates)-1]
-		cv.Spec.DesiredUpdate = &update
-
-		_, err := o.Client.ConfigV1().ClusterVersions().Update(cv)
-		if err != nil {
-			return fmt.Errorf("Unable to upgrade to latest version %s: %v", update.Version, err)
-		}
-
-		if len(update.Version) > 0 {
-			fmt.Fprintf(o.Out, "Updating to latest version %s\n", update.Version)
-		} else {
-			fmt.Fprintf(o.Out, "Updating to latest release image %s\n", update.Image)
-		}
-
-		return nil
-
-	case len(o.To) > 0, len(o.ToImage) > 0:
-		var update *configv1.Update
-		if len(o.To) > 0 {
-			if o.To == cv.Status.Desired.Version {
-				fmt.Fprintf(o.Out, "info: Cluster is already at version %s\n", o.To)
-				return nil
-			}
-			for _, available := range cv.Status.AvailableUpdates {
-				if available.Version == o.To {
-					update = &available
-					break
-				}
-			}
-			if update == nil {
-				if len(cv.Status.AvailableUpdates) == 0 {
-					if c := findCondition(cv.Status.Conditions, configv1.RetrievedUpdates); c != nil && c.Status == configv1.ConditionFalse {
-						return fmt.Errorf("Can't look up image for version %s. %v", o.To, c.Message)
-					}
-					return fmt.Errorf("No available updates, specify --to-image or wait for new updates to be available")
-				}
-				return fmt.Errorf("The update %s is not one of the available updates: %s", o.To, strings.Join(versionStrings(cv.Status.AvailableUpdates), ", "))
-			}
-		}
-		if len(o.ToImage) > 0 {
-			if o.ToImage == cv.Status.Desired.Image && !o.Force {
-				fmt.Fprintf(o.Out, "info: Cluster is already using release image %s\n", o.ToImage)
-				return nil
-			}
-			update = &configv1.Update{
-				Version: "",
-				Image:   o.ToImage,
-			}
-		}
-
-		if o.Force {
-			update.Force = true
-		} else {
-			if err := checkForUpgrade(cv); err != nil {
-				return err
-			}
-		}
-
-		cv.Spec.DesiredUpdate = update
-
-		_, err := o.Client.ConfigV1().ClusterVersions().Update(cv)
-		if err != nil {
-			return fmt.Errorf("Unable to upgrade: %v", err)
-		}
-
-		if len(update.Version) > 0 {
-			fmt.Fprintf(o.Out, "Updating to %s\n", update.Version)
-		} else {
-			fmt.Fprintf(o.Out, "Updating to release image %s\n", update.Image)
-		}
-
-		return nil
-
-	default:
-		if c := findCondition(cv.Status.Conditions, configv1.OperatorDegraded); c != nil && c.Status == configv1.ConditionTrue {
-			prefix := "No upgrade is possible due to an error"
-			if c := findCondition(cv.Status.Conditions, configv1.OperatorProgressing); c != nil && c.Status == configv1.ConditionTrue && len(c.Message) > 0 {
-				prefix = c.Message
-			}
-			if len(c.Message) > 0 {
-				return fmt.Errorf("%s:\n\n  Reason: %s\n  Message: %s\n\n", prefix, c.Reason, c.Message)
-			}
-			return fmt.Errorf("The cluster can't be upgraded, see `oc describe clusterversion`")
-		}
-
-		if c := findCondition(cv.Status.Conditions, configv1.OperatorProgressing); c != nil && len(c.Message) > 0 {
-			if c.Status == configv1.ConditionTrue {
-				fmt.Fprintf(o.Out, "info: An upgrade is in progress. %s\n", c.Message)
-			} else {
-				fmt.Fprintln(o.Out, c.Message)
-			}
-		} else {
-			fmt.Fprintln(o.ErrOut, "warning: No current status info, see `oc describe clusterversion` for more details")
-		}
-		fmt.Fprintln(o.Out)
-
-		if len(cv.Status.AvailableUpdates) > 0 {
-			fmt.Fprintf(o.Out, "Updates:\n\n")
-			w := tabwriter.NewWriter(o.Out, 0, 2, 1, ' ', 0)
-			fmt.Fprintf(w, "VERSION\tIMAGE\n")
-			// TODO: add metadata about version
-			for _, update := range cv.Status.AvailableUpdates {
-				fmt.Fprintf(w, "%s\t%s\n", update.Version, update.Image)
-			}
-			w.Flush()
-			if c := findCondition(cv.Status.Conditions, configv1.RetrievedUpdates); c != nil && c.Status == configv1.ConditionFalse {
-				fmt.Fprintf(o.ErrOut, "warning: Cannot refresh available updates:\n  Reason: %s\n  Message: %s\n\n", c.Reason, c.Message)
-			}
-		} else {
-			if c := findCondition(cv.Status.Conditions, configv1.RetrievedUpdates); c != nil && c.Status == configv1.ConditionFalse {
-				fmt.Fprintf(o.ErrOut, "warning: Cannot display available updates:\n  Reason: %s\n  Message: %s\n\n", c.Reason, c.Message)
-			} else {
-				fmt.Fprintf(o.Out, "No updates available. You may force an upgrade to a specific release image, but doing so may not be supported and may result in downtime or data loss.\n")
-			}
-		}
-
-		// TODO: print previous versions
-	}
-
-	return nil
-}
-
-func errorList(errs []error) string {
-	if len(errs) == 1 {
-		return errs[0].Error()
-	}
-	buf := &bytes.Buffer{}
-	fmt.Fprintf(buf, "\n\n")
-	for _, err := range errs {
-		fmt.Fprintf(buf, "* %v\n", err)
-	}
-	return buf.String()
-}
-
-func updateVersionString(update configv1.Update) string {
-	if len(update.Version) > 0 {
-		return update.Version
-	}
-	if len(update.Image) > 0 {
-		return update.Image
-	}
-	return ""
-}
-
-func stringArrContains(arr []string, s string) bool {
-	for _, item := range arr {
-		if item == s {
-			return true
-		}
-	}
-	return false
-}
-
-func writeTabSection(out io.Writer, fn func(w io.Writer)) {
-	w := tabwriter.NewWriter(out, 0, 4, 1, ' ', 0)
-	fn(w)
-	w.Flush()
-}
-
-func updateIsEquivalent(a, b configv1.Update) bool {
-	switch {
-	case len(a.Image) > 0 && len(b.Image) > 0:
-		return a.Image == b.Image
-	case len(a.Version) > 0 && len(b.Version) > 0:
-		return a.Version == b.Version
-	default:
-		return false
-	}
-}
-
-// sortSemanticVersions sorts the input slice in increasing order: versions
-// that do not parse as semantic versions sort first (lexically), followed
-// by valid semantic versions in ascending semver order.
-func sortSemanticVersions(versions []configv1.Update) {
-	sort.Slice(versions, func(i, j int) bool {
-		a, errA := semver.Parse(versions[i].Version)
-		b, errB := semver.Parse(versions[j].Version)
-		if errA == nil && errB != nil {
-			return false
-		}
-		if errB == nil && errA != nil {
-			return true
-		}
-		if errA != nil && errB != nil {
-			return versions[i].Version < versions[j].Version
-		}
-		return a.LT(b)
-	})
-}
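-// For example, {"2.0.1", "10.0.0", "not-sem-ver-2", "2.0.0", "not-sem-ver-1"}
-// sorts to {"not-sem-ver-1", "not-sem-ver-2", "2.0.0", "2.0.1", "10.0.0"}:
-// semver comparison places "10.0.0" after "2.0.1", which a plain string
-// sort would not.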
-
-func versionStrings(updates []configv1.Update) []string {
-	var arr []string
-	for _, update := range updates {
-		arr = append(arr, update.Version)
-	}
-	return arr
-}
-
-func findCondition(conditions []configv1.ClusterOperatorStatusCondition, name configv1.ClusterStatusConditionType) *configv1.ClusterOperatorStatusCondition {
-	for i := range conditions {
-		if conditions[i].Type == name {
-			return &conditions[i]
-		}
-	}
-	return nil
-}
-
-func checkForUpgrade(cv *configv1.ClusterVersion) error {
-	if c := findCondition(cv.Status.Conditions, "Invalid"); c != nil && c.Status == configv1.ConditionTrue {
-		return fmt.Errorf("The cluster version object is invalid, you must correct the invalid state first.\n\n  Reason: %s\n  Message: %s\n\n", c.Reason, c.Message)
-	}
-	if c := findCondition(cv.Status.Conditions, configv1.OperatorDegraded); c != nil && c.Status == configv1.ConditionTrue {
-		return fmt.Errorf("The cluster is experiencing an upgrade-blocking error, use --force to upgrade anyway.\n\n  Reason: %s\n  Message: %s\n\n", c.Reason, c.Message)
-	}
-	if c := findCondition(cv.Status.Conditions, configv1.OperatorProgressing); c != nil && c.Status == configv1.ConditionTrue {
-		return fmt.Errorf("Already upgrading, pass --force to override.\n\n  Reason: %s\n  Message: %s\n\n", c.Reason, c.Message)
-	}
-	return nil
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/upgrade/upgrade_test.go b/vendor/github.com/openshift/oc/pkg/cli/admin/upgrade/upgrade_test.go
deleted file mode 100644
index 7fd2c5fa2a86..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/upgrade/upgrade_test.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package upgrade
-
-import (
-	"math/rand"
-	"reflect"
-	"testing"
-
-	configv1 "github.com/openshift/api/config/v1"
-)
-
-func TestSortSemanticVersions(t *testing.T) {
-	expected := []configv1.Update{
-		{Version: "not-sem-ver-1"},
-		{Version: "not-sem-ver-2"},
-		{Version: "2.0.0"},
-		{Version: "2.0.1"},
-		{Version: "10.0.0"},
-	}
-
-	actual := make([]configv1.Update, len(expected))
-	for i, j := range rand.Perm(len(expected)) {
-		actual[i] = expected[j]
-	}
-
-	sortSemanticVersions(actual)
-	if !reflect.DeepEqual(actual, expected) {
-		t.Errorf("%v != %v", actual, expected)
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/verifyimagesignature/manifest.go b/vendor/github.com/openshift/oc/pkg/cli/admin/verifyimagesignature/manifest.go
deleted file mode 100644
index e0b0c1650407..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/verifyimagesignature/manifest.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package verifyimagesignature
-
-import (
-	"context"
-	"net/http"
-	"net/url"
-
-	godigest "github.com/opencontainers/go-digest"
-
-	"k8s.io/client-go/rest"
-
-	"github.com/openshift/library-go/pkg/image/registryclient"
-)
-
-// getImageManifestByIDFromRegistry retrieves the image manifest from the registry using the basic
-// authentication using the image ID.
-func getImageManifestByIDFromRegistry(registry *url.URL, repositoryName, imageID, username, password string, insecure bool) ([]byte, error) {
-	ctx := context.Background()
-
-	credentials := registryclient.NewBasicCredentials()
-	credentials.Add(registry, username, password)
-
-	insecureRT, err := rest.TransportFor(&rest.Config{TLSClientConfig: rest.TLSClientConfig{Insecure: true}})
-	if err != nil {
-		return nil, err
-	}
-
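-	// registryclient.NewContext is given both a TLS-verifying transport and
-	// an insecure one; the insecure transport is used only when the
-	// `insecure` flag passed to Repository below is true.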
-	repo, err := registryclient.NewContext(http.DefaultTransport, insecureRT).
-		WithCredentials(credentials).
-		Repository(ctx, registry, repositoryName, insecure)
-	if err != nil {
-		return nil, err
-	}
-
-	manifests, err := repo.Manifests(ctx, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	manifest, err := manifests.Get(ctx, godigest.Digest(imageID))
-	if err != nil {
-		return nil, err
-	}
-	_, manifestPayload, err := manifest.Payload()
-	if err != nil {
-		return nil, err
-	}
-
-	return manifestPayload, nil
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/admin/verifyimagesignature/verify-signature.go b/vendor/github.com/openshift/oc/pkg/cli/admin/verifyimagesignature/verify-signature.go
deleted file mode 100644
index 219ce4c212c1..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/admin/verifyimagesignature/verify-signature.go
+++ /dev/null
@@ -1,443 +0,0 @@
-package verifyimagesignature
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"net/url"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/containers/image/docker/policyconfiguration"
-	"github.com/containers/image/docker/reference"
-	"github.com/containers/image/signature"
-	sigtypes "github.com/containers/image/types"
-	"github.com/spf13/cobra"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	imagev1 "github.com/openshift/api/image/v1"
-	imagev1typedclient "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
-	userv1typedclient "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1"
-	imageref "github.com/openshift/library-go/pkg/image/reference"
-)
-
-var (
-	verifyImageSignatureLongDesc = templates.LongDesc(`
-	Verifies the image signature of an image imported to the internal registry using the local public GPG key.
-
-	This command verifies whether the image identity contained in the image signature can be trusted
-	by using the public GPG key to verify the signature itself and matching the provided expected identity
-	with the identity (pull spec) of the given image.
-	By default, this command will use the public GPG keyring located in "$GNUPGHOME/pubring.gpg".
-
-	By default, this command will not save the result of the verification back to the image object; to do so,
-	users have to specify the "--save" flag. Note that to modify the image signature verification status,
-	users must have permission to edit the image object (usually the "image-auditor" role).
-
-	Note that using the "--save" flag on an already verified image together with an invalid GPG
-	key or an invalid expected identity will cause the saved verification status to be removed
-	and the image will become "unverified".
-
-	If this command is run outside the cluster, users have to specify the "--registry-url" parameter
-	with the public URL of image registry.
-
-	To remove all verifications, users can use the "--remove-all" flag.
-	`)
-
-	verifyImageSignatureExample = templates.Examples(`
-	# Verify the image signature and identity using the local GPG keychain
-	%[1]s sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 \
-			--expected-identity=registry.local:5000/foo/bar:v1
-
-	# Verify the image signature and identity using the local GPG keychain and save the status
-	%[1]s sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 \
-			--expected-identity=registry.local:5000/foo/bar:v1 --save
-
-	# Verify the image signature and identity via exposed registry route
-	%[1]s sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 \
-			--expected-identity=registry.local:5000/foo/bar:v1 \
-			--registry-url=docker-registry.foo.com
-
-	# Remove all signature verifications from the image
-	%[1]s sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 --remove-all
-	`)
-)
-
-const (
-	VerifyRecommendedName = "verify-image-signature"
-)
-
-type VerifyImageSignatureOptions struct {
-	InputImage        string
-	ExpectedIdentity  string
-	PublicKeyFilename string
-	PublicKey         []byte
-	Save              bool
-	RemoveAll         bool
-	CurrentUser       string
-	CurrentUserToken  string
-	RegistryURL       string
-	Insecure          bool
-
-	ImageClient imagev1typedclient.ImageV1Interface
-
-	genericclioptions.IOStreams
-}
-
-func NewVerifyImageSignatureOptions(streams genericclioptions.IOStreams) *VerifyImageSignatureOptions {
-	return &VerifyImageSignatureOptions{
-		// TODO: This improves the error message users get when containers/image is not able
-		// to locate the pubring.gpg file (which is the default).
-		// This should be improved/fixed in containers/image.
-		PublicKeyFilename: filepath.Join(os.Getenv("GNUPGHOME"), "pubring.gpg"),
-		IOStreams:         streams,
-	}
-}
-
-func NewCmdVerifyImageSignature(name, fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewVerifyImageSignatureOptions(streams)
-	cmd := &cobra.Command{
-		Use:     fmt.Sprintf("%s IMAGE --expected-identity=EXPECTED_IDENTITY [--save]", VerifyRecommendedName),
-		Short:   "Verify the image identity contained in the image signature",
-		Long:    verifyImageSignatureLongDesc,
-		Example: fmt.Sprintf(verifyImageSignatureExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Validate())
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	cmd.Flags().StringVar(&o.ExpectedIdentity, "expected-identity", o.ExpectedIdentity, "An expected image docker reference to verify (required).")
-	cmd.Flags().BoolVar(&o.Save, "save", o.Save, "If true, the result of the verification will be saved to an image object.")
-	cmd.Flags().BoolVar(&o.RemoveAll, "remove-all", o.RemoveAll, "If set, all signature verifications will be removed from the given image.")
-	cmd.Flags().StringVar(&o.PublicKeyFilename, "public-key", o.PublicKeyFilename, fmt.Sprintf("A path to a public GPG key to be used for verification. (defaults to %q)", o.PublicKeyFilename))
-	cmd.Flags().StringVar(&o.RegistryURL, "registry-url", o.RegistryURL, "The address to use when contacting the registry, instead of using the internal cluster address. This is useful if you can't resolve or reach the internal registry address.")
-	cmd.Flags().BoolVar(&o.Insecure, "insecure", o.Insecure, "If set, use the insecure protocol for registry communication.")
-	return cmd
-}
-
-func (o *VerifyImageSignatureOptions) Validate() error {
-	if !o.RemoveAll {
-		if len(o.ExpectedIdentity) == 0 {
-			return errors.New("the --expected-identity is required")
-		}
-		if _, err := imageref.Parse(o.ExpectedIdentity); err != nil {
-			return errors.New("the --expected-identity must be a valid image reference")
-		}
-	}
-	if o.RemoveAll && len(o.ExpectedIdentity) > 0 {
-		return errors.New("the --expected-identity cannot be used when removing all verifications")
-	}
-	return nil
-}
-
-func (o *VerifyImageSignatureOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	if len(args) != 1 {
-		return kcmdutil.UsageErrorf(cmd, "exactly one image must be specified")
-	}
-	o.InputImage = args[0]
-	var err error
-
-	if len(o.PublicKeyFilename) > 0 {
-		if o.PublicKey, err = ioutil.ReadFile(o.PublicKeyFilename); err != nil {
-			return fmt.Errorf("unable to read --public-key: %v", err)
-		}
-	}
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.ImageClient, err = imagev1typedclient.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	userClient, err := userv1typedclient.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	// We need the current user name so we can record it into a verification condition, and
-	// we need a bearer token so we can fetch the manifest from the registry.
-	// TODO: Add support for external registries (currently only the integrated registry is supported).
-	if me, err := userClient.Users().Get("~", metav1.GetOptions{}); err != nil {
-		return err
-	} else {
-		o.CurrentUser = me.Name
-
-		if config, err := f.ToRESTConfig(); err != nil {
-			return err
-		} else {
-			if o.CurrentUserToken = config.BearerToken; len(o.CurrentUserToken) == 0 {
-				return fmt.Errorf("no token is currently in use for this session")
-			}
-		}
-	}
-
-	return nil
-}
-
-func (o VerifyImageSignatureOptions) Run() error {
-	img, err := o.ImageClient.Images().Get(o.InputImage, metav1.GetOptions{})
-	if err != nil {
-		return err
-	}
-	if len(img.Signatures) == 0 {
-		return fmt.Errorf("%s does not have any signature", img.Name)
-	}
-
-	pr, err := signature.NewPRSignedByKeyPath(signature.SBKeyTypeGPGKeys, o.PublicKeyFilename, signature.NewPRMMatchRepoDigestOrExact())
-	if err != nil {
-		return fmt.Errorf("unable to prepare verification policy requirements: %v", err)
-	}
-	policy := signature.Policy{Default: []signature.PolicyRequirement{pr}}
-	pc, err := signature.NewPolicyContext(&policy)
-	if err != nil {
-		return fmt.Errorf("unable to setup policy: %v", err)
-	}
-	defer pc.Destroy()
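-	// The requirement above corresponds to this containers/image policy.json
-	// entry (a sketch; keyPath abbreviated):
-	//
-	//	{"default": [{"type": "signedBy", "keyType": "GPGKeys",
-	//	  "keyPath": "...", "signedIdentity": {"type": "matchRepoDigestOrExact"}}]}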
-
-	if o.RemoveAll {
-		img.Signatures = []imagev1.ImageSignature{}
-	}
-
-	for i, s := range img.Signatures {
-		// Verify the signature against the policy
-		signedBy, err := o.verifySignature(pc, img, s.Content)
-		if err != nil {
-			fmt.Fprintf(o.ErrOut, "error verifying signature %s for image %s (verification status will be removed): %v\n", img.Signatures[i].Name, o.InputImage, err)
-			img.Signatures[i] = imagev1.ImageSignature{}
-			continue
-		}
-		fmt.Fprintf(o.Out, "image %q identity is now confirmed (signed by GPG key %q)\n", o.InputImage, signedBy)
-
-		now := metav1.Now()
-		newConditions := []imagev1.SignatureCondition{
-			{
-				Type:               "Trusted",
-				Status:             corev1.ConditionTrue,
-				LastProbeTime:      now,
-				LastTransitionTime: now,
-				Reason:             "manually verified",
-				Message:            fmt.Sprintf("verified by user %q", o.CurrentUser),
-			},
-			// TODO: This should not be needed (need to relax validation).
-			{
-				Type:               "ForImage",
-				Status:             corev1.ConditionTrue,
-				LastProbeTime:      now,
-				LastTransitionTime: now,
-			},
-		}
-		img.Signatures[i].Conditions = newConditions
-		img.Signatures[i].IssuedBy = &imagev1.SignatureIssuer{}
-		// TODO: This should not be just a key id but a human-readable identity.
-		img.Signatures[i].IssuedBy.CommonName = signedBy
-	}
-
-	if o.Save || o.RemoveAll {
-		_, err := o.ImageClient.Images().Update(img)
-		return err
-	} else {
-		fmt.Fprintf(o.Out, "Neither --save nor --remove-all was passed, image %q not updated to %v\n", o.InputImage, img)
-	}
-	return nil
-}
-
-// getImageManifest fetches the manifest for provided image from the integrated registry.
-func (o *VerifyImageSignatureOptions) getImageManifest(img *imagev1.Image) ([]byte, error) {
-	parsed, err := imageref.Parse(img.DockerImageReference)
-	if err != nil {
-		return nil, err
-	}
-	// TODO(juanvallejo): Add missing methods to DockerImageReference object in library-go helper
-	registryURL := parsed.RegistryURL()
-	if len(o.RegistryURL) > 0 {
-		registryURL = &url.URL{Host: o.RegistryURL, Scheme: "https"}
-		if o.Insecure {
-			registryURL.Scheme = ""
-		}
-	}
-	return getImageManifestByIDFromRegistry(registryURL, parsed.RepositoryName(), img.Name, o.CurrentUser, o.CurrentUserToken, o.Insecure)
-}
-
-// verifySignature takes policy, image and the image signature blob and verifies that the
-// signature was signed by a trusted key, the expected identity matches the one in the
-// signature message and the manifest matches as well.
-// In case the image identity is confirmed, this function returns the matching GPG key in
-// short form, otherwise it returns rejection reason.
-func (o *VerifyImageSignatureOptions) verifySignature(pc *signature.PolicyContext, img *imagev1.Image, sigBlob []byte) (string, error) {
-	manifest, err := o.getImageManifest(img)
-	if err != nil {
-		return "", fmt.Errorf("failed to get image %q manifest: %v", img.Name, err)
-	}
-	allowed, err := pc.IsRunningImageAllowed(newUnparsedImage(o.ExpectedIdentity, sigBlob, manifest))
-	if !allowed && err == nil {
-		return "", errors.New("signature rejected but no error set")
-	}
-	if err != nil {
-		return "", fmt.Errorf("signature rejected: %v", err)
-	}
-	if untrustedInfo, err := signature.GetUntrustedSignatureInformationWithoutVerifying(sigBlob); err != nil {
-		// This is treated as an unverified signature. It really shouldn’t happen anyway.
-		return "", fmt.Errorf("error getting signing key identity: %v", err)
-	} else {
-		return untrustedInfo.UntrustedShortKeyIdentifier, nil
-	}
-}
-
-// dummyDockerTransport is containers/image/docker.Transport, except that it only provides identity information.
-var dummyDockerTransport = dockerTransport{}
-
-type dockerTransport struct{}
-
-func (t dockerTransport) Name() string {
-	return "docker"
-}
-
-// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
-func (t dockerTransport) ParseReference(reference string) (sigtypes.ImageReference, error) {
-	return parseDockerReference(reference)
-}
-
-// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
-// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
-// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
-// scope passed to this function will not be "", that value is always allowed.
-func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {
-	// FIXME? We could be verifying the various character set and length restrictions
-	// from docker/distribution/reference.regexp.go, but other than that there
-	// are few semantically invalid strings.
-	return nil
-}
-
-// dummyDockerReference is containers/image/docker.Reference, except that it only provides identity information.
-type dummyDockerReference struct{ ref reference.Named }
-
-// parseDockerReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
-func parseDockerReference(refString string) (sigtypes.ImageReference, error) {
-	if !strings.HasPrefix(refString, "//") {
-		return nil, fmt.Errorf("docker: image reference %s does not start with //", refString)
-	}
-	ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
-	if err != nil {
-		return nil, err
-	}
-	ref = reference.TagNameOnly(ref)
-
-	if reference.IsNameOnly(ref) {
-		return nil, fmt.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
-	}
-	// A github.com/distribution/reference value can have a tag and a digest at the same time!
-	// The docker/distribution API does not really support that (we can’t ask for an image with a specific
-	// tag and digest), so fail.  This MAY be accepted in the future.
-	// (Even if it were supported, the semantics of policy namespaces are unclear - should we drop
-	// the tag or the digest first?)
-	_, isTagged := ref.(reference.NamedTagged)
-	_, isDigested := ref.(reference.Canonical)
-	if isTagged && isDigested {
-		return nil, fmt.Errorf("Docker references with both a tag and digest are currently not supported")
-	}
-	return dummyDockerReference{
-		ref: ref,
-	}, nil
-}
-
-func (ref dummyDockerReference) Transport() sigtypes.ImageTransport {
-	return dummyDockerTransport
-}
-
-// StringWithinTransport returns a string representation of the reference, which MUST be such that
-// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
-// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
-// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
-func (ref dummyDockerReference) StringWithinTransport() string {
-	return "//" + reference.FamiliarString(ref.ref)
-}
-
-// DockerReference returns a Docker reference associated with this reference
-// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
-// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
-func (ref dummyDockerReference) DockerReference() reference.Named {
-	return ref.ref
-}
-
-// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
-// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
-// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
-// (i.e. various references with exactly the same semantics should return the same configuration identity)
-// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
-// not required/guaranteed that it will be a valid input to Transport().ParseReference().
-// Returns "" if configuration identities for these references are not supported.
-func (ref dummyDockerReference) PolicyConfigurationIdentity() string {
-	res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
-	if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure.
-		panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
-	}
-	return res
-}
-
-// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
-// for if explicit configuration for PolicyConfigurationIdentity() is not set.  The list will be processed
-// in order, terminating on first match, and an implicit "" is always checked at the end.
-// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
-// and each following element to be a prefix of the element preceding it.
-func (ref dummyDockerReference) PolicyConfigurationNamespaces() []string {
-	return policyconfiguration.DockerReferenceNamespaces(ref.ref)
-}
-
-func (ref dummyDockerReference) NewImage(ctx *sigtypes.SystemContext) (sigtypes.Image, error) {
-	panic("Unimplemented")
-}
-func (ref dummyDockerReference) NewImageSource(ctx *sigtypes.SystemContext, requestedManifestMIMETypes []string) (sigtypes.ImageSource, error) {
-	panic("Unimplemented")
-}
-func (ref dummyDockerReference) NewImageDestination(ctx *sigtypes.SystemContext) (sigtypes.ImageDestination, error) {
-	panic("Unimplemented")
-}
-func (ref dummyDockerReference) DeleteImage(ctx *sigtypes.SystemContext) error {
-	panic("Unimplemented")
-}
-
-// unparsedImage implements sigtypes.UnparsedImage, to allow evaluating the signature policy
-// against an image without having to make it pullable by containers/image
-type unparsedImage struct {
-	ref       sigtypes.ImageReference
-	manifest  []byte
-	signature []byte
-}
-
-func newUnparsedImage(expectedIdentity string, signature, manifest []byte) sigtypes.UnparsedImage {
-	// We check the error in Validate()
-	ref, _ := parseDockerReference("//" + expectedIdentity)
-	return &unparsedImage{ref: ref, manifest: manifest, signature: signature}
-}
-
-// Reference returns the reference used to set up this source, _as specified by the user_
-// (not as the image itself, or its underlying storage, claims).  This can be used e.g. to determine which public keys are trusted for this image.
-func (ui *unparsedImage) Reference() sigtypes.ImageReference {
-	return ui.ref
-}
-
-// Close removes resources associated with an initialized UnparsedImage, if any.
-func (ui *unparsedImage) Close() error {
-	return nil
-}
-
-// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
-func (ui *unparsedImage) Manifest() ([]byte, string, error) {
-	return ui.manifest, "", nil
-}
-
-// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
-func (ui *unparsedImage) Signatures(context.Context) ([][]byte, error) {
-	return [][]byte{ui.signature}, nil
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/buildlogs/buildlogs.go b/vendor/github.com/openshift/oc/pkg/cli/buildlogs/buildlogs.go
deleted file mode 100644
index 7fef96a0ba3b..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/buildlogs/buildlogs.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package buildlogs
-
-import (
-	"fmt"
-	"io"
-	"net/http"
-
-	"github.com/spf13/cobra"
-
-	"k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	buildv1 "github.com/openshift/api/build/v1"
-	buildclient "github.com/openshift/client-go/build/clientset/versioned"
-	"github.com/openshift/oc/pkg/cli/logs"
-	buildmanualclient "github.com/openshift/oc/pkg/helpers/build/client/v1"
-)
-
-var (
-	buildLogsLong = templates.LongDesc(`
-		Retrieve logs for a build
-
-		This command displays the log for the provided build. If the pod that ran the build has been deleted, logs
-		will no longer be available. If the build has not yet completed, the build logs will be streamed until the
-		build completes or fails.`)
-
-	buildLogsExample = templates.Examples(`
-		# Stream logs from container
-		%[1]s build-logs 566bed879d2d`)
-)
-
-type BuildLogsOptions struct {
-	Follow bool
-	NoWait bool
-
-	Name        string
-	Namespace   string
-	BuildClient buildclient.Interface
-
-	genericclioptions.IOStreams
-}
-
-func NewBuildLogsOptions(streams genericclioptions.IOStreams) *BuildLogsOptions {
-	return &BuildLogsOptions{
-		IOStreams: streams,
-		Follow:    true,
-	}
-}
-
-// NewCmdBuildLogs implements the OpenShift cli build-logs command
-func NewCmdBuildLogs(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewBuildLogsOptions(streams)
-	cmd := &cobra.Command{
-		Use:        "build-logs BUILD",
-		Short:      "Show logs from a build",
-		Long:       buildLogsLong,
-		Example:    fmt.Sprintf(buildLogsExample, fullName),
-		Deprecated: fmt.Sprintf("use oc %v build/", logs.LogsRecommendedCommandName),
-		Hidden:     true,
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.RunBuildLogs())
-		},
-	}
-	cmd.Flags().BoolVarP(&o.Follow, "follow", "f", o.Follow, "Specify whether logs should be followed; default is true.")
-	cmd.Flags().BoolVarP(&o.NoWait, "nowait", "w", o.NoWait, "Specify whether to return immediately without waiting for logs to be available; default is false.")
-
-	return cmd
-}
-
-func (o *BuildLogsOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	if len(args) != 1 {
-		return fmt.Errorf("build name is required")
-	}
-	o.Name = args[0]
-
-	var err error
-	o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.BuildClient, err = buildclient.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// RunBuildLogs contains all the necessary functionality for the OpenShift cli build-logs command
-func (o *BuildLogsOptions) RunBuildLogs() error {
-	opts := buildv1.BuildLogOptions{
-		Follow: o.Follow,
-		NoWait: o.NoWait,
-	}
-	readCloser, err := buildmanualclient.NewBuildLogClient(o.BuildClient.BuildV1().RESTClient(), o.Namespace, scheme.Scheme).Logs(o.Name, opts).Stream()
-	if err != nil {
-		return err
-	}
-	defer readCloser.Close()
-
-	_, err = io.Copy(o.Out, readCloser)
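-	// The assertion below deliberately shadows err: a 404 from the log
-	// stream is rewritten into a build- or pod-specific message.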
-	if err, ok := err.(errors.APIStatus); ok {
-		if err.Status().Code == http.StatusNotFound {
-			switch err.Status().Details.Kind {
-			case "build":
-				return fmt.Errorf("the build %s could not be found, therefore build logs cannot be retrieved", err.Status().Details.Name)
-			case "pod":
-				return fmt.Errorf("the pod %s for build %s could not be found, therefore build logs cannot be retrieved", err.Status().Details.Name, o.Name)
-			}
-		}
-	}
-	return err
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/cancelbuild/OWNERS b/vendor/github.com/openshift/oc/pkg/cli/cancelbuild/OWNERS
deleted file mode 100644
index 75caac287c99..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/cancelbuild/OWNERS
+++ /dev/null
@@ -1,6 +0,0 @@
-reviewers:
-  - adambkaplan
-  - gmontero
-  - coreydaley
-approvers:
-  - adambkaplan
diff --git a/vendor/github.com/openshift/oc/pkg/cli/cancelbuild/cancelbuild.go b/vendor/github.com/openshift/oc/pkg/cli/cancelbuild/cancelbuild.go
deleted file mode 100644
index e19cdce0a476..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/cancelbuild/cancelbuild.go
+++ /dev/null
@@ -1,371 +0,0 @@
-package cancelbuild
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/spf13/cobra"
-
-	kapierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/api/meta"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/util/validation"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/printers"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	"github.com/openshift/api/build"
-	buildv1 "github.com/openshift/api/build/v1"
-	buildtv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1"
-	ocbuildutil "github.com/openshift/oc/pkg/helpers/build"
-	buildclientv1 "github.com/openshift/oc/pkg/helpers/build/client/v1"
-	cmdutil "github.com/openshift/oc/pkg/helpers/cmd"
-)
-
-// CancelBuildRecommendedCommandName is the recommended command name.
-const CancelBuildRecommendedCommandName = "cancel-build"
-
-var (
-	cancelBuildLong = templates.LongDesc(`
-		Cancel running, pending, or new builds
-
-		This command requests a graceful shutdown of the build. There may be a delay between requesting
-		the cancellation and the time the build is terminated.`)
-
-	cancelBuildExample = templates.Examples(`
-	  # Cancel the build with the given name
-	  %[1]s %[2]s ruby-build-2
-
-	  # Cancel the named build and print the build logs
-	  %[1]s %[2]s ruby-build-2 --dump-logs
-
-	  # Cancel the named build and create a new one with the same parameters
-	  %[1]s %[2]s ruby-build-2 --restart
-
-	  # Cancel multiple builds
-	  %[1]s %[2]s ruby-build-1 ruby-build-2 ruby-build-3
-
-	  # Cancel all builds created from 'ruby-build' build configuration that are in 'new' state
-	  %[1]s %[2]s bc/ruby-build --state=new`)
-)
-
-// CancelBuildOptions contains all the options for running the CancelBuild cli command.
-type CancelBuildOptions struct {
-	DumpLogs   bool
-	Restart    bool
-	States     []string
-	Namespace  string
-	BuildNames []string
-
-	HasError                bool
-	ReportError             func(error)
-	PrinterCancel           printers.ResourcePrinter
-	PrinterCancelInProgress printers.ResourcePrinter
-	PrinterRestart          printers.ResourcePrinter
-	Mapper                  meta.RESTMapper
-	Client                  buildtv1client.BuildV1Interface
-	BuildClient             buildtv1client.BuildInterface
-
-	// timeout is used by unit tests to shorten the polling period
-	timeout time.Duration
-
-	genericclioptions.IOStreams
-}
-
-func NewCancelBuildOptions(streams genericclioptions.IOStreams) *CancelBuildOptions {
-	return &CancelBuildOptions{
-		IOStreams: streams,
-		States:    []string{"new", "pending", "running"},
-	}
-}
-
-// NewCmdCancelBuild implements the OpenShift cli cancel-build command
-func NewCmdCancelBuild(name, baseName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewCancelBuildOptions(streams)
-	cmd := &cobra.Command{
-		Use:        fmt.Sprintf("%s (BUILD | BUILDCONFIG)", name),
-		Short:      "Cancel running, pending, or new builds",
-		Long:       cancelBuildLong,
-		Example:    fmt.Sprintf(cancelBuildExample, baseName, name),
-		SuggestFor: []string{"builds", "stop-build"},
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Validate())
-			kcmdutil.CheckErr(o.RunCancelBuild())
-		},
-	}
-
-	cmd.Flags().StringSliceVar(&o.States, "state", o.States, "Only cancel builds in this state")
-	cmd.Flags().BoolVar(&o.DumpLogs, "dump-logs", o.DumpLogs, "Specify if the build logs for the cancelled build should be shown.")
-	cmd.Flags().BoolVar(&o.Restart, "restart", o.Restart, "Specify if a new build should be created after the current build is cancelled.")
-
-	return cmd
-}
-
-// Complete completes all the required options.
-func (o *CancelBuildOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	if len(args) == 0 {
-		return fmt.Errorf("build or a buildconfig name is required")
-	}
-
-	o.ReportError = func(err error) {
-		o.HasError = true
-		fmt.Fprintf(o.ErrOut, "error: %s\n", err.Error())
-	}
-
-	var err error
-	o.PrinterCancel, err = printers.NewTypeSetter(scheme.Scheme).
-		WrapToPrinter(&printers.NamePrinter{Operation: "cancelled"}, nil)
-	if err != nil {
-		return err
-	}
-	o.PrinterRestart, err = printers.NewTypeSetter(scheme.Scheme).
-		WrapToPrinter(&printers.NamePrinter{Operation: "restarted"}, nil)
-	if err != nil {
-		return err
-	}
-	o.PrinterCancelInProgress, err = printers.NewTypeSetter(scheme.Scheme).
-		WrapToPrinter(&printers.NamePrinter{Operation: "marked for cancellation, waiting to be cancelled"}, nil)
-	if err != nil {
-		return err
-	}
-
-	if o.timeout.Seconds() == 0 {
-		o.timeout = 30 * time.Second
-	}
-
-	o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	config, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-
-	o.Client, err = buildtv1client.NewForConfig(config)
-	if err != nil {
-		return err
-	}
-
-	o.BuildClient = o.Client.Builds(o.Namespace)
-	o.Mapper, err = f.ToRESTMapper()
-	if err != nil {
-		return err
-	}
-
-	for _, item := range args {
-		resource, name, err := cmdutil.ResolveResource(build.Resource("builds"), item, o.Mapper)
-		if err != nil {
-			return err
-		}
-
-		switch resource {
-		case build.Resource("buildconfigs"):
-			list, err := buildConfigBuilds(o.Client, o.Namespace, name, nil)
-			if err != nil {
-				return err
-			}
-			for _, b := range list {
-				o.BuildNames = append(o.BuildNames, b.Name)
-			}
-		case build.Resource("builds"):
-			o.BuildNames = append(o.BuildNames, strings.TrimSpace(name))
-		default:
-			return fmt.Errorf("invalid resource provided: %v", resource)
-		}
-	}
-
-	return nil
-}
-
-type buildFilter func(*buildv1.Build) bool
-
-// buildConfigSelector returns a label Selector which can be used to find all
-// builds for a BuildConfig.
-func buildConfigSelector(name string) labels.Selector {
-	return labels.Set{buildv1.BuildConfigLabel: labelValue(name)}.AsSelector()
-}
-
-// labelValue returns a string to use as a value for the Build
-// label in a pod. If the length of the string parameter exceeds
-// the maximum label length, the value will be truncated.
-func labelValue(name string) string {
-	if len(name) <= validation.DNS1123LabelMaxLength {
-		return name
-	}
-	return name[:validation.DNS1123LabelMaxLength]
-}
-
-// buildConfigBuilds returns a list of builds for the given build config.
-// Optionally, a filter function can be supplied to select only builds that
-// match your criteria.
-func buildConfigBuilds(c buildtv1client.BuildsGetter, namespace, name string, filterFunc buildFilter) ([]*buildv1.Build, error) {
-	result, err := c.Builds(namespace).List(metav1.ListOptions{LabelSelector: buildConfigSelector(name).String()})
-	if err != nil {
-		return nil, err
-	}
-	builds := make([]*buildv1.Build, len(result.Items))
-	for i := range result.Items {
-		builds[i] = &result.Items[i]
-	}
-	if filterFunc == nil {
-		return builds, nil
-	}
-	var filteredList []*buildv1.Build
-	for _, b := range builds {
-		if filterFunc(b) {
-			filteredList = append(filteredList, b)
-		}
-	}
-	return filteredList, nil
-}
-
-func (o *CancelBuildOptions) Validate() error {
-	for _, state := range o.States {
-		if len(state) > 0 && !isStateCancellable(state) {
-			return fmt.Errorf("invalid --state flag value, must be one of 'new', 'pending', or 'running'")
-		}
-	}
-
-	return nil
-}
-
-// RunCancelBuild implements all the necessary functionality for CancelBuild.
-func (o *CancelBuildOptions) RunCancelBuild() error {
-	var builds []*buildv1.Build
-	for _, name := range o.BuildNames {
-		build, err := o.BuildClient.Get(name, metav1.GetOptions{})
-		if err != nil {
-			o.ReportError(fmt.Errorf("build %s/%s not found", o.Namespace, name))
-			continue
-		}
-
-		stateMatch := false
-		for _, state := range o.States {
-			if strings.ToLower(string(build.Status.Phase)) == state {
-				stateMatch = true
-				break
-			}
-		}
-
-		if stateMatch && !ocbuildutil.IsTerminalPhase(build.Status.Phase) {
-			builds = append(builds, build)
-		}
-	}
-
-	if o.DumpLogs {
-		for _, b := range builds {
-			// Do not attempt to get logs from a build that was not scheduled.
-			if b.Status.Phase == buildv1.BuildPhaseNew {
-				continue
-			}
-			logClient := buildclientv1.NewBuildLogClient(o.Client.RESTClient(), o.Namespace, scheme.Scheme)
-			opts := buildv1.BuildLogOptions{NoWait: true}
-			response, err := logClient.Logs(b.Name, opts).Do().Raw()
-			if err != nil {
-				o.ReportError(fmt.Errorf("unable to fetch logs for %s/%s: %v", b.Namespace, b.Name, err))
-				continue
-			}
-			fmt.Fprintf(o.Out, "==== Build %s/%s logs ====\n", b.Namespace, b.Name)
-			fmt.Fprint(o.Out, string(response))
-		}
-	}
-
-	var wg sync.WaitGroup
-	for _, b := range builds {
-		wg.Add(1)
-		go func(build *buildv1.Build) {
-			defer wg.Done()
-			err := wait.Poll(500*time.Millisecond, o.timeout, func() (bool, error) {
-				build.Status.Cancelled = true
-				_, err := o.BuildClient.Update(build)
-				switch {
-				case err == nil:
-					return true, nil
-				case kapierrors.IsConflict(err):
-					build, err = o.BuildClient.Get(build.Name, metav1.GetOptions{})
-					return false, err
-				}
-				return true, err
-			})
-			if err != nil {
-				o.ReportError(fmt.Errorf("build %s/%s failed to update: %v", build.Namespace, build.Name, err))
-				return
-			}
-
-			// ignore any error here; the phase verification below is more important
-			o.PrinterCancelInProgress.PrintObj(build, o.Out)
-
-			// Make sure the build phase is really cancelled.
-			timeout := o.timeout
-			if build.Spec.Strategy.JenkinsPipelineStrategy != nil {
-				// bump the timeout in case we have to wait for Jenkins
-				// to come up so that the sync plugin can actually change
-				// the phase
-				timeout = timeout + (3 * time.Minute)
-			}
-			err = wait.Poll(500*time.Millisecond, timeout, func() (bool, error) {
-				updatedBuild, err := o.BuildClient.Get(build.Name, metav1.GetOptions{})
-				if err != nil {
-					return true, err
-				}
-				return updatedBuild.Status.Phase == buildv1.BuildPhaseCancelled, nil
-			})
-			if err != nil {
-				o.ReportError(fmt.Errorf("build %s/%s failed to cancel: %v", build.Namespace, build.Name, err))
-				return
-			}
-
-			if err := o.PrinterCancel.PrintObj(build, o.Out); err != nil {
-				o.ReportError(fmt.Errorf("build %s/%s failed to print: %v", build.Namespace, build.Name, err))
-				return
-			}
-		}(b)
-	}
-	wg.Wait()
-
-	if o.Restart {
-		for _, b := range builds {
-			request := &buildv1.BuildRequest{ObjectMeta: metav1.ObjectMeta{Namespace: b.Namespace, Name: b.Name}}
-			build, err := o.BuildClient.Clone(request.Name, request)
-			if err != nil {
-				o.ReportError(fmt.Errorf("build %s/%s failed to restart: %v", b.Namespace, b.Name, err))
-				continue
-			}
-			if err := o.PrinterRestart.PrintObj(b, o.Out); err != nil {
-				o.ReportError(fmt.Errorf("build %s/%s failed to print: %v", build.Namespace, build.Name, err))
-				continue
-			}
-		}
-	}
-
-	if o.HasError {
-		return errors.New("failure during the build cancellation")
-	}
-
-	return nil
-}
-
-// isStateCancellable validates the state provided by the '--state' flag.
-func isStateCancellable(state string) bool {
-	cancellablePhases := []string{
-		string(buildv1.BuildPhaseNew),
-		string(buildv1.BuildPhasePending),
-		string(buildv1.BuildPhaseRunning),
-	}
-	for _, p := range cancellablePhases {
-		if state == strings.ToLower(p) {
-			return true
-		}
-	}
-	return false
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/cancelbuild/cancelbuild_test.go b/vendor/github.com/openshift/oc/pkg/cli/cancelbuild/cancelbuild_test.go
deleted file mode 100644
index 510e13757a57..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/cancelbuild/cancelbuild_test.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package cancelbuild
-
-import (
-	"io"
-	"strconv"
-	"strings"
-	"testing"
-	"time"
-
-	"k8s.io/apimachinery/pkg/api/apitesting"
-	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	clientgotesting "k8s.io/client-go/testing"
-
-	"github.com/openshift/api"
-	buildv1 "github.com/openshift/api/build/v1"
-	buildfake "github.com/openshift/client-go/build/clientset/versioned/fake"
-)
-
-// TestCancelBuildDefaultFlags ensures that the flags' default values are set.
-func TestCancelBuildDefaultFlags(t *testing.T) {
-	o := NewCancelBuildOptions(genericclioptions.NewTestIOStreamsDiscard())
-
-	tests := map[string]struct {
-		flagName   string
-		defaultVal string
-	}{
-		"state": {
-			flagName:   "state",
-			defaultVal: "[" + strings.Join(o.States, ",") + "]",
-		},
-		"dump-logs": {
-			flagName:   "dump-logs",
-			defaultVal: strconv.FormatBool(o.DumpLogs),
-		},
-		"restart": {
-			flagName:   "restart",
-			defaultVal: strconv.FormatBool(o.Restart),
-		},
-	}
-
-	cmd := NewCmdCancelBuild("oc", CancelBuildRecommendedCommandName, nil, genericclioptions.NewTestIOStreamsDiscard())
-
-	for _, v := range tests {
-		f := cmd.Flag(v.flagName)
-		if f == nil {
-			t.Fatalf("expected flag %s to be registered but found none", v.flagName)
-		}
-
-		if f.DefValue != v.defaultVal {
-			t.Errorf("expected default value of %s for %s but found %s", v.defaultVal, v.flagName, f.DefValue)
-		}
-	}
-}
-
-// TestCancelBuildRun ensures that RunCancelBuild triggers the right client actions.
-func TestCancelBuildRun(t *testing.T) {
-	tests := map[string]struct {
-		opts            *CancelBuildOptions
-		phase           buildv1.BuildPhase
-		expectedActions []testAction
-		expectedErr     error
-	}{
-		"cancelled": {
-			opts: &CancelBuildOptions{
-				PrinterCancel:           &discardingPrinter{},
-				PrinterCancelInProgress: &discardingPrinter{},
-				PrinterRestart:          &discardingPrinter{},
-				IOStreams:               genericclioptions.NewTestIOStreamsDiscard(),
-				Namespace:               "test",
-				States:                  []string{"new", "pending", "running"},
-			},
-			phase: buildv1.BuildPhaseCancelled,
-			expectedActions: []testAction{
-				{verb: "get", resource: "builds"},
-			},
-			expectedErr: nil,
-		},
-		"complete": {
-			opts: &CancelBuildOptions{
-				PrinterCancel:           &discardingPrinter{},
-				PrinterCancelInProgress: &discardingPrinter{},
-				PrinterRestart:          &discardingPrinter{},
-				IOStreams:               genericclioptions.NewTestIOStreamsDiscard(),
-				Namespace:               "test",
-			},
-			phase: buildv1.BuildPhaseComplete,
-			expectedActions: []testAction{
-				{verb: "get", resource: "builds"},
-			},
-			expectedErr: nil,
-		},
-		"new": {
-			opts: &CancelBuildOptions{
-				PrinterCancel:           &discardingPrinter{},
-				PrinterCancelInProgress: &discardingPrinter{},
-				PrinterRestart:          &discardingPrinter{},
-				IOStreams:               genericclioptions.NewTestIOStreamsDiscard(),
-				Namespace:               "test",
-			},
-			phase: buildv1.BuildPhaseNew,
-			expectedActions: []testAction{
-				{verb: "get", resource: "builds"},
-				{verb: "update", resource: "builds"},
-				{verb: "get", resource: "builds"},
-			},
-			expectedErr: nil,
-		},
-		"pending": {
-			opts: &CancelBuildOptions{
-				PrinterCancel:           &discardingPrinter{},
-				PrinterCancelInProgress: &discardingPrinter{},
-				PrinterRestart:          &discardingPrinter{},
-				IOStreams:               genericclioptions.NewTestIOStreamsDiscard(),
-				Namespace:               "test",
-			},
-			phase: buildv1.BuildPhaseNew,
-			expectedActions: []testAction{
-				{verb: "get", resource: "builds"},
-				{verb: "update", resource: "builds"},
-				{verb: "get", resource: "builds"},
-			},
-			expectedErr: nil,
-		},
-		"running and restart": {
-			opts: &CancelBuildOptions{
-				PrinterCancel:           &discardingPrinter{},
-				PrinterCancelInProgress: &discardingPrinter{},
-				PrinterRestart:          &discardingPrinter{},
-				IOStreams:               genericclioptions.NewTestIOStreamsDiscard(),
-				Namespace:               "test",
-				Restart:                 true,
-			},
-			phase: buildv1.BuildPhaseNew,
-			expectedActions: []testAction{
-				{verb: "get", resource: "builds"},
-				{verb: "update", resource: "builds"},
-				{verb: "get", resource: "builds"},
-				{verb: "create", resource: "builds"},
-			},
-			expectedErr: nil,
-		},
-	}
-
-	for testName, test := range tests {
-		build := genBuild(test.phase)
-		// FIXME: we have to fake out a BuildRequest so the fake client will let us
-		// pass this test. It considers 'create builds/clone' to be an update on the
-		// main resource (builds), but uses the resource from the clone function,
-		// which is a BuildRequest. It needs to be able to "update"/"get" a
-		// BuildRequest, so we stub one out here.
-		stubbedBuildRequest := &buildv1.BuildRequest{
-			ObjectMeta: metav1.ObjectMeta{
-				Namespace: test.opts.Namespace,
-				Name:      build.Name,
-			},
-		}
-		client := buildfake.NewSimpleClientset(build, stubbedBuildRequest)
-		client.PrependReactor("get", "builds", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-			return true, build, nil
-		})
-		client.PrependReactor("update", "builds", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-			if build.Status.Cancelled == true {
-				build.Status.Phase = buildv1.BuildPhaseCancelled
-			}
-			return false, build, nil
-		})
-		client.PrependReactor("create", "builds", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-			if action.GetSubresource() != "clone" {
-				return false, nil, nil
-			}
-			return true, build, nil
-		})
-
-		test.opts.timeout = 1 * time.Second
-		test.opts.Client = client.BuildV1()
-		test.opts.BuildClient = client.BuildV1().Builds(test.opts.Namespace)
-		test.opts.ReportError = func(err error) {
-			test.opts.HasError = true
-			t.Logf("got error: %v", err)
-		}
-		scheme, _ := apitesting.SchemeForOrDie(api.Install)
-		test.opts.Mapper = testrestmapper.TestOnlyStaticRESTMapper(scheme)
-		test.opts.BuildNames = []string{"ruby-ex"}
-		test.opts.States = []string{"new", "pending", "running"}
-
-		if err := test.opts.RunCancelBuild(); err != test.expectedErr {
-			t.Fatalf("%s: error mismatch: expected %v, got %v", testName, test.expectedErr, err)
-		}
-
-		got := client.Actions()
-		if len(test.expectedActions) != len(got) {
-			t.Fatalf("%s: action length mismatch: expected %d, got %d", testName, len(test.expectedActions), len(got))
-		}
-
-		for i, action := range test.expectedActions {
-			if !got[i].Matches(action.verb, action.resource) {
-				t.Errorf("%s: action mismatch: expected %s %s, got %s %s", testName, action.verb, action.resource, got[i].GetVerb(), got[i].GetResource())
-			}
-		}
-	}
-}
-
-type discardingPrinter struct{}
-
-func (*discardingPrinter) PrintObj(runtime.Object, io.Writer) error {
-	return nil
-}
-
-func genBuild(phase buildv1.BuildPhase) *buildv1.Build {
-	build := buildv1.Build{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "ruby-ex",
-			Namespace: "test",
-		},
-		Status: buildv1.BuildStatus{
-			Phase: phase,
-		},
-	}
-	return &build
-}
-
-type testAction struct {
-	verb, resource string
-}
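
The PrependReactor calls above are what let this test simulate the build controller without a server: a reactor intercepts a verb/resource pair before the fake object tracker handles it. A reduced, hypothetical sketch of just that trick (newCancellingClient is illustrative):

```go
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	clientgotesting "k8s.io/client-go/testing"

	buildv1 "github.com/openshift/api/build/v1"
	buildfake "github.com/openshift/client-go/build/clientset/versioned/fake"
)

// newCancellingClient returns a fake build clientset that behaves like the
// build controller in the test above: any update that sets Status.Cancelled
// also flips the phase to Cancelled, so RunCancelBuild's second poll loop
// observes the phase change it waits for.
func newCancellingClient(seed *buildv1.Build) *buildfake.Clientset {
	client := buildfake.NewSimpleClientset(seed)
	client.PrependReactor("update", "builds", func(action clientgotesting.Action) (bool, runtime.Object, error) {
		b := action.(clientgotesting.UpdateAction).GetObject().(*buildv1.Build)
		if b.Status.Cancelled {
			b.Status.Phase = buildv1.BuildPhaseCancelled
		}
		return false, b, nil // handled=false: fall through so the tracker records the update
	})
	return client
}
```
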
diff --git a/vendor/github.com/openshift/oc/pkg/cli/cli.go b/vendor/github.com/openshift/oc/pkg/cli/cli.go
deleted file mode 100644
index f5343dad0db5..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/cli.go
+++ /dev/null
@@ -1,405 +0,0 @@
-package cli
-
-import (
-	"flag"
-	"fmt"
-	"io"
-	"os"
-	"runtime"
-	"strings"
-
-	"github.com/MakeNowJust/heredoc"
-	"github.com/spf13/cobra"
-
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kubecmd "k8s.io/kubernetes/pkg/kubectl/cmd"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/diff"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/kustomize"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/plugin"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	ktemplates "k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	"github.com/openshift/oc/pkg/cli/admin"
-	"github.com/openshift/oc/pkg/cli/admin/buildchain"
-	"github.com/openshift/oc/pkg/cli/admin/groups/sync"
-	"github.com/openshift/oc/pkg/cli/buildlogs"
-	"github.com/openshift/oc/pkg/cli/cancelbuild"
-	"github.com/openshift/oc/pkg/cli/debug"
-	"github.com/openshift/oc/pkg/cli/deployer"
-	"github.com/openshift/oc/pkg/cli/experimental/dockergc"
-	"github.com/openshift/oc/pkg/cli/expose"
-	"github.com/openshift/oc/pkg/cli/extract"
-	"github.com/openshift/oc/pkg/cli/idle"
-	"github.com/openshift/oc/pkg/cli/image"
-	"github.com/openshift/oc/pkg/cli/importimage"
-	"github.com/openshift/oc/pkg/cli/kubectlwrappers"
-	"github.com/openshift/oc/pkg/cli/login"
-	"github.com/openshift/oc/pkg/cli/logout"
-	"github.com/openshift/oc/pkg/cli/logs"
-	"github.com/openshift/oc/pkg/cli/newapp"
-	"github.com/openshift/oc/pkg/cli/newbuild"
-	"github.com/openshift/oc/pkg/cli/observe"
-	"github.com/openshift/oc/pkg/cli/options"
-	"github.com/openshift/oc/pkg/cli/policy"
-	"github.com/openshift/oc/pkg/cli/process"
-	"github.com/openshift/oc/pkg/cli/project"
-	"github.com/openshift/oc/pkg/cli/projects"
-	"github.com/openshift/oc/pkg/cli/recycle"
-	"github.com/openshift/oc/pkg/cli/registry"
-	"github.com/openshift/oc/pkg/cli/requestproject"
-	"github.com/openshift/oc/pkg/cli/rollback"
-	"github.com/openshift/oc/pkg/cli/rollout"
-	"github.com/openshift/oc/pkg/cli/rsh"
-	"github.com/openshift/oc/pkg/cli/rsync"
-	"github.com/openshift/oc/pkg/cli/secrets"
-	"github.com/openshift/oc/pkg/cli/serviceaccounts"
-	"github.com/openshift/oc/pkg/cli/set"
-	"github.com/openshift/oc/pkg/cli/startbuild"
-	"github.com/openshift/oc/pkg/cli/status"
-	"github.com/openshift/oc/pkg/cli/tag"
-	"github.com/openshift/oc/pkg/cli/version"
-	"github.com/openshift/oc/pkg/cli/whoami"
-	cmdutil "github.com/openshift/oc/pkg/helpers/cmd"
-	"github.com/openshift/oc/pkg/helpers/term"
-)
-
-const productName = `OpenShift`
-
-var (
-	cliLong = heredoc.Doc(`
-    ` + productName + ` Client
-
-    This client helps you develop, build, deploy, and run your applications on any
-    OpenShift or Kubernetes cluster. It also includes the administrative
-    commands for managing a cluster under the 'adm' subcommand.`)
-
-	cliExplain = heredoc.Doc(`
-    To familiarize yourself with OpenShift, log in to your cluster and try creating a sample application:
-
-        %[1]s login mycluster.mycompany.com
-        %[1]s new-project my-example
-        %[1]s new-app django-psql-example
-        %[1]s logs -f bc/django-psql-example
-
-    To see what has been created, run:
-
-        %[1]s status
-
-    and get a command shell inside one of the created containers with:
-
-        %[1]s rsh dc/postgresql
-
-    To see the list of available toolchains for building applications, run:
-
-        %[1]s new-app -L
-
-    Since OpenShift runs on top of Kubernetes, your favorite kubectl commands are also present in oc,
-    allowing you to quickly switch between development and debugging. You can also run kubectl directly
-    against any OpenShift cluster using the kubeconfig file created by 'oc login'.
-
-    For more on OpenShift, see the documentation at https://docs.openshift.com.
-
-    To see the full list of commands supported, run '%[1]s --help'.`)
-)
-
-func NewDefaultOcCommand(name, fullName string, in io.Reader, out, errout io.Writer) *cobra.Command {
-	cmd := NewOcCommand(name, fullName, in, out, errout)
-
-	if len(os.Args) <= 1 {
-		return cmd
-	}
-
-	cmdPathPieces := os.Args[1:]
-	pluginHandler := kubecmd.NewDefaultPluginHandler(plugin.ValidPluginFilenamePrefixes)
-
-	// only look for suitable extension executables if
-	// the specified command does not already exist
-	if _, _, err := cmd.Find(cmdPathPieces); err != nil {
-		if err := kubecmd.HandlePluginCommand(pluginHandler, cmdPathPieces); err != nil {
-			fmt.Fprintf(errout, "%v\n", err)
-			os.Exit(1)
-		}
-	}
-
-	return cmd
-}
-
-func NewOcCommand(name, fullName string, in io.Reader, out, errout io.Writer) *cobra.Command {
-	// Main command
-	cmds := &cobra.Command{
-		Use:   name,
-		Short: "Command line tools for managing applications",
-		Long:  cliLong,
-		Run: func(c *cobra.Command, args []string) {
-			explainOut := term.NewResponsiveWriter(out)
-			c.SetOutput(explainOut)
-			kcmdutil.RequireNoArguments(c, args)
-			fmt.Fprintf(explainOut, "%s\n\n%s\n", cliLong, fmt.Sprintf(cliExplain, fullName))
-		},
-		BashCompletionFunction: bashCompletionFunc,
-	}
-
-	kubeConfigFlags := genericclioptions.NewConfigFlags(true)
-	kubeConfigFlags.AddFlags(cmds.PersistentFlags())
-	matchVersionKubeConfigFlags := kcmdutil.NewMatchVersionFlags(kubeConfigFlags)
-	matchVersionKubeConfigFlags.AddFlags(cmds.PersistentFlags())
-	cmds.PersistentFlags().AddGoFlagSet(flag.CommandLine)
-	f := kcmdutil.NewFactory(matchVersionKubeConfigFlags)
-
-	ioStreams := genericclioptions.IOStreams{In: in, Out: out, ErrOut: errout}
-
-	loginCmd := login.NewCmdLogin(fullName, f, ioStreams)
-	secretcmds := secrets.NewCmdSecrets(secrets.SecretsRecommendedName, fullName+" "+secrets.SecretsRecommendedName, f, ioStreams)
-
-	groups := ktemplates.CommandGroups{
-		{
-			Message: "Basic Commands:",
-			Commands: []*cobra.Command{
-				loginCmd,
-				requestproject.NewCmdRequestProject(fullName, f, ioStreams),
-				newapp.NewCmdNewApplication(newapp.NewAppRecommendedCommandName, fullName, f, ioStreams),
-				status.NewCmdStatus(status.StatusRecommendedName, fullName, fullName+" "+status.StatusRecommendedName, f, ioStreams),
-				project.NewCmdProject(fullName, f, ioStreams),
-				projects.NewCmdProjects(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdExplain(fullName, f, ioStreams),
-			},
-		},
-		{
-			Message: "Build and Deploy Commands:",
-			Commands: []*cobra.Command{
-				rollout.NewCmdRollout(fullName, f, ioStreams),
-				rollback.NewCmdRollback(fullName, f, ioStreams),
-				newbuild.NewCmdNewBuild(newbuild.NewBuildRecommendedCommandName, fullName, f, ioStreams),
-				startbuild.NewCmdStartBuild(fullName, f, ioStreams),
-				cancelbuild.NewCmdCancelBuild(cancelbuild.CancelBuildRecommendedCommandName, fullName, f, ioStreams),
-				importimage.NewCmdImportImage(fullName, f, ioStreams),
-				tag.NewCmdTag(fullName, f, ioStreams),
-			},
-		},
-		{
-			Message: "Application Management Commands:",
-			Commands: []*cobra.Command{
-				kubectlwrappers.NewCmdCreate(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdApply(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdGet(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdDescribe(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdEdit(fullName, f, ioStreams),
-				set.NewCmdSet(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdLabel(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdAnnotate(fullName, f, ioStreams),
-				expose.NewCmdExpose(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdDelete(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdScale(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdAutoscale(fullName, f, ioStreams),
-				secretcmds,
-				serviceaccounts.NewCmdServiceAccounts(serviceaccounts.ServiceAccountsRecommendedName, fullName+" "+serviceaccounts.ServiceAccountsRecommendedName, f, ioStreams),
-			},
-		},
-		{
-			Message: "Troubleshooting and Debugging Commands:",
-			Commands: []*cobra.Command{
-				logs.NewCmdLogs(logs.LogsRecommendedCommandName, fullName, f, ioStreams),
-				rsh.NewCmdRsh(rsh.RshRecommendedName, fullName, f, ioStreams),
-				rsync.NewCmdRsync(rsync.RsyncRecommendedName, fullName, f, ioStreams),
-				kubectlwrappers.NewCmdPortForward(fullName, f, ioStreams),
-				debug.NewCmdDebug(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdExec(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdProxy(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdAttach(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdRun(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdCp(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdWait(fullName, f, ioStreams),
-			},
-		},
-		{
-			Message: "Advanced Commands:",
-			Commands: []*cobra.Command{
-				admin.NewCommandAdmin("adm", fullName+" "+"adm", f, ioStreams),
-				kubectlwrappers.NewCmdReplace(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdPatch(fullName, f, ioStreams),
-				process.NewCmdProcess(fullName, f, ioStreams),
-				extract.NewCmdExtract(fullName, f, ioStreams),
-				observe.NewCmdObserve(fullName, f, ioStreams),
-				policy.NewCmdPolicy(policy.PolicyRecommendedName, fullName+" "+policy.PolicyRecommendedName, f, ioStreams),
-				kubectlwrappers.NewCmdAuth(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdConvert(fullName, f, ioStreams),
-				image.NewCmdImage(fullName, f, ioStreams),
-				registry.NewCmd(fullName, f, ioStreams),
-				idle.NewCmdIdle(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdApiVersions(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdApiResources(fullName, f, ioStreams),
-				kubectlwrappers.NewCmdClusterInfo(fullName, f, ioStreams),
-				diff.NewCmdDiff(f, ioStreams),
-				kustomize.NewCmdKustomize(ioStreams),
-			},
-		},
-		{
-			Message: "Settings Commands:",
-			Commands: []*cobra.Command{
-				logout.NewCmdLogout("logout", fullName+" logout", fullName+" login", f, ioStreams),
-				kubectlwrappers.NewCmdConfig(fullName, "config", f, ioStreams),
-				whoami.NewCmdWhoAmI(whoami.WhoAmIRecommendedCommandName, fullName+" "+whoami.WhoAmIRecommendedCommandName, f, ioStreams),
-				kubectlwrappers.NewCmdCompletion(fullName, ioStreams),
-			},
-		},
-	}
-	groups.Add(cmds)
-
-	ocEditFullName := fullName + " edit"
-	ocSecretsFullName := fullName + " " + secrets.SecretsRecommendedName
-	ocSecretsNewFullName := ocSecretsFullName + " " + secrets.NewSecretRecommendedCommandName
-
-	filters := []string{
-		"options",
-		"deploy",
-		// These commands are deprecated and should not appear in help
-		moved(fullName, "logs", cmds, buildlogs.NewCmdBuildLogs(fullName, f, ioStreams)),
-		moved(fullName, "secrets link", secretcmds, secrets.NewCmdLinkSecret("add", fullName, f, ioStreams)),
-		moved(fullName, "create secret", secretcmds, secrets.NewCmdCreateSecret(secrets.NewSecretRecommendedCommandName, fullName, f, ioStreams)),
-		moved(fullName, "create secret", secretcmds, secrets.NewCmdCreateDockerConfigSecret(secrets.CreateDockerConfigSecretRecommendedName, fullName, f, ioStreams, ocSecretsNewFullName, ocEditFullName)),
-		moved(fullName, "create secret", secretcmds, secrets.NewCmdCreateBasicAuthSecret(secrets.CreateBasicAuthSecretRecommendedCommandName, fullName, f, ioStreams, ocSecretsNewFullName, ocEditFullName)),
-		moved(fullName, "create secret", secretcmds, secrets.NewCmdCreateSSHAuthSecret(secrets.CreateSSHAuthSecretRecommendedCommandName, fullName, f, ioStreams, ocSecretsNewFullName, ocEditFullName)),
-	}
-
-	changeSharedFlagDefaults(cmds)
-	cmdutil.ActsAsRootCommand(cmds, filters, groups...).
-		ExposeFlags(loginCmd, "certificate-authority", "insecure-skip-tls-verify", "token")
-
-	cmds.AddCommand(newExperimentalCommand("ex", name+" ex", f, ioStreams))
-
-	cmds.AddCommand(kubectlwrappers.NewCmdPlugin(fullName, f, ioStreams))
-	cmds.AddCommand(version.NewCmdVersion(fullName, f, ioStreams))
-	cmds.AddCommand(options.NewCmdOptions(ioStreams))
-
-	if cmds.Flag("namespace") != nil {
-		if cmds.Flag("namespace").Annotations == nil {
-			cmds.Flag("namespace").Annotations = map[string][]string{}
-		}
-		cmds.Flag("namespace").Annotations[cobra.BashCompCustom] = append(
-			cmds.Flag("namespace").Annotations[cobra.BashCompCustom],
-			"__oc_get_namespaces",
-		)
-	}
-
-	return cmds
-}
-
-func moved(fullName, to string, parent, cmd *cobra.Command) string {
-	cmd.Long = fmt.Sprintf("DEPRECATED: This command has been moved to \"%s %s\"", fullName, to)
-	cmd.Short = fmt.Sprintf("DEPRECATED: %s", to)
-	parent.AddCommand(cmd)
-	return cmd.Name()
-}
-
-// changeSharedFlagDefaults changes values of shared flags that we disagree with.  This can't be done in godep code because
-// that would change behavior in our `kubectl` symlink. Defend each change.
-// 1. show-all - the most interesting pods are terminated/failed pods.  We don't want to exclude them from printing
-func changeSharedFlagDefaults(rootCmd *cobra.Command) {
-	cmds := []*cobra.Command{rootCmd}
-
-	for i := 0; i < len(cmds); i++ {
-		currCmd := cmds[i]
-		cmds = append(cmds, currCmd.Commands()...)
-
-		if showAllFlag := currCmd.Flags().Lookup("show-all"); showAllFlag != nil {
-			showAllFlag.DefValue = "true"
-			showAllFlag.Value.Set("true")
-			showAllFlag.Changed = false
-			showAllFlag.Usage = "When printing, show all resources (false means hide terminated pods.)"
-		}
-
-		// we want to disable the --validate flag by default when we're running kube commands from oc.  We want to make sure
-		// that we're only getting the upstream --validate flags, so check both the flag and the usage
-		if validateFlag := currCmd.Flags().Lookup("validate"); (validateFlag != nil) && (validateFlag.Usage == "If true, use a schema to validate the input before sending it") {
-			validateFlag.DefValue = "false"
-			validateFlag.Value.Set("false")
-			validateFlag.Changed = false
-		}
-	}
-}
-
-func newExperimentalCommand(name, fullName string, f kcmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command {
-	experimental := &cobra.Command{
-		Use:   name,
-		Short: "Experimental commands under active development",
-		Long:  "The commands grouped here are under development and may change without notice.",
-		Run: func(c *cobra.Command, args []string) {
-			c.SetOutput(ioStreams.Out)
-			c.Help()
-		},
-		BashCompletionFunction: admin.BashCompletionFunc,
-	}
-
-	experimental.AddCommand(dockergc.NewCmdDockerGCConfig(f, fullName, "dockergc", ioStreams))
-	experimental.AddCommand(buildchain.NewCmdBuildChain(name, fullName+" "+buildchain.BuildChainRecommendedCommandName, f, ioStreams))
-	experimental.AddCommand(options.NewCmdOptions(ioStreams))
-
-	// these groups also live under `oc adm groups {sync,prune}` and are here only for backwards compatibility
-	experimental.AddCommand(sync.NewCmdSync("sync-groups", fullName+" "+"sync-groups", f, ioStreams))
-	experimental.AddCommand(sync.NewCmdPrune("prune-groups", fullName+" "+"prune-groups", f, ioStreams))
-	return experimental
-}
-
-// CommandFor returns the appropriate command for this base name,
-// or the OpenShift CLI command.
-func CommandFor(basename string) *cobra.Command {
-	var cmd *cobra.Command
-
-	in, out, errout := os.Stdin, os.Stdout, os.Stderr
-
-	// Make case-insensitive and strip executable suffix if present
-	if runtime.GOOS == "windows" {
-		basename = strings.ToLower(basename)
-		basename = strings.TrimSuffix(basename, ".exe")
-	}
-
-	switch basename {
-	case "kubectl":
-		cmd = kubecmd.NewDefaultKubectlCommand()
-	case "openshift-deploy":
-		cmd = deployer.NewCommandDeployer(basename)
-	case "openshift-recycle":
-		cmd = recycle.NewCommandRecycle(basename, out)
-	default:
-		shimKubectlForOc()
-		cmd = NewDefaultOcCommand("oc", "oc", in, out, errout)
-
-		// treat oc as a kubectl plugin
-		if strings.HasPrefix(basename, "kubectl-") {
-			args := strings.Split(strings.TrimPrefix(basename, "kubectl-"), "-")
-
-			// the plugin mechanism interprets "_" as dashes. Convert any "_" our basename
-			// might have in order to find the appropriate command in the `oc` tree.
-			for i := range args {
-				args[i] = strings.Replace(args[i], "_", "-", -1)
-			}
-
-			if targetCmd, _, err := cmd.Find(args); targetCmd != nil && err == nil {
-				// since cobra always executes the root command whenever Execute() is
-				// called, refusing to run a child command directly, we must create a
-				// completely new command and "deep copy" the targetCmd information to it.
-				newParent := &cobra.Command{
-					Use:     targetCmd.Use,
-					Short:   targetCmd.Short,
-					Long:    targetCmd.Long,
-					Example: targetCmd.Example,
-					Run:     targetCmd.Run,
-				}
-
-				// copy flags
-				newParent.Flags().AddFlagSet(cmd.Flags())
-				newParent.Flags().AddFlagSet(targetCmd.Flags())
-				newParent.PersistentFlags().AddFlagSet(targetCmd.PersistentFlags())
-
-				// copy subcommands
-				newParent.AddCommand(targetCmd.Commands()...)
-				cmd = newParent
-			}
-		}
-	}
-
-	if cmd.UsageFunc() == nil {
-		cmdutil.ActsAsRootCommand(cmd, []string{"options"})
-	}
-	return cmd
-}
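
The kubectl-plugin branch of CommandFor above relies on one naming convention: a plugin binary named kubectl-new_app maps to the command path new-app, since dashes separate path segments in plugin filenames and underscores stand in for dashes within a segment. A sketch of just that normalization, assuming the basename has already been lower-cased and stripped of any .exe suffix:

```go
package example

import "strings"

// pluginArgs converts a kubectl plugin basename such as "kubectl-new_app"
// into the argument path CommandFor looks up in the oc command tree,
// here []string{"new-app"}.
func pluginArgs(basename string) []string {
	args := strings.Split(strings.TrimPrefix(basename, "kubectl-"), "-")
	for i := range args {
		// "_" is the plugin-file spelling of "-" within a single segment.
		args[i] = strings.Replace(args[i], "_", "-", -1)
	}
	return args
}
```
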
diff --git a/vendor/github.com/openshift/oc/pkg/cli/cli_bashcomp_func.go b/vendor/github.com/openshift/oc/pkg/cli/cli_bashcomp_func.go
deleted file mode 100644
index 9dc320e6f93a..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/cli_bashcomp_func.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package cli
-
-const (
-	bashCompletionFunc = `# call oc get $1,
-__oc_override_flag_list=(config cluster user context namespace server)
-__oc_override_flags()
-{
-    local ${__oc_override_flag_list[*]} two_word_of of
-    for w in "${words[@]}"; do
-        if [ -n "${two_word_of}" ]; then
-            eval "${two_word_of}=\"--${two_word_of}=\${w}\""
-            two_word_of=
-            continue
-        fi
-        for of in "${__oc_override_flag_list[@]}"; do
-            case "${w}" in
-                --${of}=*)
-                    eval "${of}=\"${w}\""
-                    ;;
-                --${of})
-                    two_word_of="${of}"
-                    ;;
-            esac
-        done
-    done
-    for of in "${__oc_override_flag_list[@]}"; do
-        if eval "test -n \"\$${of}\""; then
-            eval "echo \${${of}}"
-        fi
-    done
-}
-__oc_parse_get()
-{
-
-    local template
-    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
-    local oc_out
-    if oc_out=$(oc get $(__oc_override_flags) -o template --template="${template}" "$1" 2>/dev/null); then
-        COMPREPLY=( $( compgen -W "${oc_out[*]}" -- "$cur" ) )
-    fi
-}
-
-__oc_get_namespaces()
-{
-    local template oc_out
-    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
-    if oc_out=$(oc get -o template --template="${template}" namespace 2>/dev/null); then
-        COMPREPLY=( $( compgen -W "${oc_out[*]}" -- "$cur" ) )
-    fi
-}
-
-__oc_get_resource()
-{
-    if [[ ${#nouns[@]} -eq 0 ]]; then
-      local oc_out
-      if oc_out=$(oc api-resources $(__oc_override_flags) -o name --cached --request-timeout=5s --verbs=get 2>/dev/null); then
-          COMPREPLY=( $( compgen -W "${oc_out[*]}" -- "$cur" ) )
-          return 0
-      fi
-      return 1
-    fi
-    __oc_parse_get "${nouns[${#nouns[@]} -1]}"
-}
-
-# $1 is the name of the pod we want to get the list of containers inside
-__oc_get_containers()
-{
-    local template
-    template="{{ range .spec.containers  }}{{ .name }} {{ end }}"
-    __oc_debug "${FUNCNAME} nouns are ${nouns[@]}"
-
-    local len="${#nouns[@]}"
-    if [[ ${len} -ne 1 ]]; then
-        return
-    fi
-    local last=${nouns[${len} -1]}
-    local oc_out
-    if oc_out=$(oc get -o template --template="${template}" pods "${last}" 2>/dev/null); then
-        COMPREPLY=( $( compgen -W "${oc_out[*]}" -- "$cur" ) )
-    fi
-}
-
-# Require both a pod and a container to be specified
-__oc_require_pod_and_container()
-{
-    if [[ ${#nouns[@]} -eq 0 ]]; then
-        __oc_parse_get pods
-        return 0
-    fi;
-    __oc_get_containers
-    return 0
-}
-
-__custom_func() {
-    case ${last_command} in
- 
-        # first arg is the kind according to ValidArgs, second is resource name
-        oc_get | oc_describe | oc_delete | oc_label | oc_expose | oc_export | oc_patch | oc_annotate | oc_edit | oc_scale | oc_autoscale | oc_observe )
-            __oc_get_resource
-            return
-            ;;
-
-        # first arg is a pod name
-        oc_rsh | oc_exec | oc_port-forward | oc_attach)
-            if [[ ${#nouns[@]} -eq 0 ]]; then
-                __oc_parse_get pods
-            fi;
-            return
-            ;;
- 
-        # first arg is a pod name, second is a container name
-        oc_logs)
-            __oc_require_pod_and_container
-            return
-            ;;
- 
-        # first arg is a build config name
-        oc_start-build | oc_cancel-build)
-            if [[ ${#nouns[@]} -eq 0 ]]; then
-                __oc_parse_get buildconfigs
-            fi;
-            return
-            ;;
- 
-        # first arg is a deployment config OR deployment
-        oc_rollback)
-            if [[ ${#nouns[@]} -eq 0 ]]; then
-                __oc_parse_get deploymentconfigs,replicationcontrollers
-            fi;
-            return
-            ;;
-
-        # first arg is a project name
-        oc_project)
-            if [[ ${#nouns[@]} -eq 0 ]]; then
-                __oc_parse_get projects
-            fi;
-            return
-            ;;
- 
-        # first arg is an image stream
-        oc_import-image)
-            if [[ ${#nouns[@]} -eq 0 ]]; then
-                __oc_parse_get imagestreams
-            fi;
-            return
-            ;;
- 
-        *)
-            ;;
-    esac
-}
-`
-)
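
cobra pastes the bashCompletionFunc constant above verbatim into the script it generates, while per-flag completions are attached through cobra.BashCompCustom annotations (the __oc_get_namespaces hookup shown earlier in cli.go). A minimal sketch of that wiring, with a stand-in script body:

```go
package example

import "github.com/spf13/cobra"

// A stand-in for the full script constant above; cobra embeds this text
// into the script produced by GenBashCompletion.
const miniCompletionFunc = `__oc_get_namespaces() { :; }`

func newRootWithCompletion() *cobra.Command {
	root := &cobra.Command{
		Use:                    "oc",
		BashCompletionFunction: miniCompletionFunc,
	}
	root.PersistentFlags().String("namespace", "", "namespace scope for this request")
	// Completing --namespace values calls the shell function named here.
	root.Flag("namespace").Annotations = map[string][]string{
		cobra.BashCompCustom: {"__oc_get_namespaces"},
	}
	return root
}
```
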
diff --git a/vendor/github.com/openshift/oc/pkg/cli/create/clusterquota.go b/vendor/github.com/openshift/oc/pkg/cli/create/clusterquota.go
deleted file mode 100644
index bcad4424bb4f..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/create/clusterquota.go
+++ /dev/null
@@ -1,163 +0,0 @@
-package create
-
-import (
-	"encoding/csv"
-	"fmt"
-	"strings"
-
-	"github.com/spf13/cobra"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	quotav1 "github.com/openshift/api/quota/v1"
-	quotav1client "github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1"
-)
-
-const ClusterQuotaRecommendedName = "clusterresourcequota"
-
-var (
-	clusterQuotaLong = templates.LongDesc(`
-		Create a cluster resource quota that controls certain resources.
-
-		Cluster resource quota objects define quota restrictions that span multiple projects based on label selectors.`)
-
-	clusterQuotaExample = templates.Examples(`
-		# Create a cluster resource quota limited to 10 pods
-  	%[1]s limit-bob --project-annotation-selector=openshift.io/requester=user-bob --hard=pods=10`)
-)
-
-type CreateClusterQuotaOptions struct {
-	CreateSubcommandOptions *CreateSubcommandOptions
-
-	LabelSelectorStr      string
-	AnnotationSelectorStr string
-	Hard                  []string
-
-	ProjectLabelSelectorStr      *metav1.LabelSelector
-	ProjectAnnotationSelectorStr map[string]string
-
-	Client quotav1client.ClusterResourceQuotasGetter
-}
-
-func NewCreateClusterQuotaOptions(streams genericclioptions.IOStreams) *CreateClusterQuotaOptions {
-	return &CreateClusterQuotaOptions{
-		CreateSubcommandOptions: NewCreateSubcommandOptions(streams),
-	}
-}
-
-// NewCmdCreateClusterQuota is a macro command to create a new cluster quota.
-func NewCmdCreateClusterQuota(name, fullName string, f genericclioptions.RESTClientGetter, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewCreateClusterQuotaOptions(streams)
-	cmd := &cobra.Command{
-		Use:     name + " NAME --project-label-selector=key=value [--hard=RESOURCE=QUANTITY]...",
-		Short:   "Create cluster resource quota resource.",
-		Long:    clusterQuotaLong,
-		Example: fmt.Sprintf(clusterQuotaExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			cmdutil.CheckErr(o.Complete(cmd, f, args))
-			cmdutil.CheckErr(o.Run())
-		},
-		Aliases: []string{"clusterquota"},
-	}
-	cmd.Flags().StringVar(&o.LabelSelectorStr, "project-label-selector", o.LabelSelectorStr, "The project label selector for the cluster resource quota")
-	cmd.Flags().StringVar(&o.AnnotationSelectorStr, "project-annotation-selector", o.AnnotationSelectorStr, "The project annotation selector for the cluster resource quota")
-	cmd.Flags().StringSliceVar(&o.Hard, "hard", o.Hard, "The resource to constrain: RESOURCE=QUANTITY (pods=10)")
-
-	o.CreateSubcommandOptions.PrintFlags.AddFlags(cmd)
-	cmdutil.AddDryRunFlag(cmd)
-
-	return cmd
-}
-
-func (o *CreateClusterQuotaOptions) Complete(cmd *cobra.Command, f genericclioptions.RESTClientGetter, args []string) error {
-	var err error
-	if len(o.LabelSelectorStr) > 0 {
-		o.ProjectLabelSelectorStr, err = metav1.ParseToLabelSelector(o.LabelSelectorStr)
-		if err != nil {
-			return err
-		}
-	}
-
-	o.ProjectAnnotationSelectorStr, err = parseAnnotationSelector(o.AnnotationSelectorStr)
-	if err != nil {
-		return err
-	}
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.Client, err = quotav1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	return o.CreateSubcommandOptions.Complete(f, cmd, args)
-}
-
-func (o *CreateClusterQuotaOptions) Run() error {
-	clusterQuota := "av1.ClusterResourceQuota{
-		// this is ok because we know exactly how we want to be serialized
-		TypeMeta:   metav1.TypeMeta{APIVersion: quotav1.SchemeGroupVersion.String(), Kind: "ClusterResourceQuota"},
-		ObjectMeta: metav1.ObjectMeta{Name: o.CreateSubcommandOptions.Name},
-		Spec: quotav1.ClusterResourceQuotaSpec{
-			Selector: quotav1.ClusterResourceQuotaSelector{
-				LabelSelector:      o.ProjectLabelSelectorStr,
-				AnnotationSelector: o.ProjectAnnotationSelectorStr,
-			},
-			Quota: corev1.ResourceQuotaSpec{
-				Hard: corev1.ResourceList{},
-			},
-		},
-	}
-
-	for _, resourceCount := range o.Hard {
-		tokens := strings.Split(resourceCount, "=")
-		if len(tokens) != 2 {
-			return fmt.Errorf("%v must in the form of resource=quantity", resourceCount)
-		}
-		quantity, err := resource.ParseQuantity(tokens[1])
-		if err != nil {
-			return err
-		}
-		clusterQuota.Spec.Quota.Hard[corev1.ResourceName(tokens[0])] = quantity
-	}
-
-	if !o.CreateSubcommandOptions.DryRun {
-		var err error
-		clusterQuota, err = o.Client.ClusterResourceQuotas().Create(clusterQuota)
-		if err != nil {
-			return err
-		}
-	}
-
-	return o.CreateSubcommandOptions.Printer.PrintObj(clusterQuota, o.CreateSubcommandOptions.Out)
-}
-
-// parseAnnotationSelector just parses comma-separated key=value pairs;
-// further validation is left to be done server-side.
-func parseAnnotationSelector(s string) (map[string]string, error) {
-	if len(s) == 0 {
-		return nil, nil
-	}
-	stringReader := strings.NewReader(s)
-	csvReader := csv.NewReader(stringReader)
-	annotations, err := csvReader.Read()
-	if err != nil {
-		return nil, err
-	}
-	parsed := map[string]string{}
-	for _, annotation := range annotations {
-		parts := strings.SplitN(annotation, "=", 2)
-		if len(parts) != 2 {
-			return nil, fmt.Errorf("Malformed annotation selector, expected %q: %s", "key=value", annotation)
-		}
-		parsed[parts[0]] = parts[1]
-	}
-	return parsed, nil
-}
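
Reading the selector through encoding/csv is what lets an annotation value itself contain commas: a double-quoted field survives as a single key=value token, which a plain strings.Split on "," would shred. A quick demonstration with hypothetical values:

```go
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	// The quoted field keeps its internal comma; only the unquoted comma
	// separates the two pairs.
	in := `"openshift.io/requester=alice,bob",team=infra`
	fields, err := csv.NewReader(strings.NewReader(in)).Read()
	if err != nil {
		panic(err)
	}
	for _, f := range fields {
		kv := strings.SplitN(f, "=", 2)
		fmt.Printf("%s -> %s\n", kv[0], kv[1])
	}
	// Output:
	// openshift.io/requester -> alice,bob
	// team -> infra
}
```
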
diff --git a/vendor/github.com/openshift/oc/pkg/cli/create/clusterquota_test.go b/vendor/github.com/openshift/oc/pkg/cli/create/clusterquota_test.go
deleted file mode 100644
index a0f33eaae7ba..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/create/clusterquota_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package create
-
-import (
-	"reflect"
-	"testing"
-)
-
-func TestParseAnnotationSelector(t *testing.T) {
-	tests := []struct {
-		input         string
-		parsed        map[string]string
-		errorExpected bool
-	}{
-		{
-			input:  "",
-			parsed: nil,
-		},
-		{
-			input: "foo=bar",
-			parsed: map[string]string{
-				"foo": "bar",
-			},
-		},
-		{
-			input: "deads=deads,liggitt=liggitt",
-			parsed: map[string]string{
-				"deads":   "deads",
-				"liggitt": "liggitt",
-			},
-		},
-		{
-			input:         "liggitt=liggitt,deadliggitt",
-			errorExpected: true,
-		},
-		{
-			input: `"us=deads,liggitt,ffranz"`,
-			parsed: map[string]string{
-				"us": "deads,liggitt,ffranz",
-			},
-		},
-		{
-			input: `"us=deads,liggitt,ffranz",deadliggitt=deadliggitt`,
-			parsed: map[string]string{
-				"us":          "deads,liggitt,ffranz",
-				"deadliggitt": "deadliggitt",
-			},
-		},
-	}
-	for _, test := range tests {
-		parsed, err := parseAnnotationSelector(test.input)
-		if err != nil {
-			if !test.errorExpected {
-				t.Fatalf("unexpected error: %s", err)
-			}
-			continue
-		}
-		if test.errorExpected {
-			t.Fatalf("expected error, got a parsed output: %q", parsed)
-		}
-		if !reflect.DeepEqual(parsed, test.parsed) {
-			t.Error("expected parsed annotation selector ", test.parsed, ", got ", parsed)
-		}
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/create/create.go b/vendor/github.com/openshift/oc/pkg/cli/create/create.go
deleted file mode 100644
index 6088a4e03e8f..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/create/create.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package create
-
-import (
-	"github.com/spf13/cobra"
-
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/printers"
-	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-)
-
-// CreateSubcommandOptions is an options struct to support create subcommands
-type CreateSubcommandOptions struct {
-	genericclioptions.IOStreams
-
-	// PrintFlags holds options necessary for obtaining a printer
-	PrintFlags *genericclioptions.PrintFlags
-	// Name of resource being created
-	Name string
-	// DryRun is true if the command should be simulated but not run against the server
-	DryRun bool
-
-	Namespace        string
-	EnforceNamespace bool
-
-	Printer printers.ResourcePrinter
-}
-
-func NewCreateSubcommandOptions(ioStreams genericclioptions.IOStreams) *CreateSubcommandOptions {
-	return &CreateSubcommandOptions{
-		PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme),
-		IOStreams:  ioStreams,
-	}
-}
-
-func (o *CreateSubcommandOptions) Complete(f genericclioptions.RESTClientGetter, cmd *cobra.Command, args []string) error {
-	name, err := NameFromCommandArgs(cmd, args)
-	if err != nil {
-		return err
-	}
-
-	o.Name = name
-	o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	o.DryRun = cmdutil.GetDryRunFlag(cmd)
-	if o.DryRun {
-		o.PrintFlags.Complete("%s (dry run)")
-	}
-	o.Printer, err = o.PrintFlags.ToPrinter()
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// NameFromCommandArgs is a utility function for commands that assume the first argument is a resource name
-func NameFromCommandArgs(cmd *cobra.Command, args []string) (string, error) {
-	argsLen := cmd.ArgsLenAtDash()
-	// ArgsLenAtDash returns -1 when -- was not specified
-	if argsLen == -1 {
-		argsLen = len(args)
-	}
-	if argsLen != 1 {
-		return "", cmdutil.UsageErrorf(cmd, "exactly one NAME is required, got %d", argsLen)
-	}
-	return args[0], nil
-
-}
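
NameFromCommandArgs counts only the arguments that appear before a `--` separator, which is how a command such as `create deploymentconfig` (next file) can accept trailing container arguments without tripping the "exactly one NAME" check. A small runnable demonstration of cobra's ArgsLenAtDash behavior, with hypothetical argument values:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo NAME -- [COMMAND] [args...]",
		Run: func(c *cobra.Command, args []string) {
			// args carries everything, but only the part before "--"
			// counts toward the NAME validation.
			fmt.Println(args)              // [my-dc sh -c sleep 1h]
			fmt.Println(c.ArgsLenAtDash()) // 1
		},
	}
	cmd.SetArgs([]string{"my-dc", "--", "sh", "-c", "sleep 1h"})
	if err := cmd.Execute(); err != nil {
		panic(err)
	}
}
```
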
diff --git a/vendor/github.com/openshift/oc/pkg/cli/create/deploymentconfig.go b/vendor/github.com/openshift/oc/pkg/cli/create/deploymentconfig.go
deleted file mode 100644
index 0d65d522f9b3..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/create/deploymentconfig.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package create
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-	appsv1client "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1"
-)
-
-var DeploymentConfigRecommendedName = "deploymentconfig"
-
-var (
-	deploymentConfigLong = templates.LongDesc(`
-		Create a deployment config that uses a given image.
-
-		Deployment configs define the template for a pod and manage deploying new images or configuration changes.`)
-
-	deploymentConfigExample = templates.Examples(`
-		# Create an nginx deployment config named my-nginx
-  	%[1]s my-nginx --image=nginx`)
-)
-
-type CreateDeploymentConfigOptions struct {
-	CreateSubcommandOptions *CreateSubcommandOptions
-
-	Image string
-	Args  []string
-
-	Client appsv1client.DeploymentConfigsGetter
-}
-
-// NewCmdCreateDeploymentConfig is a macro command to create a new deployment config.
-func NewCmdCreateDeploymentConfig(name, fullName string, f genericclioptions.RESTClientGetter, streams genericclioptions.IOStreams) *cobra.Command {
-	o := &CreateDeploymentConfigOptions{
-		CreateSubcommandOptions: NewCreateSubcommandOptions(streams),
-	}
-	cmd := &cobra.Command{
-		Use:     name + " NAME --image=IMAGE -- [COMMAND] [args...]",
-		Short:   "Create deployment config with default options that uses a given image.",
-		Long:    deploymentConfigLong,
-		Example: fmt.Sprintf(deploymentConfigExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			cmdutil.CheckErr(o.Complete(cmd, f, args))
-			cmdutil.CheckErr(o.Run())
-		},
-		Aliases: []string{"dc"},
-	}
-	cmd.Flags().StringVar(&o.Image, "image", o.Image, "The image for the container to run.")
-	cmd.MarkFlagRequired("image")
-
-	o.CreateSubcommandOptions.PrintFlags.AddFlags(cmd)
-	cmdutil.AddDryRunFlag(cmd)
-
-	return cmd
-}
-
-func (o *CreateDeploymentConfigOptions) Complete(cmd *cobra.Command, f genericclioptions.RESTClientGetter, args []string) error {
-	if len(args) > 1 {
-		o.Args = args[1:]
-	}
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.Client, err = appsv1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	return o.CreateSubcommandOptions.Complete(f, cmd, args)
-}
-
-func (o *CreateDeploymentConfigOptions) Run() error {
-	labels := map[string]string{"deployment-config.name": o.CreateSubcommandOptions.Name}
-	deploymentConfig := &appsv1.DeploymentConfig{
-		// this is ok because we know exactly how we want to be serialized
-		TypeMeta:   metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String(), Kind: "DeploymentConfig"},
-		ObjectMeta: metav1.ObjectMeta{Name: o.CreateSubcommandOptions.Name},
-		Spec: appsv1.DeploymentConfigSpec{
-			Selector: labels,
-			Replicas: 1,
-			Template: &corev1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{Labels: labels},
-				Spec: corev1.PodSpec{
-					Containers: []corev1.Container{
-						{
-							Name:  "default-container",
-							Image: o.Image,
-							Args:  o.Args,
-						},
-					},
-				},
-			},
-		},
-	}
-
-	if !o.CreateSubcommandOptions.DryRun {
-		var err error
-		deploymentConfig, err = o.Client.DeploymentConfigs(o.CreateSubcommandOptions.Namespace).Create(deploymentConfig)
-		if err != nil {
-			return err
-		}
-	}
-
-	return o.CreateSubcommandOptions.Printer.PrintObj(deploymentConfig, o.CreateSubcommandOptions.Out)
-}
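
Every create subcommand in this package shares the dry-run contract that CreateSubcommandOptions.Complete sets up above: with --dry-run the printer gains a "(dry run)" suffix and the server call is skipped, but the object is printed either way. A compressed, hypothetical sketch of that shared Run shape (runCreate and its signature are illustrative):

```go
package example

import (
	"io"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/cli-runtime/pkg/printers"
)

// runCreate captures the Run() shape shared by the create subcommands here:
// create on the server unless dry-running, then print whichever object we
// ended up with (the server's echo carries defaulted and generated fields).
func runCreate(obj runtime.Object, dryRun bool,
	create func(runtime.Object) (runtime.Object, error),
	printer printers.ResourcePrinter, out io.Writer) error {
	if !dryRun {
		created, err := create(obj)
		if err != nil {
			return err
		}
		obj = created
	}
	return printer.PrintObj(obj, out)
}
```
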
diff --git a/vendor/github.com/openshift/oc/pkg/cli/create/identity.go b/vendor/github.com/openshift/oc/pkg/cli/create/identity.go
deleted file mode 100644
index de13c0a5f84e..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/create/identity.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package create
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/spf13/cobra"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	userv1 "github.com/openshift/api/user/v1"
-	userv1client "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1"
-)
-
-const IdentityRecommendedName = "identity"
-
-var (
-	identityLong = templates.LongDesc(`
-		This command can be used to create an identity object.
-
-		Typically, identities are created automatically during login. If automatic
-		creation is disabled (by using the "lookup" mapping method), identities must
-		be created manually.
-
-		Corresponding user and useridentitymapping objects must also be created
-		to allow logging in with the created identity.`)
-
-	identityExample = templates.Examples(`
-		# Create an identity with identity provider "acme_ldap" and the identity provider username "adamjones"
-  	%[1]s acme_ldap:adamjones`)
-)
-
-type CreateIdentityOptions struct {
-	CreateSubcommandOptions *CreateSubcommandOptions
-
-	ProviderName     string
-	ProviderUserName string
-
-	IdentityClient userv1client.IdentitiesGetter
-}
-
-// NewCmdCreateIdentity is a macro command to create a new identity
-func NewCmdCreateIdentity(name, fullName string, f genericclioptions.RESTClientGetter, streams genericclioptions.IOStreams) *cobra.Command {
-	o := &CreateIdentityOptions{
-		CreateSubcommandOptions: NewCreateSubcommandOptions(streams),
-	}
-	cmd := &cobra.Command{
-		Use:     name + " :",
-		Short:   "Manually create an identity (only needed if automatic creation is disabled).",
-		Long:    identityLong,
-		Example: fmt.Sprintf(identityExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			cmdutil.CheckErr(o.Complete(cmd, f, args))
-			cmdutil.CheckErr(o.Run())
-		},
-	}
-
-	o.CreateSubcommandOptions.PrintFlags.AddFlags(cmd)
-	cmdutil.AddDryRunFlag(cmd)
-
-	return cmd
-}
-
-func (o *CreateIdentityOptions) Complete(cmd *cobra.Command, f genericclioptions.RESTClientGetter, args []string) error {
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.IdentityClient, err = userv1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	if err := o.CreateSubcommandOptions.Complete(f, cmd, args); err != nil {
-		return err
-	}
-
-	parts := strings.Split(o.CreateSubcommandOptions.Name, ":")
-	if len(parts) != 2 {
-		return fmt.Errorf("identity name in the format : is required")
-	}
-	o.ProviderName = parts[0]
-	o.ProviderUserName = parts[1]
-
-	return nil
-}
-
-func (o *CreateIdentityOptions) Run() error {
-	identity := &userv1.Identity{
-		// this is ok because we know exactly how we want to be serialized
-		TypeMeta:         metav1.TypeMeta{APIVersion: userv1.SchemeGroupVersion.String(), Kind: "Identity"},
-		ProviderName:     o.ProviderName,
-		ProviderUserName: o.ProviderUserName,
-	}
-
-	if !o.CreateSubcommandOptions.DryRun {
-		var err error
-		identity, err = o.IdentityClient.Identities().Create(identity)
-		if err != nil {
-			return err
-		}
-	}
-
-	return o.CreateSubcommandOptions.Printer.PrintObj(identity, o.CreateSubcommandOptions.Out)
-}
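
Because Complete above splits on every colon and demands exactly two parts, a provider user name that itself contains ":" is rejected; strings.SplitN would have preserved it. A small demonstration with a hypothetical LDAP-style name:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	name := "acme_ldap:cn=jones:admin" // hypothetical user name containing ':'

	// The Split used above yields three parts, so Complete returns an error:
	fmt.Println(len(strings.Split(name, ":"))) // 3

	// SplitN(name, ":", 2) would instead keep the remainder intact:
	fmt.Println(strings.SplitN(name, ":", 2)[1]) // cn=jones:admin
}
```
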
diff --git a/vendor/github.com/openshift/oc/pkg/cli/create/imagestream.go b/vendor/github.com/openshift/oc/pkg/cli/create/imagestream.go
deleted file mode 100644
index a81c85d855f0..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/create/imagestream.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package create
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	imagev1 "github.com/openshift/api/image/v1"
-	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
-)
-
-const ImageStreamRecommendedName = "imagestream"
-
-var (
-	imageStreamLong = templates.LongDesc(`
-		Create a new image stream
-
-		Image streams allow you to track, tag, and import images from other registries. They also define an
-		access controlled destination that you can push images to. An image stream can reference images
-		from many different registries and control how those images are referenced by pods, deployments,
-		and builds.
-
-		If --lookup-local is passed, the image stream will be used as the source when pods reference
-		it by name. For example, if stream 'mysql' resolves local names, a pod that points to
-		'mysql:latest' will use the image the image stream points to under the "latest" tag.`)
-
-	imageStreamExample = templates.Examples(`
-		# Create a new image stream
-  	%[1]s mysql`)
-)
-
-type CreateImageStreamOptions struct {
-	CreateSubcommandOptions *CreateSubcommandOptions
-
-	LookupLocal bool
-
-	Client imagev1client.ImageStreamsGetter
-}
-
-// NewCmdCreateImageStream is a macro command to create a new image stream
-func NewCmdCreateImageStream(name, fullName string, f genericclioptions.RESTClientGetter, streams genericclioptions.IOStreams) *cobra.Command {
-	o := &CreateImageStreamOptions{
-		CreateSubcommandOptions: NewCreateSubcommandOptions(streams),
-	}
-	cmd := &cobra.Command{
-		Use:     name + " NAME",
-		Short:   "Create a new empty image stream.",
-		Long:    imageStreamLong,
-		Example: fmt.Sprintf(imageStreamExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			cmdutil.CheckErr(o.Complete(cmd, f, args))
-			cmdutil.CheckErr(o.Run())
-		},
-		Aliases: []string{"is"},
-	}
-	cmd.Flags().BoolVar(&o.LookupLocal, "lookup-local", o.LookupLocal, "If true, the image stream will be the source for any top-level image reference in this project.")
-
-	o.CreateSubcommandOptions.PrintFlags.AddFlags(cmd)
-	cmdutil.AddDryRunFlag(cmd)
-
-	return cmd
-}
-
-func (o *CreateImageStreamOptions) Complete(cmd *cobra.Command, f genericclioptions.RESTClientGetter, args []string) error {
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.Client, err = imagev1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	return o.CreateSubcommandOptions.Complete(f, cmd, args)
-}
-
-func (o *CreateImageStreamOptions) Run() error {
-	imageStream := &imagev1.ImageStream{
-		// this is ok because we know exactly how we want to be serialized
-		TypeMeta:   metav1.TypeMeta{APIVersion: imagev1.SchemeGroupVersion.String(), Kind: "ImageStream"},
-		ObjectMeta: metav1.ObjectMeta{Name: o.CreateSubcommandOptions.Name},
-		Spec: imagev1.ImageStreamSpec{
-			LookupPolicy: imagev1.ImageLookupPolicy{
-				Local: o.LookupLocal,
-			},
-		},
-	}
-
-	if !o.CreateSubcommandOptions.DryRun {
-		var err error
-		imageStream, err = o.Client.ImageStreams(o.CreateSubcommandOptions.Namespace).Create(imageStream)
-		if err != nil {
-			return err
-		}
-	}
-
-	return o.CreateSubcommandOptions.Printer.PrintObj(imageStream, o.CreateSubcommandOptions.Out)
-}
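For reviewers tracking what this deletion drops, here is a minimal sketch of the command's net effect, assuming a populated *rest.Config; `createLocalLookupStream` is illustrative glue rather than an oc function, and it targets the same pre-context typed-client API used by this vendor tree.

```go
import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"

	imagev1 "github.com/openshift/api/image/v1"
	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
)

// createLocalLookupStream creates an empty image stream whose lookup policy
// resolves bare names (the --lookup-local behavior) in the given namespace.
func createLocalLookupStream(config *rest.Config, namespace, name string) (*imagev1.ImageStream, error) {
	client, err := imagev1client.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	return client.ImageStreams(namespace).Create(&imagev1.ImageStream{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: imagev1.ImageStreamSpec{
			LookupPolicy: imagev1.ImageLookupPolicy{Local: true},
		},
	})
}
```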
diff --git a/vendor/github.com/openshift/oc/pkg/cli/create/imagestreamtag.go b/vendor/github.com/openshift/oc/pkg/cli/create/imagestreamtag.go
deleted file mode 100644
index 98dfffaf1873..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/create/imagestreamtag.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package create
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/spf13/cobra"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	imagev1 "github.com/openshift/api/image/v1"
-	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
-	"github.com/openshift/library-go/pkg/image/reference"
-	utilenv "github.com/openshift/oc/pkg/helpers/env"
-)
-
-const ImageStreamTagRecommendedName = "imagestreamtag"
-
-var (
-	imageStreamTagLong = templates.LongDesc(`
-		Create a new image stream tag
-
-		Image stream tags allow you to track, tag, and import images from other registries. They also
-		define an access controlled destination that you can push images to. An image stream tag can
-		reference images from many different registries and control how those images are referenced by
-		pods, deployments, and builds.
-
-		If --resolve-local is passed, the image stream will be used as the source when pods reference
-		it by name. For example, if stream 'mysql' resolves local names, a pod that points to
-		'mysql:latest' will use the image that the image stream points to under the "latest" tag.`)
-
-	imageStreamTagExample = templates.Examples(`
-		# Create a new image stream tag based on an image on a remote registry
-		%[1]s mysql:latest --from-image=myregistry.local/mysql/mysql:5.0
-		`)
-)
-
-type CreateImageStreamTagOptions struct {
-	CreateSubcommandOptions *CreateSubcommandOptions
-
-	Client imagev1client.ImageStreamTagsGetter
-
-	FromImage          string
-	From               string
-	Annotations        []string
-	Scheduled          bool
-	Insecure           bool
-	Reference          bool
-	ReferencePolicyStr string
-	ReferencePolicy    imagev1.TagReferencePolicyType
-}
-
-// NewCmdCreateImageStreamTag is a command to create a new image stream tag.
-func NewCmdCreateImageStreamTag(name, fullName string, f genericclioptions.RESTClientGetter, streams genericclioptions.IOStreams) *cobra.Command {
-	o := &CreateImageStreamTagOptions{
-		CreateSubcommandOptions: NewCreateSubcommandOptions(streams),
-	}
-	cmd := &cobra.Command{
-		Use:     name + " NAME",
-		Short:   "Create a new image stream tag.",
-		Long:    imageStreamTagLong,
-		Example: fmt.Sprintf(imageStreamTagExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			cmdutil.CheckErr(o.Complete(cmd, f, args))
-			cmdutil.CheckErr(o.Run())
-		},
-		Aliases: []string{"istag"},
-	}
-	cmd.Flags().StringVar(&o.FromImage, "from-image", o.FromImage, "Use the provided remote image with this tag.")
-	cmd.Flags().StringVar(&o.From, "from", o.From, "Use the provided image stream tag or image stream image as the source: [<namespace>/]name[:<tag>|@<id>]")
-	cmd.Flags().StringSliceVarP(&o.Annotations, "annotation", "A", o.Annotations, "Set an annotation on this image stream tag.")
-	cmd.Flags().MarkShorthandDeprecated("annotation", "please use --annotation instead.")
-	cmd.Flags().BoolVar(&o.Scheduled, "scheduled", o.Scheduled, "If set the remote source of this image will be periodically checked for imports.")
-	cmd.Flags().BoolVar(&o.Insecure, "insecure", o.Insecure, "Allow importing from registries that are not fully secured by HTTPS.")
-	cmd.Flags().StringVar(&o.ReferencePolicyStr, "reference-policy", o.ReferencePolicyStr, "If set to 'Local', referenced images will be pulled from the integrated registry. Ignored when reference is true.")
-	cmd.Flags().BoolVar(&o.Reference, "reference", o.Reference, "If true, the tag value will be used whenever the image stream tag is referenced.")
-
-	o.CreateSubcommandOptions.PrintFlags.AddFlags(cmd)
-	cmdutil.AddDryRunFlag(cmd)
-
-	return cmd
-}
-
-func (o *CreateImageStreamTagOptions) Complete(cmd *cobra.Command, f genericclioptions.RESTClientGetter, args []string) error {
-	if len(o.ReferencePolicyStr) > 0 {
-		switch strings.ToLower(o.ReferencePolicyStr) {
-		case "source":
-			o.ReferencePolicy = imagev1.SourceTagReferencePolicy
-		case "local":
-			o.ReferencePolicy = imagev1.LocalTagReferencePolicy
-		default:
-			return fmt.Errorf("valid values for --reference-policy are: source, local")
-		}
-	}
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.Client, err = imagev1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	return o.CreateSubcommandOptions.Complete(f, cmd, args)
-}
-
-func (o *CreateImageStreamTagOptions) Run() error {
-	isTag := &imagev1.ImageStreamTag{
-		// this is ok because we know exactly how we want to be serialized
-		TypeMeta: metav1.TypeMeta{APIVersion: imagev1.SchemeGroupVersion.String(), Kind: "ImageStreamTag"},
-		ObjectMeta: metav1.ObjectMeta{
-			Name: o.CreateSubcommandOptions.Name,
-		},
-		Tag: &imagev1.TagReference{
-			ImportPolicy: imagev1.TagImportPolicy{
-				Scheduled: o.Scheduled,
-				Insecure:  o.Insecure,
-			},
-			ReferencePolicy: imagev1.TagReferencePolicy{
-				Type: o.ReferencePolicy,
-			},
-			Reference: o.Reference,
-		},
-	}
-
-	annotations, remove, err := utilenv.ParseAnnotation(o.Annotations, nil)
-	if err != nil {
-		return err
-	}
-	if len(remove) > 0 {
-		return fmt.Errorf("annotations must be of the form name=value")
-	}
-
-	// to preserve backwards compatibility we are forced to set this
-	isTag.Annotations = annotations
-	isTag.Tag.Annotations = annotations
-
-	switch {
-	case len(o.FromImage) > 0 && len(o.From) > 0:
-		return fmt.Errorf("--from and --from-image may not be used together")
-	case len(o.FromImage) > 0:
-		isTag.Tag.From = &corev1.ObjectReference{
-			Name: o.FromImage,
-			Kind: "DockerImage",
-		}
-	case len(o.From) > 0:
-		var name string
-		ref, err := reference.Parse(o.From)
-		if err != nil {
-			if !strings.HasPrefix(o.From, ":") {
-				return fmt.Errorf("Invalid --from, must be a valid image stream tag or image stream image: %v", err)
-			}
-			ref = reference.DockerImageReference{Tag: o.From[1:]}
-			name = o.From[1:]
-		} else {
-			name = ref.NameString()
-		}
-		if len(ref.Registry) > 0 {
-			return fmt.Errorf("Invalid --from, registry may not be specified")
-		}
-		kind := "ImageStreamTag"
-		if len(ref.ID) > 0 {
-			kind = "ImageStreamImage"
-		}
-		isTag.Tag.From = &corev1.ObjectReference{
-			Kind:      kind,
-			Name:      name,
-			Namespace: ref.Namespace,
-		}
-	}
-
-	if !o.CreateSubcommandOptions.DryRun {
-		isTag, err = o.Client.ImageStreamTags(o.CreateSubcommandOptions.Namespace).Create(isTag)
-		if err != nil {
-			return err
-		}
-	}
-
-	return o.CreateSubcommandOptions.Printer.PrintObj(isTag, o.CreateSubcommandOptions.Out)
-}
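The --from handling above distinguishes tag references from digest references when choosing the target kind. Below is a dependency-free paraphrase of just that branch; it skips reference.Parse and the bare ':tag' shorthand the real code also accepts, and `refKind` is a hypothetical name.

```go
package main

import (
	"fmt"
	"strings"
)

// refKind picks the object-reference kind the way Run above does: a digest
// ("@sha256:...") yields an ImageStreamImage, anything else an ImageStreamTag.
func refKind(from string) string {
	if strings.Contains(from, "@") {
		return "ImageStreamImage"
	}
	return "ImageStreamTag"
}

func main() {
	fmt.Println(refKind("mysql:latest"))        // ImageStreamTag
	fmt.Println(refKind("mysql@sha256:ab12cd")) // ImageStreamImage
}
```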
diff --git a/vendor/github.com/openshift/oc/pkg/cli/create/route.go b/vendor/github.com/openshift/oc/pkg/cli/create/route.go
deleted file mode 100644
index f6158cdf3752..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/create/route.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package create
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-
-	"k8s.io/apimachinery/pkg/api/meta"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/printers"
-	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	routev1client "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1"
-)
-
-var (
-	routeLong = templates.LongDesc(`
-		Expose containers externally via secured routes
-
-		Three types of secured routes are supported: edge, passthrough, and reencrypt.
-		If you wish to create unsecured routes, see "%[1]s expose -h"`)
-)
-
-// NewCmdCreateRoute is a macro command to create a secured route.
-func NewCmdCreateRoute(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	cmd := &cobra.Command{
-		Use:   "route",
-		Short: "Expose containers externally via secured routes",
-		Long:  fmt.Sprintf(routeLong, fullName),
-		Run:   kcmdutil.DefaultSubCommandRun(streams.ErrOut),
-	}
-
-	cmd.AddCommand(NewCmdCreateEdgeRoute(fullName, f, streams))
-	cmd.AddCommand(NewCmdCreatePassthroughRoute(fullName, f, streams))
-	cmd.AddCommand(NewCmdCreateReencryptRoute(fullName, f, streams))
-
-	return cmd
-}
-
-// CreateRouteSubcommandOptions is an options struct to support create subcommands
-type CreateRouteSubcommandOptions struct {
-	// PrintFlags holds options necessary for obtaining a printer
-	PrintFlags *genericclioptions.PrintFlags
-	// Name of resource being created
-	Name        string
-	ServiceName string
-	// DryRun is true if the command should be simulated but not run against the server
-	DryRun bool
-
-	Namespace        string
-	EnforceNamespace bool
-
-	Mapper meta.RESTMapper
-
-	Printer printers.ResourcePrinter
-
-	Client     routev1client.RoutesGetter
-	CoreClient corev1client.CoreV1Interface
-
-	genericclioptions.IOStreams
-}
-
-func NewCreateRouteSubcommandOptions(ioStreams genericclioptions.IOStreams) *CreateRouteSubcommandOptions {
-	return &CreateRouteSubcommandOptions{
-		PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme),
-		IOStreams:  ioStreams,
-	}
-}
-
-func (o *CreateRouteSubcommandOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	var err error
-	o.Name, err = resolveRouteName(args)
-	if err != nil {
-		return err
-	}
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-
-	o.CoreClient, err = corev1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	o.Client, err = routev1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	o.Mapper, err = f.ToRESTMapper()
-	if err != nil {
-		return err
-	}
-	o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	o.DryRun = kcmdutil.GetDryRunFlag(cmd)
-	if o.DryRun {
-		o.PrintFlags.Complete("%s (dry run)")
-	}
-	o.Printer, err = o.PrintFlags.ToPrinter()
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func resolveRouteName(args []string) (string, error) {
-	switch len(args) {
-	case 0:
-	case 1:
-		return args[0], nil
-	default:
-		return "", fmt.Errorf("multiple names provided. Please specify at most one")
-	}
-	return "", nil
-}
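CreateRouteSubcommandOptions.Complete above repeats a dry-run printing pattern shared by every create subcommand in this package. Isolated as a sketch (`printObj` is hypothetical glue, not an oc helper):

```go
import (
	"io"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/cli-runtime/pkg/genericclioptions"
)

// printObj resolves a printer from standard print flags, suffixing the
// success message with "(dry run)" when no server call will be made.
func printObj(obj runtime.Object, dryRun bool, out io.Writer) error {
	printFlags := genericclioptions.NewPrintFlags("created")
	if dryRun {
		printFlags.Complete("%s (dry run)")
	}
	printer, err := printFlags.ToPrinter()
	if err != nil {
		return err
	}
	return printer.PrintObj(obj, out)
}
```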
diff --git a/vendor/github.com/openshift/oc/pkg/cli/create/route/route.go b/vendor/github.com/openshift/oc/pkg/cli/create/route/route.go
deleted file mode 100644
index 3c1d3f0f8d94..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/create/route/route.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package route
-
-import (
-	"fmt"
-	"strconv"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/intstr"
-	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-
-	routev1 "github.com/openshift/api/route/v1"
-)
-
-// UnsecuredRoute will return a route with enough info so that it can direct traffic to
-// the service provided by --service. Callers of this helper are responsible for providing
-// tls configuration, path, and the hostname of the route.
-// forcePort always sets a port, even when there is only one and it has no name.
-// The kubernetes generator, when no port is present, incorrectly selects the service Port
-// instead of the service TargetPort for the route TargetPort.
-func UnsecuredRoute(kc corev1client.CoreV1Interface, namespace, routeName, serviceName, portString string, forcePort bool) (*routev1.Route, error) {
-	if len(routeName) == 0 {
-		routeName = serviceName
-	}
-
-	svc, err := kc.Services(namespace).Get(serviceName, metav1.GetOptions{})
-	if err != nil {
-		if len(portString) == 0 {
-			return nil, fmt.Errorf("you need to provide a route port via --port when exposing a non-existent service")
-		}
-		return &routev1.Route{
-			// this is ok because we know exactly how we want to be serialized
-			TypeMeta: metav1.TypeMeta{APIVersion: routev1.SchemeGroupVersion.String(), Kind: "Route"},
-			ObjectMeta: metav1.ObjectMeta{
-				Name: routeName,
-			},
-			Spec: routev1.RouteSpec{
-				To: routev1.RouteTargetReference{
-					Name: serviceName,
-				},
-				Port: resolveRoutePort(portString),
-			},
-		}, nil
-	}
-
-	ok, port := supportsTCP(svc)
-	if !ok {
-		return nil, fmt.Errorf("service %q doesn't support TCP", svc.Name)
-	}
-
-	route := &routev1.Route{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:   routeName,
-			Labels: svc.Labels,
-		},
-		Spec: routev1.RouteSpec{
-			To: routev1.RouteTargetReference{
-				Name: serviceName,
-			},
-		},
-	}
-
-	// When route.Spec.Port is not set, the generator will pick a service port.
-
-	// If the user didn't specify --port, and either the service has a port.Name
-	// or forcePort is set, set route.Spec.Port
-	if (len(port.Name) > 0 || forcePort) && len(portString) == 0 {
-		if len(port.Name) == 0 {
-			route.Spec.Port = resolveRoutePort(svc.Spec.Ports[0].TargetPort.String())
-		} else {
-			route.Spec.Port = resolveRoutePort(port.Name)
-		}
-	}
-	// --port uber alles
-	if len(portString) > 0 {
-		route.Spec.Port = resolveRoutePort(portString)
-	}
-
-	return route, nil
-}
-
-func resolveRoutePort(portString string) *routev1.RoutePort {
-	if len(portString) == 0 {
-		return nil
-	}
-	var routePort intstr.IntOrString
-	integer, err := strconv.Atoi(portString)
-	if err != nil {
-		routePort = intstr.FromString(portString)
-	} else {
-		routePort = intstr.FromInt(integer)
-	}
-	return &routev1.RoutePort{
-		TargetPort: routePort,
-	}
-}
-
-func supportsTCP(svc *corev1.Service) (bool, corev1.ServicePort) {
-	for _, port := range svc.Spec.Ports {
-		if port.Protocol == corev1.ProtocolTCP {
-			return true, port
-		}
-	}
-	return false, corev1.ServicePort{}
-}
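resolveRoutePort above leans on intstr to keep named and numeric ports in one field; a runnable illustration of that mapping:

```go
package main

import (
	"fmt"
	"strconv"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Numeric strings become integer target ports, anything else a named
	// port, exactly as resolveRoutePort decides.
	for _, s := range []string{"8080", "web"} {
		var port intstr.IntOrString
		if n, err := strconv.Atoi(s); err == nil {
			port = intstr.FromInt(n)
		} else {
			port = intstr.FromString(s)
		}
		fmt.Printf("%q -> type=%d value=%s\n", s, port.Type, port.String())
	}
}
```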
diff --git a/vendor/github.com/openshift/oc/pkg/cli/create/routeedge.go b/vendor/github.com/openshift/oc/pkg/cli/create/routeedge.go
deleted file mode 100644
index 2a744b63afb4..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/create/routeedge.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package create
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-
-	"k8s.io/apimachinery/pkg/api/meta"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	routev1 "github.com/openshift/api/route/v1"
-	"github.com/openshift/oc/pkg/cli/create/route"
-	cmdutil "github.com/openshift/oc/pkg/helpers/cmd"
-	fileutil "github.com/openshift/oc/pkg/helpers/file"
-)
-
-var (
-	edgeRouteLong = templates.LongDesc(`
-		Create a route that uses edge TLS termination
-
-		Specify the service (either just its name or using type/name syntax) that the
-		generated route should expose via the --service flag.`)
-
-	edgeRouteExample = templates.Examples(`
-		# Create an edge route named "my-route" that exposes the frontend service.
-	  %[1]s create route edge my-route --service=frontend
-
-	  # Create an edge route that exposes the frontend service and specify a path.
-	  # If the route name is omitted, the service name will be re-used.
-	  %[1]s create route edge --service=frontend --path /assets`)
-)
-
-type CreateEdgeRouteOptions struct {
-	CreateRouteSubcommandOptions *CreateRouteSubcommandOptions
-
-	Hostname       string
-	Port           string
-	InsecurePolicy string
-	Service        string
-	Path           string
-	Cert           string
-	Key            string
-	CACert         string
-	WildcardPolicy string
-}
-
-// NewCmdCreateEdgeRoute is a macro command to create an edge route.
-func NewCmdCreateEdgeRoute(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := &CreateEdgeRouteOptions{
-		CreateRouteSubcommandOptions: NewCreateRouteSubcommandOptions(streams),
-	}
-	cmd := &cobra.Command{
-		Use:     "edge [NAME] --service=SERVICE",
-		Short:   "Create a route that uses edge TLS termination",
-		Long:    edgeRouteLong,
-		Example: fmt.Sprintf(edgeRouteExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	cmd.Flags().StringVar(&o.Hostname, "hostname", o.Hostname, "Set a hostname for the new route")
-	cmd.Flags().StringVar(&o.Port, "port", o.Port, "Name of the service port or number of the container port the route will route traffic to")
-	cmd.Flags().StringVar(&o.InsecurePolicy, "insecure-policy", o.InsecurePolicy, "Set an insecure policy for the new route")
-	cmd.Flags().StringVar(&o.Service, "service", o.Service, "Name of the service that the new route is exposing")
-	cmd.MarkFlagRequired("service")
-	cmd.Flags().StringVar(&o.Path, "path", o.Path, "Path that the router watches to route traffic to the service.")
-	cmd.Flags().StringVar(&o.Cert, "cert", o.Cert, "Path to a certificate file.")
-	cmd.MarkFlagFilename("cert")
-	cmd.Flags().StringVar(&o.Key, "key", o.Key, "Path to a key file.")
-	cmd.MarkFlagFilename("key")
-	cmd.Flags().StringVar(&o.CACert, "ca-cert", o.CACert, "Path to a CA certificate file.")
-	cmd.MarkFlagFilename("ca-cert")
-	cmd.Flags().StringVar(&o.WildcardPolicy, "wildcard-policy", o.WildcardPolicy, "Sets the WildcardPolicy for the hostname; the default is \"None\". Valid values are \"None\" and \"Subdomain\"")
-
-	kcmdutil.AddValidateFlags(cmd)
-	o.CreateRouteSubcommandOptions.PrintFlags.AddFlags(cmd)
-	kcmdutil.AddDryRunFlag(cmd)
-
-	return cmd
-}
-
-func (o *CreateEdgeRouteOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	return o.CreateRouteSubcommandOptions.Complete(f, cmd, args)
-}
-
-func (o *CreateEdgeRouteOptions) Run() error {
-	serviceName, err := resolveServiceName(o.CreateRouteSubcommandOptions.Mapper, o.Service)
-	if err != nil {
-		return err
-	}
-	route, err := route.UnsecuredRoute(o.CreateRouteSubcommandOptions.CoreClient, o.CreateRouteSubcommandOptions.Namespace, o.CreateRouteSubcommandOptions.Name, serviceName, o.Port, false)
-	if err != nil {
-		return err
-	}
-
-	if len(o.WildcardPolicy) > 0 {
-		route.Spec.WildcardPolicy = routev1.WildcardPolicyType(o.WildcardPolicy)
-	}
-
-	route.Spec.Host = o.Hostname
-	route.Spec.Path = o.Path
-
-	route.Spec.TLS = new(routev1.TLSConfig)
-	route.Spec.TLS.Termination = routev1.TLSTerminationEdge
-	cert, err := fileutil.LoadData(o.Cert)
-	if err != nil {
-		return err
-	}
-	route.Spec.TLS.Certificate = string(cert)
-	key, err := fileutil.LoadData(o.Key)
-	if err != nil {
-		return err
-	}
-	route.Spec.TLS.Key = string(key)
-	caCert, err := fileutil.LoadData(o.CACert)
-	if err != nil {
-		return err
-	}
-	route.Spec.TLS.CACertificate = string(caCert)
-
-	if len(o.InsecurePolicy) > 0 {
-		route.Spec.TLS.InsecureEdgeTerminationPolicy = routev1.InsecureEdgeTerminationPolicyType(o.InsecurePolicy)
-	}
-
-	if !o.CreateRouteSubcommandOptions.DryRun {
-		route, err = o.CreateRouteSubcommandOptions.Client.Routes(o.CreateRouteSubcommandOptions.Namespace).Create(route)
-		if err != nil {
-			return err
-		}
-	}
-
-	return o.CreateRouteSubcommandOptions.Printer.PrintObj(route, o.CreateRouteSubcommandOptions.Out)
-}
-
-func resolveServiceName(mapper meta.RESTMapper, resource string) (string, error) {
-	if len(resource) == 0 {
-		return "", fmt.Errorf("you need to provide a service name via --service")
-	}
-	rType, name, err := cmdutil.ResolveResource(kapi.Resource("services"), resource, mapper)
-	if err != nil {
-		return "", err
-	}
-	if rType != kapi.Resource("services") {
-		return "", fmt.Errorf("cannot expose %v as routes", rType)
-	}
-	return name, nil
-}
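Run above threads three optional PEM files into the route's TLS config. A condensed sketch of that wiring, substituting os.ReadFile for oc's fileutil.LoadData (which likewise treats an empty path as "no data"); `edgeTLS` is an illustrative name.

```go
import (
	"os"

	routev1 "github.com/openshift/api/route/v1"
)

// edgeTLS assembles the TLS block for an edge route from optional PEM paths;
// empty paths are skipped, mirroring the command's optional flags.
func edgeTLS(certPath, keyPath, caPath string) (*routev1.TLSConfig, error) {
	tls := &routev1.TLSConfig{Termination: routev1.TLSTerminationEdge}
	for _, f := range []struct {
		path string
		dst  *string
	}{
		{certPath, &tls.Certificate},
		{keyPath, &tls.Key},
		{caPath, &tls.CACertificate},
	} {
		if len(f.path) == 0 {
			continue
		}
		data, err := os.ReadFile(f.path)
		if err != nil {
			return nil, err
		}
		*f.dst = string(data)
	}
	return tls, nil
}
```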
diff --git a/vendor/github.com/openshift/oc/pkg/cli/create/routepassthrough.go b/vendor/github.com/openshift/oc/pkg/cli/create/routepassthrough.go
deleted file mode 100644
index 9b95804b8407..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/create/routepassthrough.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package create
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	routev1 "github.com/openshift/api/route/v1"
-	"github.com/openshift/oc/pkg/cli/create/route"
-)
-
-var (
-	passthroughRouteLong = templates.LongDesc(`
-		Create a route that uses passthrough TLS termination
-
-		Specify the service (either just its name or using type/name syntax) that the
-		generated route should expose via the --service flag.`)
-
-	passthroughRouteExample = templates.Examples(`
-		# Create a passthrough route named "my-route" that exposes the frontend service.
-	  %[1]s create route passthrough my-route --service=frontend
-
-	  # Create a passthrough route that exposes the frontend service and specify
-	  # a hostname. If the route name is omitted, the service name will be re-used.
-	  %[1]s create route passthrough --service=frontend --hostname=www.example.com`)
-)
-
-type CreatePassthroughRouteOptions struct {
-	CreateRouteSubcommandOptions *CreateRouteSubcommandOptions
-
-	Hostname       string
-	Port           string
-	InsecurePolicy string
-	Service        string
-	WildcardPolicy string
-}
-
-// NewCmdCreatePassthroughRoute is a macro command to create a passthrough route.
-func NewCmdCreatePassthroughRoute(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := &CreatePassthroughRouteOptions{
-		CreateRouteSubcommandOptions: NewCreateRouteSubcommandOptions(streams),
-	}
-	cmd := &cobra.Command{
-		Use:     "passthrough [NAME] --service=SERVICE",
-		Short:   "Create a route that uses passthrough TLS termination",
-		Long:    passthroughRouteLong,
-		Example: fmt.Sprintf(passthroughRouteExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-	cmd.Flags().StringVar(&o.Hostname, "hostname", o.Hostname, "Set a hostname for the new route")
-	cmd.Flags().StringVar(&o.Port, "port", o.Port, "Name of the service port or number of the container port the route will route traffic to")
-	cmd.Flags().StringVar(&o.InsecurePolicy, "insecure-policy", o.InsecurePolicy, "Set an insecure policy for the new route")
-	cmd.Flags().StringVar(&o.Service, "service", o.Service, "Name of the service that the new route is exposing")
-	cmd.MarkFlagRequired("service")
-	cmd.Flags().StringVar(&o.WildcardPolicy, "wildcard-policy", o.WildcardPolicy, "Sets the WildcardPolicy for the hostname; the default is \"None\". Valid values are \"None\" and \"Subdomain\"")
-
-	kcmdutil.AddValidateFlags(cmd)
-	o.CreateRouteSubcommandOptions.PrintFlags.AddFlags(cmd)
-	kcmdutil.AddDryRunFlag(cmd)
-
-	return cmd
-}
-
-func (o *CreatePassthroughRouteOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	return o.CreateRouteSubcommandOptions.Complete(f, cmd, args)
-}
-
-func (o *CreatePassthroughRouteOptions) Run() error {
-	serviceName, err := resolveServiceName(o.CreateRouteSubcommandOptions.Mapper, o.Service)
-	if err != nil {
-		return err
-	}
-	route, err := route.UnsecuredRoute(o.CreateRouteSubcommandOptions.CoreClient, o.CreateRouteSubcommandOptions.Namespace, o.CreateRouteSubcommandOptions.Name, serviceName, o.Port, false)
-	if err != nil {
-		return err
-	}
-
-	if len(o.WildcardPolicy) > 0 {
-		route.Spec.WildcardPolicy = routev1.WildcardPolicyType(o.WildcardPolicy)
-	}
-
-	route.Spec.Host = o.Hostname
-	route.Spec.TLS = new(routev1.TLSConfig)
-	route.Spec.TLS.Termination = routev1.TLSTerminationPassthrough
-
-	if len(o.InsecurePolicy) > 0 {
-		route.Spec.TLS.InsecureEdgeTerminationPolicy = routev1.InsecureEdgeTerminationPolicyType(o.InsecurePolicy)
-	}
-
-	if !o.CreateRouteSubcommandOptions.DryRun {
-		route, err = o.CreateRouteSubcommandOptions.Client.Routes(o.CreateRouteSubcommandOptions.Namespace).Create(route)
-		if err != nil {
-			return err
-		}
-	}
-
-	return o.CreateRouteSubcommandOptions.Printer.PrintObj(route, o.CreateRouteSubcommandOptions.Out)
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/create/routereenecrypt.go b/vendor/github.com/openshift/oc/pkg/cli/create/routereenecrypt.go
deleted file mode 100644
index 10ee14864848..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/create/routereenecrypt.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package create
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	routev1 "github.com/openshift/api/route/v1"
-	"github.com/openshift/oc/pkg/cli/create/route"
-	fileutil "github.com/openshift/oc/pkg/helpers/file"
-)
-
-var (
-	reencryptRouteLong = templates.LongDesc(`
-		Create a route that uses reencrypt TLS termination
-
-		Specify the service (either just its name or using type/name syntax) that the
-		generated route should expose via the --service flag. A destination CA certificate
-		is needed for reencrypt routes, specify one with the --dest-ca-cert flag.`)
-
-	reencryptRouteExample = templates.Examples(`
-		# Create a reencrypt route named "my-route" that exposes the frontend service.
-	  %[1]s create route reencrypt my-route --service=frontend --dest-ca-cert cert.cert
-
-	  # Create a reencrypt route that exposes the frontend service and re-use
-	  # the service name as the route name.
-	  %[1]s create route reencrypt --service=frontend --dest-ca-cert cert.cert`)
-)
-
-type CreateReencryptRouteOptions struct {
-	CreateRouteSubcommandOptions *CreateRouteSubcommandOptions
-
-	Hostname       string
-	Port           string
-	InsecurePolicy string
-	Service        string
-	Path           string
-	Cert           string
-	Key            string
-	CACert         string
-	DestCACert     string
-	WildcardPolicy string
-}
-
-// NewCmdCreateReencryptRoute is a macro command to create a reencrypt route.
-func NewCmdCreateReencryptRoute(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := &CreateReencryptRouteOptions{
-		CreateRouteSubcommandOptions: NewCreateRouteSubcommandOptions(streams),
-	}
-	cmd := &cobra.Command{
-		Use:     "reencrypt [NAME] --dest-ca-cert=FILENAME --service=SERVICE",
-		Short:   "Create a route that uses reencrypt TLS termination",
-		Long:    reencryptRouteLong,
-		Example: fmt.Sprintf(reencryptRouteExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	cmd.Flags().StringVar(&o.Hostname, "hostname", o.Hostname, "Set a hostname for the new route")
-	cmd.Flags().StringVar(&o.Port, "port", o.Port, "Name of the service port or number of the container port the route will route traffic to")
-	cmd.Flags().StringVar(&o.InsecurePolicy, "insecure-policy", o.InsecurePolicy, "Set an insecure policy for the new route")
-	cmd.Flags().StringVar(&o.Service, "service", o.Service, "Name of the service that the new route is exposing")
-	cmd.MarkFlagRequired("service")
-	cmd.Flags().StringVar(&o.Path, "path", o.Path, "Path that the router watches to route traffic to the service.")
-	cmd.Flags().StringVar(&o.Cert, "cert", o.Cert, "Path to a certificate file.")
-	cmd.MarkFlagFilename("cert")
-	cmd.Flags().StringVar(&o.Key, "key", o.Key, "Path to a key file.")
-	cmd.MarkFlagFilename("key")
-	cmd.Flags().StringVar(&o.CACert, "ca-cert", o.CACert, "Path to a CA certificate file.")
-	cmd.MarkFlagFilename("ca-cert")
-	cmd.Flags().StringVar(&o.DestCACert, "dest-ca-cert", o.DestCACert, "Path to a CA certificate file, used for securing the connection from the router to the destination.")
-	cmd.MarkFlagFilename("dest-ca-cert")
-	cmd.Flags().StringVar(&o.WildcardPolicy, "wildcard-policy", o.WildcardPolicy, "Sets the WildcardPolicy for the hostname; the default is \"None\". Valid values are \"None\" and \"Subdomain\"")
-
-	kcmdutil.AddValidateFlags(cmd)
-	o.CreateRouteSubcommandOptions.PrintFlags.AddFlags(cmd)
-	kcmdutil.AddDryRunFlag(cmd)
-
-	return cmd
-}
-
-func (o *CreateReencryptRouteOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	return o.CreateRouteSubcommandOptions.Complete(f, cmd, args)
-}
-
-func (o *CreateReencryptRouteOptions) Run() error {
-	serviceName, err := resolveServiceName(o.CreateRouteSubcommandOptions.Mapper, o.Service)
-	if err != nil {
-		return err
-	}
-	route, err := route.UnsecuredRoute(o.CreateRouteSubcommandOptions.CoreClient, o.CreateRouteSubcommandOptions.Namespace, o.CreateRouteSubcommandOptions.Name, serviceName, o.Port, false)
-	if err != nil {
-		return err
-	}
-
-	if len(o.WildcardPolicy) > 0 {
-		route.Spec.WildcardPolicy = routev1.WildcardPolicyType(o.WildcardPolicy)
-	}
-
-	route.Spec.Host = o.Hostname
-	route.Spec.Path = o.Path
-
-	route.Spec.TLS = new(routev1.TLSConfig)
-	route.Spec.TLS.Termination = routev1.TLSTerminationReencrypt
-
-	cert, err := fileutil.LoadData(o.Cert)
-	if err != nil {
-		return err
-	}
-	route.Spec.TLS.Certificate = string(cert)
-	key, err := fileutil.LoadData(o.Key)
-	if err != nil {
-		return err
-	}
-	route.Spec.TLS.Key = string(key)
-	caCert, err := fileutil.LoadData(o.CACert)
-	if err != nil {
-		return err
-	}
-	route.Spec.TLS.CACertificate = string(caCert)
-	destCACert, err := fileutil.LoadData(o.DestCACert)
-	if err != nil {
-		return err
-	}
-	route.Spec.TLS.DestinationCACertificate = string(destCACert)
-
-	if len(o.InsecurePolicy) > 0 {
-		route.Spec.TLS.InsecureEdgeTerminationPolicy = routev1.InsecureEdgeTerminationPolicyType(o.InsecurePolicy)
-	}
-
-	if !o.CreateRouteSubcommandOptions.DryRun {
-		route, err = o.CreateRouteSubcommandOptions.Client.Routes(o.CreateRouteSubcommandOptions.Namespace).Create(route)
-		if err != nil {
-			return err
-		}
-	}
-
-	return o.CreateRouteSubcommandOptions.Printer.PrintObj(route, o.CreateRouteSubcommandOptions.Out)
-}
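Taken together, the three route subcommands deleted above differ mainly in which TLS fields they populate. A side-by-side sketch for contrast (descriptive only, not oc code):

```go
import routev1 "github.com/openshift/api/route/v1"

// Passthrough terminates TLS in the pod, so the route carries no key material.
func passthroughTLS() *routev1.TLSConfig {
	return &routev1.TLSConfig{Termination: routev1.TLSTerminationPassthrough}
}

// Reencrypt takes everything edge does, plus the CA that verifies the
// router-to-backend connection (--dest-ca-cert).
func reencryptTLS(cert, key, ca, destCA string) *routev1.TLSConfig {
	return &routev1.TLSConfig{
		Termination:              routev1.TLSTerminationReencrypt,
		Certificate:              cert,
		Key:                      key,
		CACertificate:            ca,
		DestinationCACertificate: destCA,
	}
}
```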
diff --git a/vendor/github.com/openshift/oc/pkg/cli/create/user.go b/vendor/github.com/openshift/oc/pkg/cli/create/user.go
deleted file mode 100644
index cec1089cb155..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/create/user.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package create
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	userv1 "github.com/openshift/api/user/v1"
-	userv1client "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1"
-)
-
-const UserRecommendedName = "user"
-
-var (
-	userLong = templates.LongDesc(`
-		This command can be used to create a user object.
-
-		Typically, users are created automatically during login. If automatic
-		creation is disabled (by using the "lookup" mapping method), users must
-		be created manually.
-
-		Corresponding identity and useridentitymapping objects must also be created
-		to allow logging in as the created user.`)
-
-	userExample = templates.Examples(`
-		# Create a user with the username "ajones" and the display name "Adam Jones"
-  	%[1]s ajones --full-name="Adam Jones"`)
-)
-
-type CreateUserOptions struct {
-	CreateSubcommandOptions *CreateSubcommandOptions
-
-	FullName string
-
-	UserClient userv1client.UsersGetter
-}
-
-// NewCmdCreateUser is a macro command to create a new user
-func NewCmdCreateUser(name, fullName string, f genericclioptions.RESTClientGetter, streams genericclioptions.IOStreams) *cobra.Command {
-	o := &CreateUserOptions{
-		CreateSubcommandOptions: NewCreateSubcommandOptions(streams),
-	}
-	cmd := &cobra.Command{
-		Use:     name + " USERNAME",
-		Short:   "Manually create a user (only needed if automatic creation is disabled).",
-		Long:    userLong,
-		Example: fmt.Sprintf(userExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			cmdutil.CheckErr(o.Complete(cmd, f, args))
-			cmdutil.CheckErr(o.Run())
-		},
-	}
-	cmd.Flags().StringVar(&o.FullName, "full-name", o.FullName, "Display name of the user")
-
-	o.CreateSubcommandOptions.PrintFlags.AddFlags(cmd)
-	cmdutil.AddDryRunFlag(cmd)
-
-	return cmd
-}
-
-func (o *CreateUserOptions) Complete(cmd *cobra.Command, f genericclioptions.RESTClientGetter, args []string) error {
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.UserClient, err = userv1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	return o.CreateSubcommandOptions.Complete(f, cmd, args)
-}
-
-func (o *CreateUserOptions) Run() error {
-	user := &userv1.User{
-		// this is ok because we know exactly how we want to be serialized
-		TypeMeta: metav1.TypeMeta{APIVersion: userv1.SchemeGroupVersion.String(), Kind: "User"},
-		ObjectMeta: metav1.ObjectMeta{
-			Name: o.CreateSubcommandOptions.Name,
-		},
-		FullName: o.FullName,
-	}
-	var err error
-	if !o.CreateSubcommandOptions.DryRun {
-		user, err = o.UserClient.Users().Create(user)
-		if err != nil {
-			return err
-		}
-	}
-
-	return o.CreateSubcommandOptions.Printer.PrintObj(user, o.CreateSubcommandOptions.Out)
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/create/user_identity_mapping.go b/vendor/github.com/openshift/oc/pkg/cli/create/user_identity_mapping.go
deleted file mode 100644
index 558816ab8f70..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/create/user_identity_mapping.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package create
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	userv1 "github.com/openshift/api/user/v1"
-	userv1client "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1"
-)
-
-const UserIdentityMappingRecommendedName = "useridentitymapping"
-
-var (
-	userIdentityMappingLong = templates.LongDesc(`
-		Typically, identities are automatically mapped to users during login. If automatic
-		mapping is disabled (by using the "lookup" mapping method), or a mapping needs to
-		be manually established between an identity and a user, this command can be used
-		to create a useridentitymapping object.`)
-
-	userIdentityMappingExample = templates.Examples(`
-		# Map the identity "acme_ldap:adamjones" to the user "ajones"
-  	%[1]s acme_ldap:adamjones ajones`)
-)
-
-type CreateUserIdentityMappingOptions struct {
-	CreateSubcommandOptions *CreateSubcommandOptions
-
-	User     string
-	Identity string
-
-	UserIdentityMappingClient userv1client.UserIdentityMappingsGetter
-}
-
-func NewCreateUserIdentityMappingOptions(streams genericclioptions.IOStreams) *CreateUserIdentityMappingOptions {
-	return &CreateUserIdentityMappingOptions{
-		CreateSubcommandOptions: NewCreateSubcommandOptions(streams),
-	}
-}
-
-// NewCmdCreateUserIdentityMapping is a macro command to create a new identity
-func NewCmdCreateUserIdentityMapping(name, fullName string, f genericclioptions.RESTClientGetter, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewCreateUserIdentityMappingOptions(streams)
-	cmd := &cobra.Command{
-		Use:     name + " <IDENTITY_NAME> <USER_NAME>",
-		Short:   "Manually map an identity to a user.",
-		Long:    userIdentityMappingLong,
-		Example: fmt.Sprintf(userIdentityMappingExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			cmdutil.CheckErr(o.Complete(cmd, f, args))
-			cmdutil.CheckErr(o.Run())
-		},
-	}
-
-	o.CreateSubcommandOptions.PrintFlags.AddFlags(cmd)
-	cmdutil.AddDryRunFlag(cmd)
-
-	return cmd
-}
-
-func (o *CreateUserIdentityMappingOptions) Complete(cmd *cobra.Command, f genericclioptions.RESTClientGetter, args []string) error {
-	switch len(args) {
-	case 0:
-		return fmt.Errorf("identity is required")
-	case 1:
-		return fmt.Errorf("user name is required")
-	case 2:
-		o.Identity = args[0]
-		o.User = args[1]
-	default:
-		return fmt.Errorf("exactly two arguments (identity and user name) are supported, not: %v", args)
-	}
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.UserIdentityMappingClient, err = userv1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	o.CreateSubcommandOptions.Namespace, o.CreateSubcommandOptions.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	o.CreateSubcommandOptions.DryRun = cmdutil.GetDryRunFlag(cmd)
-	if o.CreateSubcommandOptions.DryRun {
-		o.CreateSubcommandOptions.PrintFlags.Complete("%s (dry run)")
-	}
-	o.CreateSubcommandOptions.Printer, err = o.CreateSubcommandOptions.PrintFlags.ToPrinter()
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (o *CreateUserIdentityMappingOptions) Run() error {
-	mapping := &userv1.UserIdentityMapping{
-		// this is ok because we know exactly how we want to be serialized
-		TypeMeta: metav1.TypeMeta{APIVersion: userv1.SchemeGroupVersion.String(), Kind: "UserIdentityMapping"},
-		Identity: corev1.ObjectReference{Name: o.Identity},
-		User:     corev1.ObjectReference{Name: o.User},
-	}
-
-	var err error
-	if !o.CreateSubcommandOptions.DryRun {
-		mapping, err = o.UserIdentityMappingClient.UserIdentityMappings().Create(mapping)
-		if err != nil {
-			return err
-		}
-	}
-
-	return o.CreateSubcommandOptions.Printer.PrintObj(mapping, o.CreateSubcommandOptions.Out)
-}
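As the long descriptions above note, disabling automatic creation via the "lookup" mapping method means a user, an identity, and a mapping must all exist before login works. A minimal sketch chaining the three creates, assuming a populated *rest.Config and reusing the names from the examples; `createMappedUser` is illustrative glue, not an oc function.

```go
import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"

	userv1 "github.com/openshift/api/user/v1"
	userv1client "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1"
)

func createMappedUser(config *rest.Config) error {
	client, err := userv1client.NewForConfig(config)
	if err != nil {
		return err
	}
	// 1. The user object itself.
	if _, err := client.Users().Create(&userv1.User{
		ObjectMeta: metav1.ObjectMeta{Name: "ajones"},
		FullName:   "Adam Jones",
	}); err != nil {
		return err
	}
	// 2. The identity, built the same way as in Run above.
	if _, err := client.Identities().Create(&userv1.Identity{
		ProviderName:     "acme_ldap",
		ProviderUserName: "adamjones",
	}); err != nil {
		return err
	}
	// 3. The mapping tying the two together.
	_, err = client.UserIdentityMappings().Create(&userv1.UserIdentityMapping{
		Identity: corev1.ObjectReference{Name: "acme_ldap:adamjones"},
		User:     corev1.ObjectReference{Name: "ajones"},
	})
	return err
}
```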
diff --git a/vendor/github.com/openshift/oc/pkg/cli/debug/debug.go b/vendor/github.com/openshift/oc/pkg/cli/debug/debug.go
deleted file mode 100644
index 117c08e6aea8..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/debug/debug.go
+++ /dev/null
@@ -1,913 +0,0 @@
-package debug
-
-import (
-	"context"
-	"fmt"
-	"os"
-	"reflect"
-	"strings"
-	"time"
-
-	"github.com/spf13/cobra"
-	"k8s.io/klog"
-
-	kappsv1 "k8s.io/api/apps/v1"
-	kappsv1beta1 "k8s.io/api/apps/v1beta1"
-	kappsv1beta2 "k8s.io/api/apps/v1beta2"
-	batchv1 "k8s.io/api/batch/v1"
-	batchv1beta1 "k8s.io/api/batch/v1beta1"
-	batchv2alpha1 "k8s.io/api/batch/v2alpha1"
-	corev1 "k8s.io/api/core/v1"
-	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
-	kapierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/printers"
-	"k8s.io/cli-runtime/pkg/resource"
-	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-	watchtools "k8s.io/client-go/tools/watch"
-	"k8s.io/kubernetes/pkg/kubectl"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/attach"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/logs"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/polymorphichelpers"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-	"k8s.io/kubernetes/pkg/kubectl/util/term"
-	"k8s.io/kubernetes/pkg/util/interrupt"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-	dockerv10 "github.com/openshift/api/image/docker10"
-	imagev1 "github.com/openshift/api/image/v1"
-	appsv1client "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1"
-	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
-	"github.com/openshift/library-go/pkg/apps/appsutil"
-	"github.com/openshift/library-go/pkg/image/imageutil"
-	"github.com/openshift/library-go/pkg/image/reference"
-	"github.com/openshift/oc/pkg/helpers/conditions"
-	utilenv "github.com/openshift/oc/pkg/helpers/env"
-	generateapp "github.com/openshift/oc/pkg/helpers/newapp/app"
-)
-
-const (
-	debugPodAnnotationSourceContainer = "debug.openshift.io/source-container"
-	debugPodAnnotationSourceResource  = "debug.openshift.io/source-resource"
-)
-
-var (
-	debugLong = templates.LongDesc(`
-		Launch a command shell to debug a running application
-
-		When debugging images and setup problems, it's useful to get an exact copy of a running
-		pod configuration and troubleshoot with a shell. Since a pod that is failing may not be
-		started, and thus not accessible to 'rsh' or 'exec', the 'debug' command makes it easy to
-		create a carbon copy of that setup.
-
-		The default mode is to start a shell inside of the first container of the referenced pod,
-		replication controller, or deployment config. The started pod will be a copy of your
-		source pod, with labels stripped, the command changed to '/bin/sh', and readiness and
-		liveness checks disabled. If you just want to run a command, add '--' and a command to
-		run. Passing a command will not create a TTY or send STDIN by default. Other flags are
-		supported for altering the container or pod in common ways.
-
-		A common problem running containers is a security policy that prohibits you from running
-		as a root user on the cluster. You can use this command to test running a pod as
-		non-root (with --as-user) or to run a non-root pod as root (with --as-root).
-
-		The debug pod is deleted when the remote command completes or the user interrupts
-		the shell.`)
-
-	debugExample = templates.Examples(`
-	  # Debug a currently running deployment
-	  %[1]s dc/test
-
-	  # Test running a deployment as a non-root user
-	  %[1]s dc/test --as-user=1000000
-
-	  # Debug a specific failing container by running the env command in the 'second' container
-	  %[1]s dc/test -c second -- /bin/env
-
-	  # See the pod that would be created to debug
-	  %[1]s dc/test -o yaml`)
-)
-
-type DebugOptions struct {
-	PrintFlags *genericclioptions.PrintFlags
-
-	Attach attach.AttachOptions
-
-	CoreClient  corev1client.CoreV1Interface
-	AppsClient  appsv1client.AppsV1Interface
-	ImageClient imagev1client.ImageV1Interface
-
-	Printer          printers.ResourcePrinter
-	LogsForObject    polymorphichelpers.LogsForObjectFunc
-	RESTClientGetter genericclioptions.RESTClientGetter
-
-	NoStdin    bool
-	ForceTTY   bool
-	DisableTTY bool
-	Timeout    time.Duration
-
-	Command            []string
-	Annotations        map[string]string
-	AsRoot             bool
-	AsNonRoot          bool
-	AsUser             int64
-	KeepLabels         bool // TODO: evaluate selecting the right labels automatically
-	KeepAnnotations    bool
-	KeepLiveness       bool
-	KeepReadiness      bool
-	KeepInitContainers bool
-	OneContainer       bool
-	NodeName           string
-	AddEnv             []corev1.EnvVar
-	RemoveEnv          []string
-	Resources          []string
-	Builder            func() *resource.Builder
-	Namespace          string
-	ExplicitNamespace  bool
-	DryRun             bool
-	FullCmdName        string
-	Image              string
-
-	// IsNode is set after we see the object we're debugging.  We use it to be able to print pertinent advice.
-	IsNode bool
-
-	resource.FilenameOptions
-	genericclioptions.IOStreams
-}
-
-func NewDebugOptions(streams genericclioptions.IOStreams) *DebugOptions {
-	attachOpts := attach.NewAttachOptions(streams)
-	attachOpts.TTY = true
-	attachOpts.Stdin = true
-	return &DebugOptions{
-		PrintFlags:         genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme),
-		IOStreams:          streams,
-		Timeout:            15 * time.Minute,
-		KeepInitContainers: true,
-		AsUser:             -1,
-		Attach:             *attachOpts,
-		LogsForObject:      polymorphichelpers.LogsForObjectFn,
-	}
-}
-
-// NewCmdDebug creates a command for debugging pods.
-func NewCmdDebug(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewDebugOptions(streams)
-	cmd := &cobra.Command{
-		Use:     "debug RESOURCE/NAME [ENV1=VAL1 ...] [-c CONTAINER] [flags] [-- COMMAND]",
-		Short:   "Launch a new instance of a pod for debugging",
-		Long:    debugLong,
-		Example: fmt.Sprintf(debugExample, fmt.Sprintf("%s debug", fullName)),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(cmd, f, args))
-			kcmdutil.CheckErr(o.Validate())
-			kcmdutil.CheckErr(o.RunDebug())
-		},
-	}
-
-	usage := "to read a template"
-	kcmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage)
-
-	// FIXME-REBASE: we need to wire jsonpath here and other printers
-	cmd.Flags().Bool("no-headers", false, "If true, when using the default output, don't print headers.")
-	cmd.Flags().MarkHidden("no-headers")
-	cmd.Flags().String("sort-by", "", "If non-empty, sort list types using this field specification.  The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string.")
-	cmd.Flags().MarkHidden("sort-by")
-	cmd.Flags().Bool("show-all", true, "When printing, show all resources (default hide terminated pods.)")
-	cmd.Flags().MarkHidden("show-all")
-	cmd.Flags().Bool("show-labels", false, "When printing, show all labels as the last column (default hide labels column)")
-
-	cmd.Flags().BoolVarP(&o.NoStdin, "no-stdin", "I", o.NoStdin, "Bypasses passing STDIN to the container, defaults to true if no command specified")
-	cmd.Flags().BoolVarP(&o.ForceTTY, "tty", "t", o.ForceTTY, "Force a pseudo-terminal to be allocated")
-	cmd.Flags().BoolVarP(&o.DisableTTY, "no-tty", "T", o.DisableTTY, "Disable pseudo-terminal allocation")
-	cmd.Flags().StringVarP(&o.Attach.ContainerName, "container", "c", o.Attach.ContainerName, "Container name; defaults to first container")
-	cmd.Flags().BoolVar(&o.KeepAnnotations, "keep-annotations", o.KeepAnnotations, "If true, keep the original pod annotations")
-	cmd.Flags().BoolVar(&o.KeepLiveness, "keep-liveness", o.KeepLiveness, "If true, keep the original pod liveness probes")
-	cmd.Flags().BoolVar(&o.KeepInitContainers, "keep-init-containers", o.KeepInitContainers, "Run the init containers for the pod. Defaults to true.")
-	cmd.Flags().BoolVar(&o.KeepReadiness, "keep-readiness", o.KeepReadiness, "If true, keep the original pod readiness probes")
-	cmd.Flags().BoolVar(&o.OneContainer, "one-container", o.OneContainer, "If true, run only the selected container, remove all others")
-	cmd.Flags().StringVar(&o.NodeName, "node-name", o.NodeName, "Set a specific node to run on - by default the pod will run on any valid node")
-	cmd.Flags().BoolVar(&o.AsRoot, "as-root", o.AsRoot, "If true, try to run the container as the root user")
-	cmd.Flags().Int64Var(&o.AsUser, "as-user", o.AsUser, "Try to run the container as a specific user UID (note: admins may limit your ability to use this flag)")
-	cmd.Flags().StringVar(&o.Image, "image", o.Image, "Override the image used by the targeted container.")
-
-	o.PrintFlags.AddFlags(cmd)
-	kcmdutil.AddDryRunFlag(cmd)
-
-	return cmd
-}
-
-func (o *DebugOptions) Complete(cmd *cobra.Command, f kcmdutil.Factory, args []string) error {
-	if i := cmd.ArgsLenAtDash(); i != -1 && i < len(args) {
-		o.Command = args[i:]
-		args = args[:i]
-	}
-	resources, envArgs, ok := utilenv.SplitEnvironmentFromResources(args)
-	if !ok {
-		return kcmdutil.UsageErrorf(cmd, "all resources must be specified before environment changes: %s", strings.Join(args, " "))
-	}
-	o.Resources = resources
-	o.RESTClientGetter = f
-
-	switch {
-	case o.ForceTTY && o.NoStdin:
-		return kcmdutil.UsageErrorf(cmd, "you may not specify -I and -t together")
-	case o.ForceTTY && o.DisableTTY:
-		return kcmdutil.UsageErrorf(cmd, "you may not specify -t and -T together")
-	case o.ForceTTY:
-		o.Attach.TTY = true
-	// since ForceTTY is defaulted to false, check if user specifically passed in "=false" flag
-	case !o.ForceTTY && cmd.Flags().Changed("tty"):
-		o.Attach.TTY = false
-	case o.DisableTTY:
-		o.Attach.TTY = false
-	// don't default TTY to true if a command is passed
-	case len(o.Command) > 0:
-		o.Attach.TTY = false
-		o.Attach.Stdin = false
-	default:
-		o.Attach.TTY = term.IsTerminal(o.In)
-		klog.V(4).Infof("Defaulting TTY to %t", o.Attach.TTY)
-	}
-	if o.NoStdin {
-		o.Attach.TTY = false
-		o.Attach.Stdin = false
-	}
-
-	if o.Annotations == nil {
-		o.Annotations = make(map[string]string)
-	}
-
-	if len(o.Command) == 0 {
-		o.Command = []string{"/bin/sh"}
-	}
-
-	var err error
-	o.Namespace, o.ExplicitNamespace, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	o.Builder = f.NewBuilder
-
-	o.AddEnv, o.RemoveEnv, err = utilenv.ParseEnv(envArgs, nil)
-	if err != nil {
-		return err
-	}
-
-	cmdParent := cmd.Parent()
-	if cmdParent != nil && len(cmdParent.CommandPath()) > 0 && kcmdutil.IsSiblingCommandExists(cmd, "describe") {
-		o.FullCmdName = cmdParent.CommandPath()
-	}
-	o.AsNonRoot = !o.AsRoot && cmd.Flag("as-root").Changed
-
-	templateArgSpecified := o.PrintFlags.TemplatePrinterFlags != nil &&
-		o.PrintFlags.TemplatePrinterFlags.TemplateArgument != nil &&
-		len(*o.PrintFlags.TemplatePrinterFlags.TemplateArgument) > 0
-
-	outputFormatSpecified := o.PrintFlags.OutputFormat != nil && len(*o.PrintFlags.OutputFormat) > 0
-
-	// TODO: below should be turned into a method on PrintFlags
-	if outputFormatSpecified || templateArgSpecified {
-		if o.DryRun {
-			o.PrintFlags.Complete("%s (dry run)")
-		}
-		o.Printer, err = o.PrintFlags.ToPrinter()
-		if err != nil {
-			return err
-		}
-	}
-
-	config, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.Attach.Config = config
-
-	o.CoreClient, err = corev1client.NewForConfig(config)
-	if err != nil {
-		return err
-	}
-
-	o.AppsClient, err = appsv1client.NewForConfig(config)
-	if err != nil {
-		return err
-	}
-
-	o.ImageClient, err = imagev1client.NewForConfig(config)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
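The stdin/TTY switch in Complete above is dense; restated here as a standalone function for clarity. This is a simplified sketch: it drops the flag-conflict errors and the explicit --tty=false case, and every name in it is hypothetical.

```go
// resolveTTY reports whether the debug session should allocate a TTY and
// forward stdin, given the flags and whether an explicit command was passed.
func resolveTTY(forceTTY, disableTTY, noStdin, hasCommand, stdinIsTerminal bool) (tty, stdin bool) {
	stdin = true // attach defaults to forwarding stdin
	switch {
	case forceTTY:
		tty = true
	case disableTTY:
		tty = false
	case hasCommand:
		tty, stdin = false, false // commands run non-interactively by default
	default:
		tty = stdinIsTerminal
	}
	if noStdin { // -I wins over everything else
		tty, stdin = false, false
	}
	return tty, stdin
}
```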
-
-func (o DebugOptions) Validate() error {
-	if (o.AsRoot || o.AsNonRoot) && o.AsUser > 0 {
-		return fmt.Errorf("you may not specify --as-root and --as-user=%d at the same time", o.AsUser)
-	}
-	return nil
-}
-
-// Debug creates and runs a debugging pod.
-func (o *DebugOptions) RunDebug() error {
-	b := o.Builder().
-		WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
-		NamespaceParam(o.Namespace).DefaultNamespace().
-		SingleResourceType().
-		ResourceNames("pods", o.Resources...).
-		Flatten()
-	if len(o.FilenameOptions.Filenames) > 0 {
-		b.FilenameParam(o.ExplicitNamespace, &o.FilenameOptions)
-	}
-	one := false
-	infos, err := b.Do().IntoSingleItemImplied(&one).Infos()
-	if err != nil {
-		return err
-	}
-	if !one {
-		return fmt.Errorf("you must identify a resource with a pod template to debug")
-	}
-
-	templateV1, err := o.approximatePodTemplateForObject(infos[0].Object)
-	if err != nil && templateV1 == nil {
-		return fmt.Errorf("cannot debug %s: %v", infos[0].Name, err)
-	}
-	if err != nil {
-		klog.V(4).Infof("Unable to get exact template, but continuing with fallback: %v", err)
-	}
-	template := &corev1.PodTemplateSpec{}
-	if err := scheme.Scheme.Convert(templateV1, template, nil); err != nil {
-		return err
-	}
-	pod := &corev1.Pod{
-		ObjectMeta: template.ObjectMeta,
-		Spec:       template.Spec,
-	}
-	ns := infos[0].Namespace
-	if len(ns) == 0 {
-		ns = o.Namespace
-	}
-	pod.Name, pod.Namespace = fmt.Sprintf("%s-debug", generateapp.MakeSimpleName(infos[0].Name)), ns
-	o.Attach.Pod = pod
-
-	if len(o.Attach.ContainerName) == 0 && len(pod.Spec.Containers) > 0 {
-		if len(pod.Spec.Containers) > 1 && len(o.FullCmdName) > 0 {
-			fmt.Fprintf(o.ErrOut, "Defaulting container name to %s.\n", pod.Spec.Containers[0].Name)
-			fmt.Fprintf(o.ErrOut, "Use '%s describe pod/%s -n %s' to see all of the containers in this pod.\n", o.FullCmdName, pod.Name, pod.Namespace)
-			fmt.Fprintf(o.ErrOut, "\n")
-		}
-
-		klog.V(4).Infof("Defaulting container name to %s", pod.Spec.Containers[0].Name)
-		o.Attach.ContainerName = pod.Spec.Containers[0].Name
-	}
-
-	names := containerNames(o.Attach.Pod)
-	if len(names) == 0 {
-		return fmt.Errorf("the provided pod must have at least one container")
-	}
-	if len(o.Attach.ContainerName) == 0 {
-		return fmt.Errorf("you must provide a container name to debug")
-	}
-	if containerForName(o.Attach.Pod, o.Attach.ContainerName) == nil {
-		return fmt.Errorf("the container %q is not a valid container name; must be one of %v", o.Attach.ContainerName, names)
-	}
-
-	o.Annotations[debugPodAnnotationSourceResource] = fmt.Sprintf("%s/%s", infos[0].Mapping.Resource, infos[0].Name)
-	o.Annotations[debugPodAnnotationSourceContainer] = o.Attach.ContainerName
-
-	pod, originalCommand := o.transformPodForDebug(o.Annotations)
-	var commandString string
-	switch {
-	case len(originalCommand) > 0:
-		commandString = strings.Join(originalCommand, " ")
-	default:
-		commandString = ""
-	}
-
-	if o.Printer != nil {
-		return o.Printer.PrintObj(pod, o.Out)
-	}
-
-	klog.V(5).Infof("Creating pod: %#v", pod)
-	pod, err = o.createPod(pod)
-	if err != nil {
-		return err
-	}
-
-	// ensure the pod is cleaned up on shutdown
-	o.Attach.InterruptParent = interrupt.New(
-		func(os.Signal) { os.Exit(1) },
-		func() {
-			stderr := o.ErrOut
-			if stderr == nil {
-				stderr = os.Stderr
-			}
-			fmt.Fprintf(stderr, "\nRemoving debug pod ...\n")
-			if err := o.CoreClient.Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil {
-				if !kapierrors.IsNotFound(err) {
-					fmt.Fprintf(stderr, "error: unable to delete the debug pod %q: %v\n", pod.Name, err)
-				}
-			}
-		},
-	)
-
-	klog.V(5).Infof("Created attach arguments: %#v", o.Attach)
-	return o.Attach.InterruptParent.Run(func() error {
-		w, err := o.CoreClient.Pods(pod.Namespace).Watch(metav1.SingleObject(pod.ObjectMeta))
-		if err != nil {
-			return err
-		}
-		if len(commandString) > 0 {
-			fmt.Fprintf(o.ErrOut, "Starting pod/%s, command was: %s\n", pod.Name, commandString)
-		} else {
-			fmt.Fprintf(o.ErrOut, "Starting pod/%s ...\n", pod.Name)
-		}
-		if o.IsNode {
-			fmt.Fprintf(o.ErrOut, "To use host binaries, run `chroot /host`\n")
-		}
-
-		ctx, cancel := context.WithTimeout(context.Background(), o.Timeout)
-		defer cancel()
-		switch containerRunningEvent, err := watchtools.UntilWithoutRetry(ctx, w, conditions.PodContainerRunning(o.Attach.ContainerName)); {
-		// api didn't error right away but the pod wasn't even created
-		case kapierrors.IsNotFound(err):
-			msg := fmt.Sprintf("unable to create the debug pod %q", pod.Name)
-			if len(o.NodeName) > 0 {
-				msg += fmt.Sprintf(" on node %q", o.NodeName)
-			}
-			return fmt.Errorf(msg)
-			// switch to logging output
-		case err == kubectl.ErrPodCompleted, err == conditions.ErrContainerTerminated, !o.Attach.Stdin:
-			return logs.LogsOptions{
-				Object: pod,
-				Options: &corev1.PodLogOptions{
-					Container: o.Attach.ContainerName,
-					Follow:    true,
-				},
-				RESTClientGetter: o.RESTClientGetter,
-				ConsumeRequestFn: logs.DefaultConsumeRequest,
-				IOStreams:        o.IOStreams,
-				LogsForObject:    o.LogsForObject,
-			}.RunLogs()
-		case err != nil:
-			return err
-		default:
-			// TODO this doesn't do us much good for remote debugging sessions, but until we get a local port
-			// set up to proxy, this is what we've got.
-			if podWithStatus, ok := containerRunningEvent.Object.(*corev1.Pod); ok {
-				fmt.Fprintf(o.Attach.ErrOut, "Pod IP: %s\n", podWithStatus.Status.PodIP)
-			}
-
-			// TODO: attach can race with pod completion, allow attach to switch to logs
-			return o.Attach.Run()
-		}
-	})
-}
-
-// getContainerImageViaDeploymentConfig attempts to return an Image for a given
-// Container.  It tries to walk from the Container's Pod to its DeploymentConfig
-// (via the "openshift.io/deployment-config.name" annotation), then tries to
-// find the ImageStream from which the DeploymentConfig is deploying, then tries
-// to find a match for the Container's image in the ImageStream's Images.
-func (o *DebugOptions) getContainerImageViaDeploymentConfig(pod *corev1.Pod, container *corev1.Container) (*imagev1.Image, error) {
-	ref, err := reference.Parse(container.Image)
-	if err != nil {
-		return nil, err
-	}
-
-	if ref.ID == "" {
-		return nil, nil // ID is needed for later lookup
-	}
-
-	dcname := pod.Annotations[appsv1.DeploymentConfigAnnotation]
-	if dcname == "" {
-		return nil, nil // Pod doesn't appear to have been created by a DeploymentConfig
-	}
-
-	dc, err := o.AppsClient.DeploymentConfigs(o.Attach.Pod.Namespace).Get(dcname, metav1.GetOptions{})
-	if err != nil {
-		return nil, err
-	}
-
-	for _, trigger := range dc.Spec.Triggers {
-		if trigger.Type == appsv1.DeploymentTriggerOnImageChange &&
-			trigger.ImageChangeParams != nil &&
-			trigger.ImageChangeParams.From.Kind == "ImageStreamTag" {
-
-			isname, _, err := imageutil.ParseImageStreamTagName(trigger.ImageChangeParams.From.Name)
-			if err != nil {
-				return nil, err
-			}
-
-			namespace := trigger.ImageChangeParams.From.Namespace
-			if len(namespace) == 0 {
-				namespace = o.Attach.Pod.Namespace
-			}
-
-			isi, err := o.ImageClient.ImageStreamImages(namespace).Get(imageutil.JoinImageStreamImage(isname, ref.ID), metav1.GetOptions{})
-			if err != nil {
-				return nil, err
-			}
-
-			return &isi.Image, nil
-		}
-	}
-
-	return nil, nil // DeploymentConfig doesn't have an ImageChange Trigger
-}
-
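-// exampleImageStreamKeys shows, as a hedged sketch, the imageutil helpers
-// used above: ParseImageStreamTagName splits "ruby:2.5" into ("ruby", "2.5"),
-// and JoinImageStreamImage builds the "<name>@<id>" key used to look up an
-// ImageStreamImage. The stream name and image ID here are hypothetical.
-func exampleImageStreamKeys() (string, error) {
-	name, tag, err := imageutil.ParseImageStreamTagName("ruby:2.5")
-	if err != nil {
-		return "", err
-	}
-	_ = tag // "2.5"
-	return imageutil.JoinImageStreamImage(name, "sha256:0123abcd"), nil
-}
-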
-// getContainerImageViaImageStreamImport attempts to return an Image for a given
-// Container.  It does this by submitting an ImageStreamImport request with Import
-// set to false.  The request will not succeed if the backing repository
-// requires Insecure to be set to true, which cannot be hard-coded for security
-// reasons.
-func (o *DebugOptions) getContainerImageViaImageStreamImport(container *corev1.Container) (*imagev1.Image, error) {
-	isi := &imagev1.ImageStreamImport{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "oc-debug",
-		},
-		Spec: imagev1.ImageStreamImportSpec{
-			Import: false, // validate only; do not persist the import
-			Images: []imagev1.ImageImportSpec{
-				{
-					From: corev1.ObjectReference{
-						Kind: "DockerImage",
-						Name: container.Image,
-					},
-				},
-			},
-		},
-	}
-
-	isi, err := o.ImageClient.ImageStreamImports(o.Attach.Pod.Namespace).Create(isi)
-	if err != nil {
-		return nil, err
-	}
-
-	if len(isi.Status.Images) > 0 {
-		return isi.Status.Images[0].Image, nil
-	}
-
-	return nil, nil
-}
-
-func (o *DebugOptions) getContainerImageCommand(pod *corev1.Pod, container *corev1.Container) ([]string, error) {
-	if len(container.Command) > 0 {
-		return container.Command, nil
-	}
-	image, err := o.getContainerImageViaDeploymentConfig(pod, container)
-	if err != nil || image == nil {
-		image, err = o.getContainerImageViaImageStreamImport(container)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	if image == nil {
-		return nil, fmt.Errorf("no usable image found")
-	}
-
-	if err := imageutil.ImageWithMetadata(image); err != nil {
-		return nil, err
-	}
-	dockerImage, ok := image.DockerImageMetadata.Object.(*dockerv10.DockerImage)
-	if !ok {
-		return nil, fmt.Errorf("unable to read image metadata for %q", image.Name)
-	}
-
-	return append(dockerImage.Config.Entrypoint, dockerImage.Config.Cmd...), nil
-}
-
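-// exampleCombinedCommand illustrates, as a hedged sketch, how the entrypoint
-// and cmd recovered from image metadata above are combined, mirroring the
-// Docker ENTRYPOINT/CMD semantics; the values are hypothetical.
-func exampleCombinedCommand() []string {
-	entrypoint := []string{"nginx"}
-	cmd := []string{"-g", "daemon off;"}
-	return append(entrypoint, cmd...) // ["nginx", "-g", "daemon off;"]
-}
-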
-// transformPodForDebug alters the input pod to be debuggable
-func (o *DebugOptions) transformPodForDebug(annotations map[string]string) (*corev1.Pod, []string) {
-	pod := o.Attach.Pod
-
-	if !o.KeepInitContainers {
-		pod.Spec.InitContainers = nil
-	}
-
-	// reset the container
-	container := containerForName(pod, o.Attach.ContainerName)
-
-	// identify the command to be run
-	originalCommand, _ := o.getContainerImageCommand(pod, container)
-	if len(container.Command) > 0 {
-		originalCommand = container.Command
-		originalCommand = append(originalCommand, container.Args...)
-	}
-
-	if len(o.Image) > 0 {
-		container.Image = o.Image
-	}
-
-	container.Command = o.Command
-	container.Args = nil
-	container.TTY = o.Attach.Stdin && o.Attach.TTY
-	container.Stdin = o.Attach.Stdin
-	container.StdinOnce = o.Attach.Stdin
-
-	if !o.KeepReadiness {
-		container.ReadinessProbe = nil
-	}
-	if !o.KeepLiveness {
-		container.LivenessProbe = nil
-	}
-
-	var newEnv []corev1.EnvVar
-	if len(o.RemoveEnv) > 0 {
-		for i := range container.Env {
-			skip := false
-			for _, name := range o.RemoveEnv {
-				if name == container.Env[i].Name {
-					skip = true
-					break
-				}
-			}
-			if skip {
-				continue
-			}
-			newEnv = append(newEnv, container.Env[i])
-		}
-	} else {
-		newEnv = container.Env
-	}
-	newEnv = append(newEnv, o.AddEnv...)
-	container.Env = newEnv
-
-	if container.SecurityContext == nil {
-		container.SecurityContext = &corev1.SecurityContext{}
-	}
-	switch {
-	case o.AsNonRoot:
-		b := true
-		container.SecurityContext.RunAsNonRoot = &b
-	case o.AsRoot:
-		zero := int64(0)
-		container.SecurityContext.RunAsUser = &zero
-		container.SecurityContext.RunAsNonRoot = nil
-	case o.AsUser != -1:
-		container.SecurityContext.RunAsUser = &o.AsUser
-		container.SecurityContext.RunAsNonRoot = nil
-	}
-
-	if o.OneContainer {
-		pod.Spec.Containers = []corev1.Container{*container}
-	}
-
-	// reset the pod
-	if pod.Annotations == nil || !o.KeepAnnotations {
-		pod.Annotations = make(map[string]string)
-	}
-	for k, v := range annotations {
-		pod.Annotations[k] = v
-	}
-	if o.KeepLabels {
-		if pod.Labels == nil {
-			pod.Labels = make(map[string]string)
-		}
-	} else {
-		pod.Labels = map[string]string{}
-	}
-
-	pod.ResourceVersion = ""
-	pod.Spec.RestartPolicy = corev1.RestartPolicyNever
-
-	pod.Status = corev1.PodStatus{}
-	pod.UID = ""
-	pod.CreationTimestamp = metav1.Time{}
-	pod.SelfLink = ""
-
-	// clear pod ownerRefs
-	pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{}
-
-	return pod, originalCommand
-}
-
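-// filterEnv is a minimal sketch of how the env filtering inside
-// transformPodForDebug could be factored out; it is illustrative and not
-// part of the original file.
-func filterEnv(env []corev1.EnvVar, remove []string) []corev1.EnvVar {
-	var kept []corev1.EnvVar
-	for _, e := range env {
-		skip := false
-		for _, name := range remove {
-			if name == e.Name {
-				skip = true
-				break
-			}
-		}
-		if !skip {
-			kept = append(kept, e)
-		}
-	}
-	return kept
-}
-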
-// createPod creates the debug pod. If a debug pod left over from a previous
-// invocation against the same source resource already exists, it is deleted
-// and recreated; any other name collision returns an error.
-func (o *DebugOptions) createPod(pod *corev1.Pod) (*corev1.Pod, error) {
-	namespace, name := pod.Namespace, pod.Name
-
-	// create the pod
-	created, err := o.CoreClient.Pods(namespace).Create(pod)
-	if err == nil || !kapierrors.IsAlreadyExists(err) {
-		return created, err
-	}
-
-	// only continue if the pod has the right annotations
-	existing, err := o.CoreClient.Pods(namespace).Get(name, metav1.GetOptions{})
-	if err != nil {
-		return nil, err
-	}
-	if existing.Annotations[debugPodAnnotationSourceResource] != o.Annotations[debugPodAnnotationSourceResource] {
-		return nil, fmt.Errorf("a pod already exists named %q, please delete it before running debug", name)
-	}
-
-	// delete the existing pod
-	if err := o.CoreClient.Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)); err != nil && !kapierrors.IsNotFound(err) {
-		return nil, fmt.Errorf("unable to delete existing debug pod %q: %v", name, err)
-	}
-	return o.CoreClient.Pods(namespace).Create(pod)
-}
-
-func containerForName(pod *corev1.Pod, name string) *corev1.Container {
-	for i, c := range pod.Spec.Containers {
-		if c.Name == name {
-			return &pod.Spec.Containers[i]
-		}
-	}
-	for i, c := range pod.Spec.InitContainers {
-		if c.Name == name {
-			return &pod.Spec.InitContainers[i]
-		}
-	}
-	return nil
-}
-
-func containerNames(pod *corev1.Pod) []string {
-	var names []string
-	for _, c := range pod.Spec.Containers {
-		names = append(names, c.Name)
-	}
-	return names
-}
-
-func (o *DebugOptions) approximatePodTemplateForObject(object runtime.Object) (*corev1.PodTemplateSpec, error) {
-	switch t := object.(type) {
-	case *corev1.Node:
-		o.IsNode = true
-		if len(o.NodeName) > 0 {
-			return nil, fmt.Errorf("you may not set --node-name when debugging a node")
-		}
-		if o.AsNonRoot || o.AsUser > 0 {
-			// TODO: allow --as-root=false to skip all the namespaces except network
-			return nil, fmt.Errorf("can't debug nodes without running as the root user")
-		}
-		image := o.Image
-		if len(o.Image) == 0 {
-			istag, err := o.ImageClient.ImageStreamTags("openshift").Get("tools:latest", metav1.GetOptions{})
-			if err == nil && len(istag.Image.DockerImageReference) > 0 {
-				image = istag.Image.DockerImageReference
-			}
-		}
-		if len(image) == 0 {
-			image = "registry.redhat.io/rhel7/support-tools"
-		}
-		zero := int64(0)
-		isTrue := true
-		hostPathType := corev1.HostPathDirectory
-		return &corev1.PodTemplateSpec{
-			Spec: corev1.PodSpec{
-				NodeName:    t.Name,
-				HostNetwork: true,
-				HostPID:     true,
-				Volumes: []corev1.Volume{
-					{
-						Name: "host",
-						VolumeSource: corev1.VolumeSource{
-							HostPath: &corev1.HostPathVolumeSource{
-								Path: "/",
-								Type: &hostPathType,
-							},
-						},
-					},
-				},
-				RestartPolicy: corev1.RestartPolicyNever,
-				Containers: []corev1.Container{
-					{
-						Name:  "container-00",
-						Image: image,
-						SecurityContext: &corev1.SecurityContext{
-							Privileged: &isTrue,
-							RunAsUser:  &zero,
-						},
-						VolumeMounts: []corev1.VolumeMount{
-							{
-								Name:      "host",
-								MountPath: "/host",
-							},
-						},
-					},
-				},
-			},
-		}, nil
-	case *imagev1.ImageStreamTag:
-		// create a minimal pod spec that uses the image referenced by the istag without any introspection
-		// it's possible we could do a better job of introspecting it someday
-		return setNodeName(&corev1.PodTemplateSpec{
-			Spec: corev1.PodSpec{
-				RestartPolicy: corev1.RestartPolicyNever,
-				Containers: []corev1.Container{
-					{Name: "container-00", Image: t.Image.DockerImageReference},
-				},
-			},
-		}, o.NodeName), nil
-	case *imagev1.ImageStreamImage:
-		// create a minimal pod spec that uses the image referenced by the istag without any introspection
-		// it's possible we could do a better job of introspecting it someday
-		return setNodeName(&corev1.PodTemplateSpec{
-			Spec: corev1.PodSpec{
-				RestartPolicy: corev1.RestartPolicyNever,
-				Containers: []corev1.Container{
-					{Name: "container-00", Image: t.Image.DockerImageReference},
-				},
-			},
-		}, o.NodeName), nil
-	case *appsv1.DeploymentConfig:
-		fallback := t.Spec.Template
-
-		latestDeploymentName := appsutil.LatestDeploymentNameForConfig(t)
-		deployment, err := o.CoreClient.ReplicationControllers(t.Namespace).Get(latestDeploymentName, metav1.GetOptions{})
-		if err != nil {
-			return setNodeName(fallback, o.NodeName), err
-		}
-
-		fallback = deployment.Spec.Template
-
-		pods, err := o.CoreClient.Pods(deployment.Namespace).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(deployment.Spec.Selector).String()})
-		if err != nil {
-			return setNodeName(fallback, o.NodeName), err
-		}
-
-		// If we have any pods available, find the newest
-		// pod with regard to our most recent deployment.
-		// If the fallback PodTemplateSpec is nil, prefer
-		// the newest pod available.
-		for i := range pods.Items {
-			pod := &pods.Items[i]
-			if fallback == nil || pod.CreationTimestamp.Before(&fallback.CreationTimestamp) {
-				fallback = &corev1.PodTemplateSpec{
-					ObjectMeta: pod.ObjectMeta,
-					Spec:       pod.Spec,
-				}
-			}
-		}
-		return setNodeName(fallback, o.NodeName), nil
-
-	case *corev1.Pod:
-		return setNodeName(&corev1.PodTemplateSpec{
-			ObjectMeta: t.ObjectMeta,
-			Spec:       t.Spec,
-		}, o.NodeName), nil
-
-	// ReplicationController
-	case *corev1.ReplicationController:
-		return setNodeName(t.Spec.Template, o.NodeName), nil
-
-	// ReplicaSet
-	case *extensionsv1beta1.ReplicaSet:
-		return setNodeName(&t.Spec.Template, o.NodeName), nil
-	case *kappsv1beta2.ReplicaSet:
-		return setNodeName(&t.Spec.Template, o.NodeName), nil
-	case *kappsv1.ReplicaSet:
-		return setNodeName(&t.Spec.Template, o.NodeName), nil
-
-	// Deployment
-	case *extensionsv1beta1.Deployment:
-		return setNodeName(&t.Spec.Template, o.NodeName), nil
-	case *kappsv1beta1.Deployment:
-		return setNodeName(&t.Spec.Template, o.NodeName), nil
-	case *kappsv1beta2.Deployment:
-		return setNodeName(&t.Spec.Template, o.NodeName), nil
-	case *kappsv1.Deployment:
-		return setNodeName(&t.Spec.Template, o.NodeName), nil
-
-	// StatefulSet
-	case *kappsv1.StatefulSet:
-		return setNodeName(&t.Spec.Template, o.NodeName), nil
-	case *kappsv1beta2.StatefulSet:
-		return setNodeName(&t.Spec.Template, o.NodeName), nil
-	case *kappsv1beta1.StatefulSet:
-		return setNodeName(&t.Spec.Template, o.NodeName), nil
-
-	// DaemonSet
-	case *extensionsv1beta1.DaemonSet:
-		return setNodeName(&t.Spec.Template, o.NodeName), nil
-	case *kappsv1beta2.DaemonSet:
-		return setNodeName(&t.Spec.Template, o.NodeName), nil
-	case *kappsv1.DaemonSet:
-		return setNodeName(&t.Spec.Template, o.NodeName), nil
-
-	// Job
-	case *batchv1.Job:
-		return setNodeName(&t.Spec.Template, o.NodeName), nil
-
-	// CronJob
-	case *batchv1beta1.CronJob:
-		return setNodeName(&t.Spec.JobTemplate.Spec.Template, o.NodeName), nil
-	case *batchv2alpha1.CronJob:
-		return setNodeName(&t.Spec.JobTemplate.Spec.Template, o.NodeName), nil
-	}
-
-	return nil, fmt.Errorf("unable to extract pod template from type %v", reflect.TypeOf(object))
-}
-
-func setNodeName(template *corev1.PodTemplateSpec, nodeName string) *corev1.PodTemplateSpec {
-	template.Spec.NodeName = nodeName
-	return template
-}
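-
-// exampleTemplateForDeployment shows, as a hedged sketch, how the extraction
-// above is used: any supported workload object yields a PodTemplateSpec
-// pinned to the requested node. The empty Deployment literal is hypothetical.
-func (o *DebugOptions) exampleTemplateForDeployment() (*corev1.PodTemplateSpec, error) {
-	template, err := o.approximatePodTemplateForObject(&kappsv1.Deployment{})
-	if err != nil {
-		return nil, err
-	}
-	// template.Spec.NodeName now matches o.NodeName.
-	return template, nil
-}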
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/OWNERS b/vendor/github.com/openshift/oc/pkg/cli/deployer/OWNERS
deleted file mode 100644
index 7d67d53c299c..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/OWNERS
+++ /dev/null
@@ -1,6 +0,0 @@
-reviewers:
-  - tnozicka
-  - mfojtik
-approvers:
-  - tnozicka
-  - mfojtik
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/deployer.go b/vendor/github.com/openshift/oc/pkg/cli/deployer/deployer.go
deleted file mode 100644
index c36fcd644ece..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/deployer.go
+++ /dev/null
@@ -1,283 +0,0 @@
-package deployer
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"sort"
-	"strconv"
-	"time"
-
-	"github.com/spf13/cobra"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/client-go/kubernetes"
-	kv1core "k8s.io/client-go/kubernetes/typed/core/v1"
-	restclient "k8s.io/client-go/rest"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/kubectl"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-	imageclientv1 "github.com/openshift/client-go/image/clientset/versioned"
-
-	"github.com/openshift/library-go/pkg/apps/appsserialization"
-	"github.com/openshift/library-go/pkg/apps/appsutil"
-	"github.com/openshift/oc/pkg/cli/deployer/strategy"
-	"github.com/openshift/oc/pkg/cli/deployer/strategy/recreate"
-	"github.com/openshift/oc/pkg/cli/deployer/strategy/rolling"
-	"github.com/openshift/oc/pkg/version"
-)
-
-var (
-	deployerLong = templates.LongDesc(`
-		Perform a deployment
-
-		This command launches a deployment as described by a deployment configuration. It accepts the name
-		of a replication controller created by a deployment and runs that deployment to completion. You can
-		use the --until flag to run the deployment until you reach the specified condition.
-
-		Available conditions:
-
-		* "start": after old deployments are scaled to zero
-		* "pre": after the pre hook completes (even if no hook specified)
-		* "mid": after the mid hook completes (even if no hook specified)
-		* A percentage of the deployment, based on which strategy is in use
-		  * "0%"   Recreate after the previous deployment is scaled to zero
-		  * "N%"   Recreate after the acceptance check if this is not the first deployment
-		  * "0%"   Rolling  before the rolling deployment is started, equivalent to "pre"
-		  * "N%"   Rolling  the percentage of pods in the target deployment that are ready
-		  * "100%" All      after the deployment is at full scale, but before the post hook runs
-
-		Unrecognized conditions are ignored and the deployment runs to completion. You can run this
-		command multiple times when --until is specified; hooks will only be executed once.`)
-)
-
-type config struct {
-	Out, ErrOut io.Writer
-
-	rcName    string
-	Namespace string
-
-	Until string
-}
-
-// NewCommandDeployer provides a CLI handler for deploy.
-func NewCommandDeployer(name string) *cobra.Command {
-	cfg := &config{}
-
-	cmd := &cobra.Command{
-		Use:   fmt.Sprintf("%s [--until=CONDITION]", name),
-		Short: "Run the deployer",
-		Long:  deployerLong,
-		Run: func(c *cobra.Command, args []string) {
-			cfg.Out = os.Stdout
-			cfg.ErrOut = c.OutOrStderr()
-			err := cfg.RunDeployer()
-			if strategy.IsConditionReached(err) {
-				fmt.Fprintf(os.Stdout, "--> %s\n", err.Error())
-				return
-			}
-			kcmdutil.CheckErr(err)
-		},
-	}
-
-	cmd.AddCommand(NewCmdVersion(name, version.Get(), os.Stdout))
-
-	flag := cmd.Flags()
-	flag.StringVar(&cfg.rcName, "deployment", os.Getenv("OPENSHIFT_DEPLOYMENT_NAME"), "The deployment name to start")
-	flag.StringVar(&cfg.Namespace, "namespace", os.Getenv("OPENSHIFT_DEPLOYMENT_NAMESPACE"), "The deployment namespace")
-	flag.StringVar(&cfg.Until, "until", "", "Exit the deployment when this condition is met. See help for more details")
-
-	return cmd
-}
-
-func (cfg *config) RunDeployer() error {
-	if len(cfg.rcName) == 0 {
-		return fmt.Errorf("--deployment or OPENSHIFT_DEPLOYMENT_NAME is required")
-	}
-	if len(cfg.Namespace) == 0 {
-		return fmt.Errorf("--namespace or OPENSHIFT_DEPLOYMENT_NAMESPACE is required")
-	}
-
-	kcfg, err := restclient.InClusterConfig()
-	if err != nil {
-		return err
-	}
-	openshiftImageClient, err := imageclientv1.NewForConfig(kcfg)
-	if err != nil {
-		return err
-	}
-	kubeClient, err := kubernetes.NewForConfig(kcfg)
-	if err != nil {
-		return err
-	}
-
-	deployer := NewDeployer(kubeClient, openshiftImageClient, cfg.Out, cfg.ErrOut, cfg.Until)
-	return deployer.Deploy(cfg.Namespace, cfg.rcName)
-}
-
-// NewDeployer makes a new Deployer from a kube client.
-func NewDeployer(kubeClient kubernetes.Interface, images imageclientv1.Interface, out, errOut io.Writer,
-	until string) *Deployer {
-	return &Deployer{
-		out:    out,
-		errOut: errOut,
-		until:  until,
-		getDeployment: func(namespace, name string) (*corev1.ReplicationController, error) {
-			return kubeClient.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
-		},
-		getDeployments: func(namespace, configName string) (*corev1.ReplicationControllerList, error) {
-			return kubeClient.CoreV1().ReplicationControllers(namespace).List(metav1.ListOptions{
-				LabelSelector: appsutil.ConfigSelector(configName).String(),
-			})
-		},
-		scaler: kubectl.NewScaler(appsutil.NewReplicationControllerScaleClient(kubeClient)),
-		strategyFor: func(config *appsv1.DeploymentConfig) (strategy.DeploymentStrategy, error) {
-			switch config.Spec.Strategy.Type {
-			case appsv1.DeploymentStrategyTypeRecreate:
-				return recreate.NewRecreateDeploymentStrategy(kubeClient, images.ImageV1(),
-					&kv1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}, out, errOut, until), nil
-			case appsv1.DeploymentStrategyTypeRolling:
-				recreateDeploymentStrategy := recreate.NewRecreateDeploymentStrategy(kubeClient, images.ImageV1(),
-					&kv1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}, out, errOut, until)
-				return rolling.NewRollingDeploymentStrategy(config.Namespace, kubeClient, images.ImageV1(),
-					recreateDeploymentStrategy, out, errOut, until), nil
-			default:
-				return nil, fmt.Errorf("unsupported strategy type: %s", config.Spec.Strategy.Type)
-			}
-		},
-	}
-}
-
-// Deployer prepares and executes the deployment process. It will:
-//
-// 1. Validate the deployment has a desired replica count and strategy.
-// 2. Find the last completed deployment.
-// 3. Scale down to 0 any old deployments which aren't the new deployment or
-// the last complete deployment.
-// 4. Pass the last completed deployment and the new deployment to a strategy
-// to perform the deployment.
-type Deployer struct {
-	// out and errOut control display when deploy is invoked
-	out, errOut io.Writer
-	// until is a condition to run until
-	until string
-	// strategyFor returns a DeploymentStrategy for config.
-	strategyFor func(config *appsv1.DeploymentConfig) (strategy.DeploymentStrategy, error)
-	// getDeployment finds the named deployment.
-	getDeployment func(namespace, name string) (*corev1.ReplicationController, error)
-	// getDeployments finds all deployments associated with a config.
-	getDeployments func(namespace, configName string) (*corev1.ReplicationControllerList, error)
-	// scaler is used to scale replication controllers.
-	scaler kubectl.Scaler
-}
-
-// Deploy starts the deployment process for rcName.
-func (d *Deployer) Deploy(namespace, rcName string) error {
-	// Look up the new deployment.
-	to, err := d.getDeployment(namespace, rcName)
-	if err != nil {
-		return fmt.Errorf("couldn't get deployment %s: %v", rcName, err)
-	}
-
-	// Decode the config from the deployment.
-	// TODO: Remove this once we are sure there are no internal versions of configs serialized in DC
-	config, err := appsserialization.DecodeDeploymentConfig(to)
-	if err != nil {
-		return fmt.Errorf("couldn't decode deployment config from deployment %s: %v", to.Name, err)
-	}
-
-	// Get a strategy for the deployment.
-	s, err := d.strategyFor(config)
-	if err != nil {
-		return err
-	}
-
-	// New deployments must have a desired replica count.
-	desiredReplicas, hasDesired := deploymentDesiredReplicas(to)
-	if !hasDesired {
-		return fmt.Errorf("deployment %s has already run to completion", to.Name)
-	}
-
-	// Find all deployments for the config.
-	unsortedDeployments, err := d.getDeployments(namespace, config.Name)
-	if err != nil {
-		return fmt.Errorf("couldn't get controllers in namespace %s: %v", namespace, err)
-	}
-	deployments := make([]*corev1.ReplicationController, 0, len(unsortedDeployments.Items))
-	for i := range unsortedDeployments.Items {
-		deployments = append(deployments, &unsortedDeployments.Items[i])
-	}
-
-	// Sort all the deployments by version.
-	sort.Sort(appsutil.ByLatestVersionDesc(deployments))
-
-	// Find any last completed deployment.
-	var from *corev1.ReplicationController
-	for _, candidate := range deployments {
-		if candidate.Name == to.Name {
-			continue
-		}
-		if appsutil.IsCompleteDeployment(candidate) {
-			from = candidate
-			break
-		}
-	}
-
-	if appsutil.DeploymentVersionFor(to) < appsutil.DeploymentVersionFor(from) {
-		return fmt.Errorf("deployment %s is older than %s", to.Name, from.Name)
-	}
-
-	// Scale down any deployments which aren't the new or last deployment.
-	for _, candidate := range deployments {
-		// Skip the from/to deployments.
-		if candidate.Name == to.Name {
-			continue
-		}
-		if from != nil && candidate.Name == from.Name {
-			continue
-		}
-		// Skip the deployment if it's already scaled down.
-		if candidate.Spec.Replicas == nil || *candidate.Spec.Replicas == 0 {
-			continue
-		}
-		// Scale the deployment down to zero.
-		retryWaitParams := kubectl.NewRetryParams(1*time.Second, 120*time.Second)
-		if err := d.scaler.Scale(candidate.Namespace, candidate.Name, uint(0), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retryWaitParams, retryWaitParams, kapi.Resource("replicationcontrollers")); err != nil {
-			fmt.Fprintf(d.errOut, "error: Couldn't scale down prior deployment %s: %v\n", appsutil.LabelForDeployment(candidate), err)
-		} else {
-			fmt.Fprintf(d.out, "--> Scaled older deployment %s down\n", candidate.Name)
-		}
-	}
-
-	if d.until == "start" {
-		return strategy.NewConditionReachedErr("Ready to start deployment")
-	}
-
-	// Perform the deployment.
-	if err := s.Deploy(from, to, int(desiredReplicas)); err != nil {
-		return err
-	}
-	fmt.Fprintln(d.out, "--> Success")
-	return nil
-}
-
-func int32AnnotationFor(obj runtime.Object, key string) (int32, bool) {
-	s := appsutil.AnnotationFor(obj, key)
-	if len(s) == 0 {
-		return 0, false
-	}
-	i, err := strconv.ParseInt(s, 10, 32)
-	if err != nil {
-		return 0, false
-	}
-	return int32(i), true
-}
-
-// deploymentDesiredReplicas returns the number of desired replicas for the given replication controller
-func deploymentDesiredReplicas(obj runtime.Object) (int32, bool) {
-	return int32AnnotationFor(obj, appsv1.DesiredReplicasAnnotation)
-}
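-
-// exampleDesiredReplicas shows, as a hedged sketch, the annotation round-trip
-// the helper above expects; in a real cluster the annotation is written by
-// the deployment controller rather than set by hand.
-func exampleDesiredReplicas() (int32, bool) {
-	rc := &corev1.ReplicationController{
-		ObjectMeta: metav1.ObjectMeta{
-			Annotations: map[string]string{appsv1.DesiredReplicasAnnotation: "3"},
-		},
-	}
-	return deploymentDesiredReplicas(rc) // 3, true
-}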
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/deployer_test.go b/vendor/github.com/openshift/oc/pkg/cli/deployer/deployer_test.go
deleted file mode 100644
index 81dfafc5ee12..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/deployer_test.go
+++ /dev/null
@@ -1,244 +0,0 @@
-package deployer
-
-import (
-	"bytes"
-	"fmt"
-	"strconv"
-	"testing"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/kubernetes/pkg/kubectl"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-
-	"github.com/openshift/library-go/pkg/apps/appsutil"
-	"github.com/openshift/oc/pkg/cli/deployer/strategy"
-	"github.com/openshift/oc/pkg/cli/deployer/strategy/util/appstest"
-)
-
-func TestDeployer_getDeploymentFail(t *testing.T) {
-	deployer := &Deployer{
-		strategyFor: func(config *appsv1.DeploymentConfig) (strategy.DeploymentStrategy, error) {
-			t.Fatal("unexpected call")
-			return nil, nil
-		},
-		getDeployment: func(namespace, name string) (*corev1.ReplicationController, error) {
-			return nil, fmt.Errorf("get error")
-		},
-		getDeployments: func(namespace, configName string) (*corev1.ReplicationControllerList, error) {
-			t.Fatal("unexpected call")
-			return nil, nil
-		},
-		scaler: &FakeScaler{},
-	}
-
-	err := deployer.Deploy("namespace", "name")
-	if err == nil {
-		t.Fatalf("expected an error")
-	}
-	t.Logf("got expected error: %v", err)
-}
-
-func TestDeployer_deployScenarios(t *testing.T) {
-	mkd := func(version int64, status appsv1.DeploymentStatus, replicas int32, desired int32) *corev1.ReplicationController {
-		deployment := mkdeployment(version, status)
-		deployment.Spec.Replicas = &replicas
-		if desired > 0 {
-			deployment.Annotations[appsv1.DesiredReplicasAnnotation] = strconv.Itoa(int(desired))
-		}
-		return deployment
-	}
-	type scaleEvent struct {
-		version int64
-		size    int32
-	}
-	scenarios := []struct {
-		name        string
-		deployments []*corev1.ReplicationController
-		fromVersion int64
-		toVersion   int64
-		scaleEvents []scaleEvent
-	}{
-		{
-			"initial deployment",
-			// existing deployments
-			[]*corev1.ReplicationController{
-				mkd(1, appsv1.DeploymentStatusNew, 0, 3),
-			},
-			// from and to version
-			0, 1,
-			// expected scale events
-			[]scaleEvent{},
-		},
-		{
-			"last deploy failed",
-			// existing deployments
-			[]*corev1.ReplicationController{
-				mkd(1, appsv1.DeploymentStatusComplete, 3, 0),
-				mkd(2, appsv1.DeploymentStatusFailed, 1, 3),
-				mkd(3, appsv1.DeploymentStatusNew, 0, 3),
-			},
-			// from and to version
-			1, 3,
-			// expected scale events
-			[]scaleEvent{
-				{2, 0},
-			},
-		},
-		{
-			"sequential complete",
-			// existing deployments
-			[]*corev1.ReplicationController{
-				mkd(1, appsv1.DeploymentStatusComplete, 0, 0),
-				mkd(2, appsv1.DeploymentStatusComplete, 3, 0),
-				mkd(3, appsv1.DeploymentStatusNew, 0, 3),
-			},
-			// from and to version
-			2, 3,
-			// expected scale events
-			[]scaleEvent{},
-		},
-		{
-			"sequential failure",
-			// existing deployments
-			[]*corev1.ReplicationController{
-				mkd(1, appsv1.DeploymentStatusFailed, 1, 3),
-				mkd(2, appsv1.DeploymentStatusFailed, 1, 3),
-				mkd(3, appsv1.DeploymentStatusNew, 0, 3),
-			},
-			// from and to version
-			0, 3,
-			// expected scale events
-			[]scaleEvent{
-				{1, 0},
-				{2, 0},
-			},
-		},
-		{
-			"version mismatch",
-			// existing deployments
-			[]*corev1.ReplicationController{
-				mkd(1, appsv1.DeploymentStatusComplete, 0, 0),
-				mkd(2, appsv1.DeploymentStatusNew, 3, 0),
-				mkd(3, appsv1.DeploymentStatusComplete, 0, 3),
-			},
-			// from and to version
-			3, 2,
-			// expected scale events
-			[]scaleEvent{},
-		},
-	}
-
-	for _, s := range scenarios {
-		t.Logf("executing scenario %s", s.name)
-		findDeployment := func(version int64) *corev1.ReplicationController {
-			for _, d := range s.deployments {
-				if appsutil.DeploymentVersionFor(d) == version {
-					return d
-				}
-			}
-			return nil
-		}
-
-		var actualFrom, actualTo *corev1.ReplicationController
-		to := findDeployment(s.toVersion)
-		scaler := &FakeScaler{}
-
-		deployer := &Deployer{
-			out:    &bytes.Buffer{},
-			errOut: &bytes.Buffer{},
-			strategyFor: func(config *appsv1.DeploymentConfig) (strategy.DeploymentStrategy, error) {
-				return &testStrategy{
-					deployFunc: func(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int) error {
-						actualFrom = from
-						actualTo = to
-						return nil
-					},
-				}, nil
-			},
-			getDeployment: func(namespace, name string) (*corev1.ReplicationController, error) {
-				return to, nil
-			},
-			getDeployments: func(namespace, configName string) (*corev1.ReplicationControllerList, error) {
-				list := &corev1.ReplicationControllerList{}
-				for _, d := range s.deployments {
-					list.Items = append(list.Items, *d)
-				}
-				return list, nil
-			},
-			scaler: scaler,
-		}
-
-		err := deployer.Deploy(to.Namespace, to.Name)
-		if s.toVersion < s.fromVersion {
-			if err == nil {
-				t.Fatalf("expected error when toVersion is older than fromVersion")
-			}
-			continue
-		}
-		if err != nil {
-			t.Fatalf("unexpected error: %v", err)
-		}
-
-		if s.fromVersion > 0 {
-			if e, a := s.fromVersion, appsutil.DeploymentVersionFor(actualFrom); e != a {
-				t.Fatalf("expected from.latestVersion %d, got %d", e, a)
-			}
-		}
-		if e, a := s.toVersion, appsutil.DeploymentVersionFor(actualTo); e != a {
-			t.Fatalf("expected to.latestVersion %d, got %d", e, a)
-		}
-		if e, a := len(s.scaleEvents), len(scaler.Events); e != a {
-			t.Fatalf("expected %d scale events, got %d", e, a)
-		}
-		for _, expected := range s.scaleEvents {
-			expectedTo := findDeployment(expected.version)
-			expectedWasScaled := false
-			for _, actual := range scaler.Events {
-				if actual.Name != expectedTo.Name {
-					continue
-				}
-				if e, a := uint(expected.size), actual.Size; e != a {
-					t.Fatalf("expected version %d to be scaled to %d, got %d", expected.version, e, a)
-				}
-				expectedWasScaled = true
-			}
-			if !expectedWasScaled {
-				t.Fatalf("expected version %d to be scaled to %d, but it wasn't scaled at all", expected.version, expected.size)
-			}
-		}
-	}
-}
-
-func mkdeployment(version int64, status appsv1.DeploymentStatus) *corev1.ReplicationController {
-	deployment, _ := appsutil.MakeDeployment(appstest.OkDeploymentConfig(version))
-	deployment.Annotations[appsv1.DeploymentStatusAnnotation] = string(status)
-	return deployment
-}
-
-type testStrategy struct {
-	deployFunc func(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int) error
-}
-
-func (t *testStrategy) Deploy(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int) error {
-	return t.deployFunc(from, to, desiredReplicas)
-}
-
-type FakeScaler struct {
-	Events []ScaleEvent
-}
-
-type ScaleEvent struct {
-	Name string
-	Size uint
-}
-
-func (t *FakeScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, wait *kubectl.RetryParams, resource schema.GroupResource) error {
-	t.Events = append(t.Events, ScaleEvent{name, newSize})
-	return nil
-}
-
-func (t *FakeScaler) ScaleSimple(namespace, name string, preconditions *kubectl.ScalePrecondition, newSize uint, resource schema.GroupResource) (string, error) {
-	return "", fmt.Errorf("unexpected call to ScaleSimple")
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/doc.go b/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/doc.go
deleted file mode 100644
index bcd9ff8e0ba9..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/doc.go
+++ /dev/null
@@ -1,5 +0,0 @@
-// Package strategy contains implementations of core deployment strategies.
-//
-// The code in this package logs more verbosely than usual because it is
-// intended to run as a standalone CLI inside a container.
-package strategy
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/interfaces.go b/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/interfaces.go
deleted file mode 100644
index 4430bbb48563..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/interfaces.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package strategy
-
-import (
-	"strconv"
-	"strings"
-
-	corev1 "k8s.io/api/core/v1"
-)
-
-// DeploymentStrategy knows how to make a deployment active.
-type DeploymentStrategy interface {
-	// Deploy transitions an old deployment to a new one.
-	Deploy(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int) error
-}
-
-// UpdateAcceptor is given a chance to accept or reject the new controller
-// during a deployment each time the controller is scaled up.
-//
-// After the successful scale-up of the controller, the controller is given to
-// the UpdateAcceptor. If the UpdateAcceptor rejects the controller, the
-// deployment is stopped with an error.
-//
-// DEPRECATED: Acceptance checking has been incorporated into the rolling
-// strategy, but we still need this around to support Recreate.
-type UpdateAcceptor interface {
-	// Accept returns nil if the controller is okay, otherwise returns an error.
-	Accept(*corev1.ReplicationController) error
-}
-
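-// acceptAlways is a minimal UpdateAcceptor sketch for illustration (not part
-// of the original file); the real strategies use an availability-based
-// acceptor from the support package.
-type acceptAlways struct{}
-
-// Accept approves every controller unconditionally.
-func (acceptAlways) Accept(*corev1.ReplicationController) error { return nil }
-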
-type errConditionReached struct {
-	msg string
-}
-
-func NewConditionReachedErr(msg string) error {
-	return &errConditionReached{msg: msg}
-}
-
-func (e *errConditionReached) Error() string {
-	return e.msg
-}
-
-func IsConditionReached(err error) bool {
-	value, ok := err.(*errConditionReached)
-	return ok && value != nil
-}
-
-func PercentageBetween(until string, min, max int) bool {
-	i, ok := Percentage(until)
-	return ok && i >= min && i <= max
-}
-
-func Percentage(until string) (int, bool) {
-	if !strings.HasSuffix(until, "%") {
-		return 0, false
-	}
-	until = until[:len(until)-1]
-	i, err := strconv.Atoi(until)
-	if err != nil {
-		return 0, false
-	}
-	return i, true
-}
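-
-// examplePercentages shows, as a hedged sketch, how the helpers above parse
-// the --until values documented on the deployer command.
-func examplePercentages() {
-	if v, ok := Percentage("50%"); ok {
-		_ = v // v == 50
-	}
-	_ = PercentageBetween("50%", 1, 99)   // true
-	_ = PercentageBetween("start", 1, 99) // false: no "%" suffix
-}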
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/recreate/recreate.go b/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/recreate/recreate.go
deleted file mode 100644
index bd0893446086..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/recreate/recreate.go
+++ /dev/null
@@ -1,309 +0,0 @@
-package recreate
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"strings"
-	"time"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/kubernetes"
-	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-	"k8s.io/client-go/scale"
-	"k8s.io/client-go/tools/record"
-	"k8s.io/client-go/util/retry"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-
-	imageclienttyped "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
-
-	"github.com/openshift/library-go/pkg/apps/appsserialization"
-	"github.com/openshift/library-go/pkg/apps/appsutil"
-
-	strat "github.com/openshift/oc/pkg/cli/deployer/strategy"
-	stratsupport "github.com/openshift/oc/pkg/cli/deployer/strategy/support"
-	stratutil "github.com/openshift/oc/pkg/cli/deployer/strategy/util"
-)
-
-// RecreateDeploymentStrategy is a simple strategy appropriate as a default.
-// Its behavior is to scale the last deployment down to zero and then scale
-// the new deployment up to the desired replica count.
-//
-// A failure to disable any existing deployments will be considered a
-// deployment failure.
-type RecreateDeploymentStrategy struct {
-	// out and errOut control where output is sent during the strategy
-	out, errOut io.Writer
-	// until is a condition that, if reached, will cause the strategy to exit early
-	until string
-	// rcClient is a client to access replication controllers
-	rcClient corev1client.ReplicationControllersGetter
-	// scaleClient is a client to access scaling
-	scaleClient scale.ScalesGetter
-	// podClient is used to list and watch pods.
-	podClient corev1client.PodsGetter
-	// eventClient is a client to access events
-	eventClient corev1client.EventsGetter
-	// getUpdateAcceptor returns an UpdateAcceptor to verify the first replica
-	// of the deployment.
-	getUpdateAcceptor func(time.Duration, int32) strat.UpdateAcceptor
-	// decoder is used to decode DeploymentConfigs contained in deployments.
-	decoder runtime.Decoder
-	// hookExecutor can execute a lifecycle hook.
-	hookExecutor stratsupport.HookExecutor
-	// events records the events
-	events record.EventSink
-}
-
-// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
-// a real HookExecutor and client.
-func NewRecreateDeploymentStrategy(kubeClient kubernetes.Interface, imageClient imageclienttyped.ImageStreamTagsGetter, events record.EventSink, out, errOut io.Writer,
-	until string) *RecreateDeploymentStrategy {
-	if out == nil {
-		out = ioutil.Discard
-	}
-	if errOut == nil {
-		errOut = ioutil.Discard
-	}
-
-	return &RecreateDeploymentStrategy{
-		out:         out,
-		errOut:      errOut,
-		events:      events,
-		until:       until,
-		rcClient:    kubeClient.CoreV1(),
-		scaleClient: appsutil.NewReplicationControllerScaleClient(kubeClient),
-		eventClient: kubeClient.CoreV1(),
-		podClient:   kubeClient.CoreV1(),
-		getUpdateAcceptor: func(timeout time.Duration, minReadySeconds int32) strat.UpdateAcceptor {
-			return stratsupport.NewAcceptAvailablePods(out, kubeClient.CoreV1(), timeout)
-		},
-		hookExecutor: stratsupport.NewHookExecutor(kubeClient, imageClient, os.Stdout),
-	}
-}
-
-// Deploy makes the new deployment active and disables old deployments.
-func (s *RecreateDeploymentStrategy) Deploy(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int) error {
-	return s.DeployWithAcceptor(from, to, desiredReplicas, nil)
-}
-
-// DeployWithAcceptor scales down from and then scales up to. If
-// updateAcceptor is provided and the desired replica count is >1, the first
-// replica of to is rolled out and validated before performing the full scale
-// up.
-//
-// This is currently only used in conjunction with the rolling update strategy
-// for initial deployments.
-func (s *RecreateDeploymentStrategy) DeployWithAcceptor(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int,
-	updateAcceptor strat.UpdateAcceptor) error {
-	config, err := appsserialization.DecodeDeploymentConfig(to)
-	if err != nil {
-		return fmt.Errorf("couldn't decode config from deployment %s: %v", to.Name, err)
-	}
-
-	recreateTimeout := time.Duration(appsutil.DefaultRecreateTimeoutSeconds) * time.Second
-	params := config.Spec.Strategy.RecreateParams
-	rollingParams := config.Spec.Strategy.RollingParams
-
-	if params != nil && params.TimeoutSeconds != nil {
-		recreateTimeout = time.Duration(*params.TimeoutSeconds) * time.Second
-	}
-
-	// When doing the initial rollout for the rolling strategy we use recreate, and for that we
-	// have to set the timeout based on the rolling strategy parameters.
-	if rollingParams != nil && rollingParams.TimeoutSeconds != nil {
-		recreateTimeout = time.Duration(*rollingParams.TimeoutSeconds) * time.Second
-	}
-
-	if updateAcceptor == nil {
-		updateAcceptor = s.getUpdateAcceptor(recreateTimeout, config.Spec.MinReadySeconds)
-	}
-
-	// Execute any pre-hook.
-	if params != nil && params.Pre != nil {
-		if err := s.hookExecutor.Execute(params.Pre, to, appsutil.PreHookPodSuffix, "pre"); err != nil {
-			return fmt.Errorf("pre hook failed: %s", err)
-		}
-	}
-
-	if s.until == "pre" {
-		return strat.NewConditionReachedErr("pre hook succeeded")
-	}
-
-	// Record all warnings
-	defer stratutil.RecordConfigWarnings(s.eventClient, from, s.out)
-	defer stratutil.RecordConfigWarnings(s.eventClient, to, s.out)
-
-	// Scale down the from deployment.
-	if from != nil {
-		fmt.Fprintf(s.out, "--> Scaling %s down to zero\n", from.Name)
-		_, err := s.scaleAndWait(from, 0, recreateTimeout)
-		if err != nil {
-			return fmt.Errorf("couldn't scale %s to 0: %v", from.Name, err)
-		}
-		// Wait for pods to terminate, reusing the timeout computed above so a
-		// nil RecreateParams (e.g. the rolling strategy path) cannot panic.
-		s.waitForTerminatedPods(from, recreateTimeout)
-	}
-
-	if s.until == "0%" {
-		return strat.NewConditionReachedErr("Reached 0% (no running pods)")
-	}
-
-	if params != nil && params.Mid != nil {
-		if err := s.hookExecutor.Execute(params.Mid, to, appsutil.MidHookPodSuffix, "mid"); err != nil {
-			return fmt.Errorf("mid hook failed: %s", err)
-		}
-	}
-
-	if s.until == "mid" {
-		return strat.NewConditionReachedErr("mid hook succeeded")
-	}
-
-	accepted := false
-
-	// Scale up the to deployment.
-	if desiredReplicas > 0 {
-		if from != nil {
-			// Scale up to 1 and validate the replica,
-			// aborting if the replica isn't acceptable.
-			fmt.Fprintf(s.out, "--> Scaling %s to 1 before performing acceptance check\n", to.Name)
-			updatedTo, err := s.scaleAndWait(to, 1, recreateTimeout)
-			if err != nil {
-				return fmt.Errorf("couldn't scale %s to 1: %v", to.Name, err)
-			}
-			if err := updateAcceptor.Accept(updatedTo); err != nil {
-				return fmt.Errorf("update acceptor rejected %s: %v", to.Name, err)
-			}
-			accepted = true
-			to = updatedTo
-
-			if strat.PercentageBetween(s.until, 1, 99) {
-				return strat.NewConditionReachedErr(fmt.Sprintf("Reached %s", s.until))
-			}
-		}
-
-		// Complete the scale up.
-		if to.Spec.Replicas == nil || *to.Spec.Replicas != int32(desiredReplicas) {
-			fmt.Fprintf(s.out, "--> Scaling %s to %d\n", to.Name, desiredReplicas)
-			updatedTo, err := s.scaleAndWait(to, desiredReplicas, recreateTimeout)
-			if err != nil {
-				return fmt.Errorf("couldn't scale %s to %d: %v", to.Name, desiredReplicas, err)
-			}
-
-			to = updatedTo
-		}
-
-		if !accepted {
-			if err := updateAcceptor.Accept(to); err != nil {
-				return fmt.Errorf("update acceptor rejected %s: %v", to.Name, err)
-			}
-		}
-	}
-
-	if (from == nil && strat.PercentageBetween(s.until, 1, 100)) || (from != nil && s.until == "100%") {
-		return strat.NewConditionReachedErr(fmt.Sprintf("Reached %s", s.until))
-	}
-
-	// Execute any post-hook.
-	if params != nil && params.Post != nil {
-		if err := s.hookExecutor.Execute(params.Post, to, appsutil.PostHookPodSuffix, "post"); err != nil {
-			return fmt.Errorf("post hook failed: %s", err)
-		}
-	}
-
-	return nil
-}
-
-func (s *RecreateDeploymentStrategy) scaleAndWait(deployment *corev1.ReplicationController, replicas int, retryTimeout time.Duration) (*corev1.ReplicationController, error) {
-	if deployment.Spec.Replicas != nil && int32(replicas) == *deployment.Spec.Replicas && int32(replicas) == deployment.Status.Replicas {
-		return deployment, nil
-	}
-	alreadyScaled := false
-	// Scale the replication controller.
-	// In case the cache is not fully synced, retry the scaling.
-	err := wait.PollImmediate(1*time.Second, retryTimeout, func() (bool, error) {
-		updateScaleErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
-			curScale, err := s.scaleClient.Scales(deployment.Namespace).Get(kapi.Resource("replicationcontrollers"), deployment.Name)
-			if err != nil {
-				return err
-			}
-			if curScale.Status.Replicas == int32(replicas) {
-				alreadyScaled = true
-				return nil
-			}
-			curScaleCopy := curScale.DeepCopy()
-			curScaleCopy.Spec.Replicas = int32(replicas)
-			_, scaleErr := s.scaleClient.Scales(deployment.Namespace).Update(kapi.Resource("replicationcontrollers"), curScaleCopy)
-			return scaleErr
-		})
-		// FIXME: The error admission returns here should be 503 (come back later) or similar.
-		if errors.IsForbidden(updateScaleErr) && strings.Contains(updateScaleErr.Error(), "not yet ready to handle request") {
-			return false, nil
-		}
-		return true, updateScaleErr
-	})
-	if err != nil {
-		return nil, err
-	}
-	// Wait for the scale to take effect.
-	if !alreadyScaled {
-		// FIXME: This should really be a watch, however the scaler client does not implement the watch interface atm.
-		err = wait.PollImmediate(1*time.Second, retryTimeout, func() (bool, error) {
-			curScale, err := s.scaleClient.Scales(deployment.Namespace).Get(kapi.Resource("replicationcontrollers"), deployment.Name)
-			if err != nil {
-				return false, err
-			}
-			return curScale.Status.Replicas == int32(replicas), nil
-		})
-		if err != nil {
-			return nil, err
-		}
-	}
-	return s.rcClient.ReplicationControllers(deployment.Namespace).Get(deployment.Name, metav1.GetOptions{})
-}
-
-// hasRunningPod returns true if there is at least one pod in non-terminal state.
-func hasRunningPod(pods []corev1.Pod) bool {
-	for _, pod := range pods {
-		switch pod.Status.Phase {
-		case corev1.PodFailed, corev1.PodSucceeded:
-			// Don't count pods in terminal state.
-			continue
-		case corev1.PodUnknown:
-			// This happens in situation like when the node is temporarily disconnected from the cluster.
-			// If we can't be sure that the pod is not running, we have to count it.
-			return true
-		default:
-			// Pod is not in terminal phase.
-			return true
-		}
-	}
-
-	return false
-}
-
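-// exampleHasRunningPod shows, as a hedged sketch, the phase handling above:
-// a list containing only terminal pods reports no running pod, while an
-// Unknown-phase pod must be counted as possibly still running.
-func exampleHasRunningPod() bool {
-	pods := []corev1.Pod{
-		{Status: corev1.PodStatus{Phase: corev1.PodSucceeded}},
-		{Status: corev1.PodStatus{Phase: corev1.PodFailed}},
-	}
-	return hasRunningPod(pods) // false
-}
-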
-// waitForTerminatedPods waits until all pods for the provided replication controller are terminated.
-func (s *RecreateDeploymentStrategy) waitForTerminatedPods(rc *corev1.ReplicationController, timeout time.Duration) {
-	// Poll until every pod selected by the controller reaches a terminal phase.
-	err := wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {
-		podList, err := s.podClient.Pods(rc.Namespace).List(metav1.ListOptions{
-			LabelSelector: labels.SelectorFromValidatedSet(labels.Set(rc.Spec.Selector)).String(),
-		})
-		if err != nil {
-			fmt.Fprintf(s.out, "--> ERROR: Cannot list pods: %v\n", err)
-			return false, nil
-		}
-
-		if hasRunningPod(podList.Items) {
-			return false, nil
-		}
-
-		return true, nil
-	})
-	if err != nil {
-		fmt.Fprintf(s.out, "--> Failed to wait for old pods to be terminated: %v\nNew pods may be scaled up before old pods get terminated!\n", err)
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/recreate/recreate_test.go b/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/recreate/recreate_test.go
deleted file mode 100644
index 9687cd831a23..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/recreate/recreate_test.go
+++ /dev/null
@@ -1,437 +0,0 @@
-package recreate
-
-import (
-	"bytes"
-	"fmt"
-	"testing"
-	"time"
-
-	autoscalingv1 "k8s.io/api/autoscaling/v1"
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/client-go/kubernetes/fake"
-	kcoreclient "k8s.io/client-go/kubernetes/typed/core/v1"
-	scalefake "k8s.io/client-go/scale/fake"
-	clientgotesting "k8s.io/client-go/testing"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-
-	"github.com/openshift/library-go/pkg/apps/appsutil"
-	appsstrategy "github.com/openshift/oc/pkg/cli/deployer/strategy"
-	"github.com/openshift/oc/pkg/cli/deployer/strategy/util/appstest"
-)
-
-func getUpdateAcceptor(timeout time.Duration, minReadySeconds int32) appsstrategy.UpdateAcceptor {
-	return &testAcceptor{
-		acceptFn: func(deployment *corev1.ReplicationController) error {
-			return nil
-		},
-	}
-}
-
-func recreateParams(timeout int64, preFailurePolicy, midFailurePolicy, postFailurePolicy appsv1.LifecycleHookFailurePolicy) appsv1.DeploymentStrategy {
-	var pre, mid, post *appsv1.LifecycleHook
-	if len(preFailurePolicy) > 0 {
-		pre = &appsv1.LifecycleHook{
-			FailurePolicy: preFailurePolicy,
-			ExecNewPod:    &appsv1.ExecNewPodHook{},
-		}
-	}
-	if len(midFailurePolicy) > 0 {
-		mid = &appsv1.LifecycleHook{
-			FailurePolicy: midFailurePolicy,
-			ExecNewPod:    &appsv1.ExecNewPodHook{},
-		}
-	}
-	if len(postFailurePolicy) > 0 {
-		post = &appsv1.LifecycleHook{
-			FailurePolicy: postFailurePolicy,
-			ExecNewPod:    &appsv1.ExecNewPodHook{},
-		}
-	}
-	return appsv1.DeploymentStrategy{
-		Type: appsv1.DeploymentStrategyTypeRecreate,
-		RecreateParams: &appsv1.RecreateDeploymentStrategyParams{
-			TimeoutSeconds: &timeout,
-			Pre:            pre,
-			Mid:            mid,
-			Post:           post,
-		},
-	}
-}
-
-type testAcceptor struct {
-	acceptFn func(*corev1.ReplicationController) error
-}
-
-func (t *testAcceptor) Accept(deployment *corev1.ReplicationController) error {
-	return t.acceptFn(deployment)
-}
-
-type fakeControllerClient struct {
-	deployment *corev1.ReplicationController
-	fakeClient *fake.Clientset
-
-	scaleEvents []*autoscalingv1.Scale
-}
-
-func (c *fakeControllerClient) ReplicationControllers(ns string) kcoreclient.ReplicationControllerInterface {
-	return c.fakeClient.CoreV1().ReplicationControllers(ns)
-}
-
-func (c *fakeControllerClient) scaledOnce() bool {
-	return len(c.scaleEvents) == 1
-}
-
-func (c *fakeControllerClient) fakeScaleClient() *scalefake.FakeScaleClient {
-	scaleFakeClient := &scalefake.FakeScaleClient{}
-	scaleFakeClient.AddReactor("get", "replicationcontrollers", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-		obj := &autoscalingv1.Scale{}
-		obj.Status.Replicas = c.deployment.Status.Replicas
-		return true, obj, nil
-	})
-	scaleFakeClient.AddReactor("update", "replicationcontrollers", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-		updateAction := action.(clientgotesting.UpdateAction)
-		scaleObj := updateAction.GetObject().(*autoscalingv1.Scale)
-		c.scaleEvents = append(c.scaleEvents, scaleObj)
-		c.deployment.Spec.Replicas = &scaleObj.Spec.Replicas
-		c.deployment.Status.Replicas = scaleObj.Spec.Replicas
-		return true, scaleObj, nil
-	})
-	return scaleFakeClient
-}
-
-func newFakeControllerClient(deployment *corev1.ReplicationController) *fakeControllerClient {
-	c := &fakeControllerClient{deployment: deployment}
-	c.fakeClient = fake.NewSimpleClientset(c.deployment)
-	return c
-}
-
-type fakePodClient struct {
-	deployerName string
-}
-
-func (c *fakePodClient) Pods(ns string) kcoreclient.PodInterface {
-	deployerPod := &corev1.Pod{}
-	deployerPod.Name = c.deployerName
-	deployerPod.Namespace = ns
-	deployerPod.Status = corev1.PodStatus{}
-	return fake.NewSimpleClientset(deployerPod).CoreV1().Pods(ns)
-}
-
-type hookExecutorImpl struct {
-	executeFunc func(hook *appsv1.LifecycleHook, deployment *corev1.ReplicationController, suffix, label string) error
-}
-
-func (h *hookExecutorImpl) Execute(hook *appsv1.LifecycleHook, rc *corev1.ReplicationController, suffix, label string) error {
-	return h.executeFunc(hook, rc, suffix, label)
-}
-
-func TestRecreate_initialDeployment(t *testing.T) {
-	var deployment *corev1.ReplicationController
-	strategy := &RecreateDeploymentStrategy{
-		out:               &bytes.Buffer{},
-		errOut:            &bytes.Buffer{},
-		getUpdateAcceptor: getUpdateAcceptor,
-		eventClient:       fake.NewSimpleClientset().CoreV1(),
-	}
-
-	config := appstest.OkDeploymentConfig(1)
-	config.Spec.Strategy = recreateParams(30, "", "", "")
-	deployment, _ = appsutil.MakeDeployment(config)
-
-	controllerClient := newFakeControllerClient(deployment)
-	strategy.rcClient = controllerClient
-	strategy.scaleClient = controllerClient.fakeScaleClient()
-	strategy.podClient = &fakePodClient{deployerName: appsutil.DeployerPodNameForDeployment(deployment.Name)}
-
-	err := strategy.Deploy(nil, deployment, 3)
-	if err != nil {
-		t.Fatalf("unexpected deploy error: %#v", err)
-	}
-
-	if !controllerClient.scaledOnce() {
-		t.Fatalf("expected 1 scale call, got %d", len(controllerClient.scaleEvents))
-	}
-}
-
-func TestRecreate_deploymentPreHookSuccess(t *testing.T) {
-	config := appstest.OkDeploymentConfig(1)
-	config.Spec.Strategy = recreateParams(30, appsv1.LifecycleHookFailurePolicyAbort, "", "")
-	deployment, _ := appsutil.MakeDeployment(config)
-	controllerClient := newFakeControllerClient(deployment)
-
-	hookExecuted := false
-	strategy := &RecreateDeploymentStrategy{
-		out:               &bytes.Buffer{},
-		errOut:            &bytes.Buffer{},
-		getUpdateAcceptor: getUpdateAcceptor,
-		eventClient:       fake.NewSimpleClientset().CoreV1(),
-		rcClient:          controllerClient,
-		scaleClient:       controllerClient.fakeScaleClient(),
-		hookExecutor: &hookExecutorImpl{
-			executeFunc: func(hook *appsv1.LifecycleHook, deployment *corev1.ReplicationController, suffix, label string) error {
-				hookExecuted = true
-				return nil
-			},
-		},
-	}
-	strategy.podClient = &fakePodClient{deployerName: appsutil.DeployerPodNameForDeployment(deployment.Name)}
-
-	err := strategy.Deploy(nil, deployment, 2)
-	if err != nil {
-		t.Fatalf("unexpected deploy error: %#v", err)
-	}
-	if !hookExecuted {
-		t.Fatalf("expected hook execution")
-	}
-}
-
-func TestRecreate_deploymentPreHookFail(t *testing.T) {
-	config := appstest.OkDeploymentConfig(1)
-	config.Spec.Strategy = recreateParams(30, appsv1.LifecycleHookFailurePolicyAbort, "", "")
-	deployment, _ := appsutil.MakeDeployment(config)
-	controllerClient := newFakeControllerClient(deployment)
-
-	strategy := &RecreateDeploymentStrategy{
-		out:               &bytes.Buffer{},
-		errOut:            &bytes.Buffer{},
-		getUpdateAcceptor: getUpdateAcceptor,
-		eventClient:       fake.NewSimpleClientset().CoreV1(),
-		rcClient:          controllerClient,
-		scaleClient:       controllerClient.fakeScaleClient(),
-		hookExecutor: &hookExecutorImpl{
-			executeFunc: func(hook *appsv1.LifecycleHook, deployment *corev1.ReplicationController, suffix, label string) error {
-				return fmt.Errorf("hook execution failure")
-			},
-		},
-	}
-	strategy.podClient = &fakePodClient{deployerName: appsutil.DeployerPodNameForDeployment(deployment.Name)}
-
-	err := strategy.Deploy(nil, deployment, 2)
-	if err == nil {
-		t.Fatalf("expected a deploy error")
-	}
-
-	if len(controllerClient.scaleEvents) > 0 {
-		t.Fatalf("unexpected scaling events: %v", controllerClient.scaleEvents)
-	}
-}
-
-func TestRecreate_deploymentMidHookFail(t *testing.T) {
-	config := appstest.OkDeploymentConfig(1)
-	config.Spec.Strategy = recreateParams(30, "", appsv1.LifecycleHookFailurePolicyAbort, "")
-	deployment, _ := appsutil.MakeDeployment(config)
-	controllerClient := newFakeControllerClient(deployment)
-
-	strategy := &RecreateDeploymentStrategy{
-		out:               &bytes.Buffer{},
-		errOut:            &bytes.Buffer{},
-		rcClient:          controllerClient,
-		scaleClient:       controllerClient.fakeScaleClient(),
-		eventClient:       fake.NewSimpleClientset().CoreV1(),
-		getUpdateAcceptor: getUpdateAcceptor,
-		hookExecutor: &hookExecutorImpl{
-			executeFunc: func(hook *appsv1.LifecycleHook, deployment *corev1.ReplicationController, suffix, label string) error {
-				return fmt.Errorf("hook execution failure")
-			},
-		},
-	}
-	strategy.podClient = &fakePodClient{deployerName: appsutil.DeployerPodNameForDeployment(deployment.Name)}
-
-	err := strategy.Deploy(nil, deployment, 2)
-	if err == nil {
-		t.Fatalf("expected a deploy error")
-	}
-
-	if len(controllerClient.scaleEvents) > 0 {
-		t.Fatalf("unexpected scaling events: %v", controllerClient.scaleEvents)
-	}
-}
-
-func TestRecreate_deploymentPostHookSuccess(t *testing.T) {
-	config := appstest.OkDeploymentConfig(1)
-	config.Spec.Strategy = recreateParams(30, "", "", appsv1.LifecycleHookFailurePolicyAbort)
-	deployment, _ := appsutil.MakeDeployment(config)
-	controllerClient := newFakeControllerClient(deployment)
-
-	hookExecuted := false
-	strategy := &RecreateDeploymentStrategy{
-		out:               &bytes.Buffer{},
-		errOut:            &bytes.Buffer{},
-		rcClient:          controllerClient,
-		scaleClient:       controllerClient.fakeScaleClient(),
-		eventClient:       fake.NewSimpleClientset().CoreV1(),
-		getUpdateAcceptor: getUpdateAcceptor,
-		hookExecutor: &hookExecutorImpl{
-			executeFunc: func(hook *appsv1.LifecycleHook, deployment *corev1.ReplicationController, suffix, label string) error {
-				hookExecuted = true
-				return nil
-			},
-		},
-	}
-	strategy.podClient = &fakePodClient{deployerName: appsutil.DeployerPodNameForDeployment(deployment.Name)}
-
-	err := strategy.Deploy(nil, deployment, 2)
-	if err != nil {
-		t.Fatalf("unexpected deploy error: %#v", err)
-	}
-	if !hookExecuted {
-		t.Fatalf("expected hook execution")
-	}
-}
-
-func TestRecreate_deploymentPostHookFail(t *testing.T) {
-	config := appstest.OkDeploymentConfig(1)
-	config.Spec.Strategy = recreateParams(30, "", "", appsv1.LifecycleHookFailurePolicyAbort)
-	deployment, _ := appsutil.MakeDeployment(config)
-	controllerClient := newFakeControllerClient(deployment)
-
-	hookExecuted := false
-	strategy := &RecreateDeploymentStrategy{
-		out:               &bytes.Buffer{},
-		errOut:            &bytes.Buffer{},
-		rcClient:          controllerClient,
-		scaleClient:       controllerClient.fakeScaleClient(),
-		eventClient:       fake.NewSimpleClientset().CoreV1(),
-		getUpdateAcceptor: getUpdateAcceptor,
-		hookExecutor: &hookExecutorImpl{
-			executeFunc: func(hook *appsv1.LifecycleHook, deployment *corev1.ReplicationController, suffix, label string) error {
-				hookExecuted = true
-				return fmt.Errorf("post hook failure")
-			},
-		},
-	}
-	strategy.podClient = &fakePodClient{deployerName: appsutil.DeployerPodNameForDeployment(deployment.Name)}
-
-	err := strategy.Deploy(nil, deployment, 2)
-	if err == nil {
-		t.Fatalf("unexpected non deploy error: %#v", err)
-	}
-	if !hookExecuted {
-		t.Fatalf("expected hook execution")
-	}
-}
-
-func TestRecreate_acceptorSuccess(t *testing.T) {
-	var deployment *corev1.ReplicationController
-	strategy := &RecreateDeploymentStrategy{
-		out:         &bytes.Buffer{},
-		errOut:      &bytes.Buffer{},
-		eventClient: fake.NewSimpleClientset().CoreV1(),
-	}
-
-	acceptorCalled := false
-	acceptor := &testAcceptor{
-		acceptFn: func(deployment *corev1.ReplicationController) error {
-			acceptorCalled = true
-			return nil
-		},
-	}
-
-	oldDeployment, _ := appsutil.MakeDeployment(appstest.OkDeploymentConfig(1))
-	deployment, _ = appsutil.MakeDeployment(appstest.OkDeploymentConfig(2))
-	controllerClient := newFakeControllerClient(deployment)
-	strategy.rcClient = controllerClient
-	strategy.scaleClient = controllerClient.fakeScaleClient()
-	strategy.podClient = &fakePodClient{deployerName: appsutil.DeployerPodNameForDeployment(deployment.Name)}
-
-	err := strategy.DeployWithAcceptor(oldDeployment, deployment, 2, acceptor)
-	if err != nil {
-		t.Fatalf("unexpected deploy error: %#v", err)
-	}
-
-	if !acceptorCalled {
-		t.Fatalf("expected acceptor to be called")
-	}
-
-	if len(controllerClient.scaleEvents) != 2 {
-		t.Fatalf("expected 2 scale calls, got %d", len(controllerClient.scaleEvents))
-	}
-	if r := controllerClient.scaleEvents[0].Spec.Replicas; r != 1 {
-		t.Fatalf("expected first scale event to be 1 replica, got %d", r)
-	}
-
-	if r := controllerClient.scaleEvents[1].Spec.Replicas; r != 2 {
-		t.Fatalf("expected second scale event to be 2 replica, got %d", r)
-	}
-}
-
-func TestRecreate_acceptorSuccessWithColdCaches(t *testing.T) {
-	var deployment *corev1.ReplicationController
-	strategy := &RecreateDeploymentStrategy{
-		out:         &bytes.Buffer{},
-		errOut:      &bytes.Buffer{},
-		eventClient: fake.NewSimpleClientset().CoreV1(),
-	}
-
-	acceptorCalled := false
-	acceptor := &testAcceptor{
-		acceptFn: func(deployment *corev1.ReplicationController) error {
-			acceptorCalled = true
-			return nil
-		},
-	}
-
-	oldDeployment, _ := appsutil.MakeDeployment(appstest.OkDeploymentConfig(1))
-	deployment, _ = appsutil.MakeDeployment(appstest.OkDeploymentConfig(2))
-	controllerClient := newFakeControllerClient(deployment)
-
-	strategy.rcClient = controllerClient
-	strategy.scaleClient = controllerClient.fakeScaleClient()
-	strategy.podClient = &fakePodClient{deployerName: appsutil.DeployerPodNameForDeployment(deployment.Name)}
-
-	err := strategy.DeployWithAcceptor(oldDeployment, deployment, 2, acceptor)
-	if err != nil {
-		t.Fatalf("unexpected deploy error: %#v", err)
-	}
-
-	if !acceptorCalled {
-		t.Fatalf("expected acceptor to be called")
-	}
-
-	if len(controllerClient.scaleEvents) != 2 {
-		t.Fatalf("expected 2 scale calls, got %d", len(controllerClient.scaleEvents))
-	}
-	if r := controllerClient.scaleEvents[0]; r.Spec.Replicas != 1 {
-		t.Errorf("expected first scale event to be 1 replica, got %d", r.Spec.Replicas)
-	}
-	if r := controllerClient.scaleEvents[1]; r.Spec.Replicas != 2 {
-		t.Errorf("expected second scale event to be 2 replicas, got %d", r.Spec.Replicas)
-	}
-}
-
-func TestRecreate_acceptorFail(t *testing.T) {
-	var deployment *corev1.ReplicationController
-
-	strategy := &RecreateDeploymentStrategy{
-		out:         &bytes.Buffer{},
-		errOut:      &bytes.Buffer{},
-		eventClient: fake.NewSimpleClientset().CoreV1(),
-	}
-
-	acceptor := &testAcceptor{
-		acceptFn: func(deployment *corev1.ReplicationController) error {
-			return fmt.Errorf("rejected")
-		},
-	}
-
-	oldDeployment, _ := appsutil.MakeDeployment(appstest.OkDeploymentConfig(1))
-	deployment, _ = appsutil.MakeDeployment(appstest.OkDeploymentConfig(2))
-	rcClient := newFakeControllerClient(deployment)
-	strategy.rcClient = rcClient
-	strategy.scaleClient = rcClient.fakeScaleClient()
-	strategy.podClient = &fakePodClient{deployerName: appsutil.DeployerPodNameForDeployment(deployment.Name)}
-	err := strategy.DeployWithAcceptor(oldDeployment, deployment, 2, acceptor)
-	if err == nil {
-		t.Fatalf("expected a deployment failure")
-	}
-	t.Logf("got expected error: %v", err)
-
-	if len(rcClient.scaleEvents) != 1 {
-		t.Fatalf("expected 1 scale call, got %d", len(rcClient.scaleEvents))
-	}
-	if r := rcClient.scaleEvents[0]; r.Spec.Replicas != 1 {
-		t.Errorf("expected first scale event to be 1 replica, got %d", r.Spec.Replicas)
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/rolling/rolling.go b/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/rolling/rolling.go
deleted file mode 100644
index 8c0124aff101..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/rolling/rolling.go
+++ /dev/null
@@ -1,276 +0,0 @@
-package rolling
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"time"
-
-	corev1 "k8s.io/api/core/v1"
-	kerrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/kubernetes"
-	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-
-	imageclienttyped "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
-
-	"github.com/openshift/library-go/pkg/apps/appsserialization"
-	"github.com/openshift/library-go/pkg/apps/appsutil"
-	strat "github.com/openshift/oc/pkg/cli/deployer/strategy"
-	stratsupport "github.com/openshift/oc/pkg/cli/deployer/strategy/support"
-	stratutil "github.com/openshift/oc/pkg/cli/deployer/strategy/util"
-)
-
-const (
-	defaultAPIRetryPeriod  = 1 * time.Second
-	defaultAPIRetryTimeout = 10 * time.Second
-)
-
-// RollingDeploymentStrategy is a Strategy which implements rolling
-// deployments using the upstream Kubernetes RollingUpdater.
-//
-// Currently, there are some caveats:
-//
-// 1. When there is no existing prior deployment, deployment delegates to
-// another strategy.
-// 2. The interface to the RollingUpdater is not very clean.
-//
-// These caveats can be resolved with future upstream refactorings to
-// RollingUpdater[1][2].
-//
-// [1] https://github.com/kubernetes/kubernetes/pull/7183
-// [2] https://github.com/kubernetes/kubernetes/issues/7851
-type RollingDeploymentStrategy struct {
-	// out and errOut control where output is sent during the strategy
-	out, errOut io.Writer
-	// until is a condition that, if reached, will cause the strategy to exit early
-	until string
-	// initialStrategy is used when there are no prior deployments.
-	initialStrategy acceptingDeploymentStrategy
-	// rcClient is used to deal with ReplicationControllers.
-	rcClient corev1client.ReplicationControllersGetter
-	// eventClient is a client to access events
-	eventClient corev1client.EventsGetter
-	// rollingUpdate knows how to perform a rolling update.
-	rollingUpdate func(config *RollingUpdaterConfig) error
-	// hookExecutor can execute a lifecycle hook.
-	hookExecutor stratsupport.HookExecutor
-	// getUpdateAcceptor returns an UpdateAcceptor to verify the first replica
-	// of the deployment.
-	getUpdateAcceptor func(time.Duration, int32) strat.UpdateAcceptor
-	// apiRetryPeriod is how long to wait before retrying a failed API call.
-	apiRetryPeriod time.Duration
-	// apiRetryTimeout is how long to retry API calls before giving up.
-	apiRetryTimeout time.Duration
-}
-
-// acceptingDeploymentStrategy is a DeploymentStrategy which accepts an
-// injected UpdateAcceptor as part of the deploy function. This is a hack to
-// support using the Recreate strategy for initial deployments and should be
-// removed when https://github.com/kubernetes/kubernetes/pull/7183 is
-// fixed.
-type acceptingDeploymentStrategy interface {
-	DeployWithAcceptor(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int,
-		updateAcceptor strat.UpdateAcceptor) error
-}
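-
-// What follows is a hedged illustration added for this write-up, not part of
-// the original file: any type with a matching DeployWithAcceptor method
-// satisfies acceptingDeploymentStrategy, so a no-op stub like this one could
-// stand in for the real recreate strategy in a sketch or test:
-//
-//	type noopInitialStrategy struct{}
-//
-//	func (noopInitialStrategy) DeployWithAcceptor(from, to *corev1.ReplicationController, desiredReplicas int, _ strat.UpdateAcceptor) error {
-//		return nil
-//	}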
-
-// NewRollingDeploymentStrategy makes a new RollingDeploymentStrategy.
-func NewRollingDeploymentStrategy(namespace string, kubeClient kubernetes.Interface, imageClient imageclienttyped.ImageStreamTagsGetter,
-	initialStrategy acceptingDeploymentStrategy, out, errOut io.Writer, until string) *RollingDeploymentStrategy {
-	if out == nil {
-		out = ioutil.Discard
-	}
-	if errOut == nil {
-		errOut = ioutil.Discard
-	}
-
-	return &RollingDeploymentStrategy{
-		out:             out,
-		errOut:          errOut,
-		until:           until,
-		initialStrategy: initialStrategy,
-		rcClient:        kubeClient.CoreV1(),
-		eventClient:     kubeClient.CoreV1(),
-		apiRetryPeriod:  defaultAPIRetryPeriod,
-		apiRetryTimeout: defaultAPIRetryTimeout,
-		rollingUpdate: func(config *RollingUpdaterConfig) error {
-			updater := NewRollingUpdater(namespace, kubeClient.CoreV1(), kubeClient.CoreV1(), appsutil.NewReplicationControllerScaleClient(kubeClient))
-			return updater.Update(config)
-		},
-		hookExecutor: stratsupport.NewHookExecutor(kubeClient, imageClient, os.Stdout),
-		getUpdateAcceptor: func(timeout time.Duration, minReadySeconds int32) strat.UpdateAcceptor {
-			return stratsupport.NewAcceptAvailablePods(out, kubeClient.CoreV1(), timeout)
-		},
-	}
-}
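-
-// A hedged usage sketch (illustration only, not part of the original file),
-// assuming a fake clientset from k8s.io/client-go/kubernetes/fake and a nil
-// image client, which only holds up while no image-change hooks run:
-//
-//	kubeClient := fake.NewSimpleClientset()
-//	s := NewRollingDeploymentStrategy("default", kubeClient, nil,
-//		noopInitialStrategy{}, os.Stdout, os.Stderr, "")
-//	err := s.Deploy(oldRC, newRC, 2)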
-
-func (s *RollingDeploymentStrategy) Deploy(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int) error {
-	config, err := appsserialization.DecodeDeploymentConfig(to)
-	if err != nil {
-		return fmt.Errorf("couldn't decode DeploymentConfig from deployment %s: %v", appsutil.LabelForDeployment(to), err)
-	}
-
-	params := config.Spec.Strategy.RollingParams
-	updateAcceptor := s.getUpdateAcceptor(time.Duration(*params.TimeoutSeconds)*time.Second, config.Spec.MinReadySeconds)
-
-	// If there's no prior deployment, delegate to another strategy since the
-	// rolling updater only supports transitioning between two deployments.
-	//
-	// Hook support is duplicated here for now. When the rolling updater can
-	// handle initial deployments, all of this code can go away.
-	if from == nil {
-		// Execute any pre-hook.
-		if params.Pre != nil {
-			if err := s.hookExecutor.Execute(params.Pre, to, appsutil.PreHookPodSuffix, "pre"); err != nil {
-				return fmt.Errorf("pre hook failed: %s", err)
-			}
-		}
-
-		// Execute the delegate strategy.
-		err := s.initialStrategy.DeployWithAcceptor(from, to, desiredReplicas, updateAcceptor)
-		if err != nil {
-			return err
-		}
-
-		// Execute any post-hook; a failure here aborts the deployment with an error.
-		if params.Post != nil {
-			if err := s.hookExecutor.Execute(params.Post, to, appsutil.PostHookPodSuffix, "post"); err != nil {
-				return fmt.Errorf("post hook failed: %s", err)
-			}
-		}
-
-		// All done.
-		return nil
-	}
-
-	// Record all warnings
-	defer stratutil.RecordConfigWarnings(s.eventClient, from, s.out)
-	defer stratutil.RecordConfigWarnings(s.eventClient, to, s.out)
-
-	// Prepare for a rolling update.
-	// Execute any pre-hook.
-	if params.Pre != nil {
-		if err := s.hookExecutor.Execute(params.Pre, to, appsutil.PreHookPodSuffix, "pre"); err != nil {
-			return fmt.Errorf("pre hook failed: %s", err)
-		}
-	}
-
-	if s.until == "pre" {
-		return strat.NewConditionReachedErr("pre hook succeeded")
-	}
-
-	if s.until == "0%" {
-		return strat.NewConditionReachedErr("Reached 0% (before rollout)")
-	}
-
-	// HACK: Assign the source ID annotation that the rolling updater expects,
-	// unless it already exists on the deployment.
-	//
-	// Related upstream issue:
-	// https://github.com/kubernetes/kubernetes/pull/7183
-	err = wait.Poll(s.apiRetryPeriod, s.apiRetryTimeout, func() (done bool, err error) {
-		existing, err := s.rcClient.ReplicationControllers(to.Namespace).Get(to.Name, metav1.GetOptions{})
-		if err != nil {
-			msg := fmt.Sprintf("couldn't look up deployment %s: %s", to.Name, err)
-			if kerrors.IsNotFound(err) {
-				return false, fmt.Errorf("%s", msg)
-			}
-			// Try again.
-			fmt.Fprintln(s.errOut, "error:", msg)
-			return false, nil
-		}
-		if _, hasSourceId := existing.Annotations[sourceIdAnnotation]; !hasSourceId {
-			existing.Annotations[sourceIdAnnotation] = fmt.Sprintf("%s:%s", from.Name, from.ObjectMeta.UID)
-			if _, err := s.rcClient.ReplicationControllers(existing.Namespace).Update(existing); err != nil {
-				msg := fmt.Sprintf("couldn't assign source annotation to deployment %s: %v", existing.Name, err)
-				if kerrors.IsNotFound(err) {
-					return false, fmt.Errorf("%s", msg)
-				}
-				// Try again.
-				fmt.Fprintln(s.errOut, "error:", msg)
-				return false, nil
-			}
-		}
-		return true, nil
-	})
-	if err != nil {
-		return err
-	}
-	to, err = s.rcClient.ReplicationControllers(to.Namespace).Get(to.Name, metav1.GetOptions{})
-	if err != nil {
-		return err
-	}
-
-	// HACK: There's a validation in the rolling updater which assumes that when
-	// an existing RC is supplied, it will have >0 replicas, a validation that is
-	// then disregarded because the desired count is obtained from the annotation
-	// on the RC. For now, fake it out by just setting replicas to 1.
-	//
-	// Related upstream issue:
-	// https://github.com/kubernetes/kubernetes/pull/7183
-	one := int32(1)
-	to.Spec.Replicas = &one
-
-	// Perform a rolling update.
-	rollingConfig := &RollingUpdaterConfig{
-		Out:             &rollingUpdaterWriter{w: s.out},
-		OldRc:           from,
-		NewRc:           to,
-		UpdatePeriod:    time.Duration(*params.UpdatePeriodSeconds) * time.Second,
-		Interval:        time.Duration(*params.IntervalSeconds) * time.Second,
-		Timeout:         time.Duration(*params.TimeoutSeconds) * time.Second,
-		MinReadySeconds: config.Spec.MinReadySeconds,
-		CleanupPolicy:   PreserveRollingUpdateCleanupPolicy,
-		OnProgress: func(oldRc, newRc *corev1.ReplicationController, percentage int) error {
-			if expect, ok := strat.Percentage(s.until); ok && percentage >= expect {
-				return strat.NewConditionReachedErr(fmt.Sprintf("Reached %s (currently %d%%)", s.until, percentage))
-			}
-			return nil
-		},
-	}
-	if params.MaxSurge != nil {
-		rollingConfig.MaxSurge = *params.MaxSurge
-	}
-	if params.MaxUnavailable != nil {
-		rollingConfig.MaxUnavailable = *params.MaxUnavailable
-	}
-	if err := s.rollingUpdate(rollingConfig); err != nil {
-		return err
-	}
-
-	// Execute any post-hook.
-	if params.Post != nil {
-		if err := s.hookExecutor.Execute(params.Post, to, appsutil.PostHookPodSuffix, "post"); err != nil {
-			return fmt.Errorf("post hook failed: %s", err)
-		}
-	}
-	return nil
-}
-
-// rollingUpdaterWriter is an io.Writer that reformats progress output before
-// writing it to the wrapped writer.
-type rollingUpdaterWriter struct {
-	w      io.Writer
-	called bool
-}
-
-func (w *rollingUpdaterWriter) Write(p []byte) (n int, err error) {
-	n = len(p)
-	if bytes.HasPrefix(p, []byte("Continuing update with ")) {
-		return n, nil
-	}
-	if bytes.HasSuffix(p, []byte("\n")) {
-		p = p[:len(p)-1]
-	}
-	for _, line := range bytes.Split(p, []byte("\n")) {
-		if w.called {
-			fmt.Fprintln(w.w, "   ", string(line))
-		} else {
-			w.called = true
-			fmt.Fprintln(w.w, "-->", string(line))
-		}
-	}
-	return n, nil
-}
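-
-// A hedged illustration (not part of the original file) of the output shaping
-// above: the first line written gets a "-->" prefix, later lines are indented,
-// and the updater's "Continuing update with ..." chatter is dropped:
-//
-//	w := &rollingUpdaterWriter{w: os.Stdout}
-//	w.Write([]byte("Scaling up frontend-2 from 0 to 2\n"))
-//	w.Write([]byte("Scaling frontend-1 down to 0\n"))
-//	// Output:
-//	// --> Scaling up frontend-2 from 0 to 2
-//	//     Scaling frontend-1 down to 0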
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/rolling/rolling_test.go b/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/rolling/rolling_test.go
deleted file mode 100644
index e45f810bc3d8..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/rolling/rolling_test.go
+++ /dev/null
@@ -1,340 +0,0 @@
-package rolling
-
-import (
-	"bytes"
-	"fmt"
-	"reflect"
-	"testing"
-	"time"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/diff"
-	"k8s.io/client-go/kubernetes/fake"
-	clientgotesting "k8s.io/client-go/testing"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-
-	"github.com/openshift/library-go/pkg/apps/appsutil"
-	strat "github.com/openshift/oc/pkg/cli/deployer/strategy"
-	"github.com/openshift/oc/pkg/cli/deployer/strategy/util/appstest"
-)
-
-func TestRolling_deployInitial(t *testing.T) {
-	initialStrategyInvoked := false
-
-	strategy := &RollingDeploymentStrategy{
-		rcClient:    fake.NewSimpleClientset().CoreV1(),
-		eventClient: fake.NewSimpleClientset().CoreV1(),
-		initialStrategy: &testStrategy{
-			deployFn: func(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
-				initialStrategyInvoked = true
-				return nil
-			},
-		},
-		rollingUpdate: func(config *RollingUpdaterConfig) error {
-			t.Fatalf("unexpected call to rollingUpdate")
-			return nil
-		},
-		getUpdateAcceptor: getUpdateAcceptor,
-		apiRetryPeriod:    1 * time.Millisecond,
-		apiRetryTimeout:   10 * time.Millisecond,
-	}
-
-	config := appstest.OkDeploymentConfig(1)
-	config.Spec.Strategy = appstest.OkRollingStrategy()
-	deployment, _ := appsutil.MakeDeployment(config)
-	strategy.out, strategy.errOut = &bytes.Buffer{}, &bytes.Buffer{}
-	err := strategy.Deploy(nil, deployment, 2)
-	if err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-	if !initialStrategyInvoked {
-		t.Fatalf("expected initial strategy to be invoked")
-	}
-}
-
-func TestRolling_deployRolling(t *testing.T) {
-	latestConfig := appstest.OkDeploymentConfig(1)
-	latestConfig.Spec.Strategy = appstest.OkRollingStrategy()
-	latest, _ := appsutil.MakeDeployment(latestConfig)
-	config := appstest.OkDeploymentConfig(2)
-	config.Spec.Strategy = appstest.OkRollingStrategy()
-	deployment, _ := appsutil.MakeDeployment(config)
-
-	deployments := map[string]*corev1.ReplicationController{
-		latest.Name:     latest,
-		deployment.Name: deployment,
-	}
-	deploymentUpdated := false
-
-	client := &fake.Clientset{}
-	client.AddReactor("get", "replicationcontrollers", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-		name := action.(clientgotesting.GetAction).GetName()
-		return true, deployments[name], nil
-	})
-	client.AddReactor("update", "replicationcontrollers", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-		updated := action.(clientgotesting.UpdateAction).GetObject().(*corev1.ReplicationController)
-		deploymentUpdated = true
-		return true, updated, nil
-	})
-
-	var rollingConfig *RollingUpdaterConfig
-	strategy := &RollingDeploymentStrategy{
-		rcClient:    client.CoreV1(),
-		eventClient: fake.NewSimpleClientset().CoreV1(),
-		initialStrategy: &testStrategy{
-			deployFn: func(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
-				t.Fatalf("unexpected call to initial strategy")
-				return nil
-			},
-		},
-		rollingUpdate: func(config *RollingUpdaterConfig) error {
-			rollingConfig = config
-			return nil
-		},
-		getUpdateAcceptor: getUpdateAcceptor,
-		apiRetryPeriod:    1 * time.Millisecond,
-		apiRetryTimeout:   10 * time.Millisecond,
-	}
-
-	strategy.out, strategy.errOut = &bytes.Buffer{}, &bytes.Buffer{}
-	err := strategy.Deploy(latest, deployment, 2)
-	if err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-	if rollingConfig == nil {
-		t.Fatalf("expected rolling update to be invoked")
-	}
-
-	if !reflect.DeepEqual(latest, rollingConfig.OldRc) {
-		t.Errorf("unexpected rollingConfig.OldRc:%s\n", diff.ObjectGoPrintDiff(latest, rollingConfig.OldRc))
-	}
-
-	if !reflect.DeepEqual(deployment, rollingConfig.NewRc) {
-		t.Errorf("unexpected rollingConfig.NewRc:%s\n", diff.ObjectGoPrintDiff(latest, rollingConfig.OldRc))
-	}
-
-	if e, a := 1*time.Second, rollingConfig.Interval; e != a {
-		t.Errorf("expected Interval %v, got %v", e, a)
-	}
-
-	if e, a := 1*time.Second, rollingConfig.UpdatePeriod; e != a {
-		t.Errorf("expected UpdatePeriod %v, got %v", e, a)
-	}
-
-	if e, a := 20*time.Second, rollingConfig.Timeout; e != a {
-		t.Errorf("expected Timeout %v, got %v", e, a)
-	}
-
-	// verify hack
-	if e, a := int32(1), rollingConfig.NewRc.Spec.Replicas; e != *a {
-		t.Errorf("expected rollingConfig.NewRc.Spec.Replicas %d, got %d", e, a)
-	}
-
-	// verify hack
-	if !deploymentUpdated {
-		t.Errorf("expected deployment to be updated for source annotation")
-	}
-	sid := fmt.Sprintf("%s:%s", latest.Name, latest.ObjectMeta.UID)
-	if e, a := sid, rollingConfig.NewRc.Annotations[sourceIdAnnotation]; e != a {
-		t.Errorf("expected sourceIdAnnotation %s, got %s", e, a)
-	}
-}
-
-type hookExecutorImpl struct {
-	executeFunc func(hook *appsv1.LifecycleHook, deployment *corev1.ReplicationController, suffix, label string) error
-}
-
-func (h *hookExecutorImpl) Execute(hook *appsv1.LifecycleHook, rc *corev1.ReplicationController, suffix, label string) error {
-	return h.executeFunc(hook, rc, suffix, label)
-}
-
-func TestRolling_deployRollingHooks(t *testing.T) {
-	config := appstest.OkDeploymentConfig(1)
-	config.Spec.Strategy = appstest.OkRollingStrategy()
-	latest, _ := appsutil.MakeDeployment(config)
-
-	var hookError error
-
-	deployments := map[string]*corev1.ReplicationController{latest.Name: latest}
-
-	client := &fake.Clientset{}
-	client.AddReactor("get", "replicationcontrollers", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-		name := action.(clientgotesting.GetAction).GetName()
-		return true, deployments[name], nil
-	})
-	client.AddReactor("update", "replicationcontrollers", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-		updated := action.(clientgotesting.UpdateAction).GetObject().(*corev1.ReplicationController)
-		return true, updated, nil
-	})
-
-	strategy := &RollingDeploymentStrategy{
-		rcClient:    client.CoreV1(),
-		eventClient: fake.NewSimpleClientset().CoreV1(),
-		initialStrategy: &testStrategy{
-			deployFn: func(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
-				t.Fatalf("unexpected call to initial strategy")
-				return nil
-			},
-		},
-		rollingUpdate: func(config *RollingUpdaterConfig) error {
-			return nil
-		},
-		hookExecutor: &hookExecutorImpl{
-			executeFunc: func(hook *appsv1.LifecycleHook, deployment *corev1.ReplicationController, suffix, label string) error {
-				return hookError
-			},
-		},
-		getUpdateAcceptor: getUpdateAcceptor,
-		apiRetryPeriod:    1 * time.Millisecond,
-		apiRetryTimeout:   10 * time.Millisecond,
-	}
-
-	cases := []struct {
-		params               *appsv1.RollingDeploymentStrategyParams
-		hookShouldFail       bool
-		deploymentShouldFail bool
-	}{
-		{rollingParams(appsv1.LifecycleHookFailurePolicyAbort, ""), true, true},
-		{rollingParams(appsv1.LifecycleHookFailurePolicyAbort, ""), false, false},
-		{rollingParams("", appsv1.LifecycleHookFailurePolicyAbort), true, true},
-		{rollingParams("", appsv1.LifecycleHookFailurePolicyAbort), false, false},
-	}
-
-	for _, tc := range cases {
-		config := appstest.OkDeploymentConfig(2)
-		config.Spec.Strategy.RollingParams = tc.params
-		deployment, _ := appsutil.MakeDeployment(config)
-		deployments[deployment.Name] = deployment
-		hookError = nil
-		if tc.hookShouldFail {
-			hookError = fmt.Errorf("hook failure")
-		}
-		strategy.out, strategy.errOut = &bytes.Buffer{}, &bytes.Buffer{}
-		err := strategy.Deploy(latest, deployment, 2)
-		if err != nil && tc.deploymentShouldFail {
-			t.Logf("got expected error: %v", err)
-		}
-		if err == nil && tc.deploymentShouldFail {
-			t.Errorf("expected an error for case: %#v", tc)
-		}
-		if err != nil && !tc.deploymentShouldFail {
-			t.Errorf("unexpected error for case: %#v: %v", tc, err)
-		}
-	}
-}
-
-// TestRolling_deployInitialHooks can go away once the rolling strategy
-// supports initial deployments.
-func TestRolling_deployInitialHooks(t *testing.T) {
-	var hookError error
-
-	strategy := &RollingDeploymentStrategy{
-		rcClient:    fake.NewSimpleClientset().CoreV1(),
-		eventClient: fake.NewSimpleClientset().CoreV1(),
-		initialStrategy: &testStrategy{
-			deployFn: func(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int,
-				updateAcceptor strat.UpdateAcceptor) error {
-				return nil
-			},
-		},
-		rollingUpdate: func(config *RollingUpdaterConfig) error {
-			return nil
-		},
-		hookExecutor: &hookExecutorImpl{
-			executeFunc: func(hook *appsv1.LifecycleHook, deployment *corev1.ReplicationController, suffix, label string) error {
-				return hookError
-			},
-		},
-		getUpdateAcceptor: getUpdateAcceptor,
-		apiRetryPeriod:    1 * time.Millisecond,
-		apiRetryTimeout:   10 * time.Millisecond,
-	}
-
-	cases := []struct {
-		params               *appsv1.RollingDeploymentStrategyParams
-		hookShouldFail       bool
-		deploymentShouldFail bool
-	}{
-		{rollingParams(appsv1.LifecycleHookFailurePolicyAbort, ""), true, true},
-		{rollingParams(appsv1.LifecycleHookFailurePolicyAbort, ""), false, false},
-		{rollingParams("", appsv1.LifecycleHookFailurePolicyAbort), true, true},
-		{rollingParams("", appsv1.LifecycleHookFailurePolicyAbort), false, false},
-	}
-
-	for i, tc := range cases {
-		config := appstest.OkDeploymentConfig(2)
-		config.Spec.Strategy.RollingParams = tc.params
-		deployment, _ := appsutil.MakeDeployment(config)
-		hookError = nil
-		if tc.hookShouldFail {
-			hookError = fmt.Errorf("hook failure")
-		}
-		strategy.out, strategy.errOut = &bytes.Buffer{}, &bytes.Buffer{}
-		err := strategy.Deploy(nil, deployment, 2)
-		if err != nil && tc.deploymentShouldFail {
-			t.Logf("got expected error: %v", err)
-		}
-		if err == nil && tc.deploymentShouldFail {
-			t.Errorf("%d: expected an error for case: %v", i, tc)
-		}
-		if err != nil && !tc.deploymentShouldFail {
-			t.Errorf("%d: unexpected error for case: %v: %v", i, tc, err)
-		}
-	}
-}
-
-type testStrategy struct {
-	deployFn func(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error
-}
-
-func (s *testStrategy) DeployWithAcceptor(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
-	return s.deployFn(from, to, desiredReplicas, updateAcceptor)
-}
-
-func mkintp(i int) *int64 {
-	v := int64(i)
-	return &v
-}
-
-func rollingParams(preFailurePolicy, postFailurePolicy appsv1.LifecycleHookFailurePolicy) *appsv1.RollingDeploymentStrategyParams {
-	var pre *appsv1.LifecycleHook
-	var post *appsv1.LifecycleHook
-
-	if len(preFailurePolicy) > 0 {
-		pre = &appsv1.LifecycleHook{
-			FailurePolicy: preFailurePolicy,
-			ExecNewPod:    &appsv1.ExecNewPodHook{},
-		}
-	}
-	if len(postFailurePolicy) > 0 {
-		post = &appsv1.LifecycleHook{
-			FailurePolicy: postFailurePolicy,
-			ExecNewPod:    &appsv1.ExecNewPodHook{},
-		}
-	}
-	return &appsv1.RollingDeploymentStrategyParams{
-		UpdatePeriodSeconds: mkintp(1),
-		IntervalSeconds:     mkintp(1),
-		TimeoutSeconds:      mkintp(20),
-		Pre:                 pre,
-		Post:                post,
-	}
-}
-
-func getUpdateAcceptor(timeout time.Duration, minReadySeconds int32) strat.UpdateAcceptor {
-	return &testAcceptor{
-		acceptFn: func(deployment *corev1.ReplicationController) error {
-			return nil
-		},
-	}
-}
-
-type testAcceptor struct {
-	acceptFn func(*corev1.ReplicationController) error
-}
-
-func (t *testAcceptor) Accept(deployment *corev1.ReplicationController) error {
-	return t.acceptFn(deployment)
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/rolling/rolling_updater.go b/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/rolling/rolling_updater.go
deleted file mode 100644
index f6a5f324b272..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/rolling/rolling_updater.go
+++ /dev/null
@@ -1,640 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package rolling
-
-// This file is a copy of k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate.go.
-// It has been inlined as that location is effectively unmaintained and this
-// package must maintain backwards compatibility with older openshift versions.
-
-import (
-	"fmt"
-	"io"
-	"strconv"
-	"time"
-
-	api "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/util/intstr"
-	"k8s.io/apimachinery/pkg/util/wait"
-	coreclient "k8s.io/client-go/kubernetes/typed/core/v1"
-	scaleclient "k8s.io/client-go/scale"
-	"k8s.io/client-go/util/retry"
-	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
-	"k8s.io/kubernetes/pkg/kubectl"
-	"k8s.io/utils/integer"
-)
-
-// ControllerHasDesiredReplicas returns a condition that will be true if and only if
-// the desired replica count for a controller's ReplicaSelector equals the Replicas count.
-func ControllerHasDesiredReplicas(rcClient coreclient.ReplicationControllersGetter, controller *api.ReplicationController) wait.ConditionFunc {
-
-	// If we're given a controller where the status lags the spec, it either means that the controller is stale,
-	// or that the rc manager hasn't noticed the update yet. Polling status.Replicas is not safe in the latter case.
-	desiredGeneration := controller.Generation
-
-	return func() (bool, error) {
-		ctrl, err := rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name, metav1.GetOptions{})
-		if err != nil {
-			return false, err
-		}
-		// There's a chance a concurrent update modifies the Spec.Replicas causing this check to pass,
-		// or, after this check has passed, a modification causes the rc manager to create more pods.
-		// This will not be an issue once we've implemented graceful delete for rcs, but till then
-		// concurrent stop operations on the same rc might have unintended side effects.
-		return ctrl.Status.ObservedGeneration >= desiredGeneration && ctrl.Status.Replicas == *ctrl.Spec.Replicas, nil
-	}
-}
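-
-// A hedged usage sketch, added for illustration and not part of the original
-// file: callers poll the returned condition until the controller's observed
-// state catches up with its spec, e.g.:
-//
-//	cond := ControllerHasDesiredReplicas(rcClient, rc)
-//	if err := wait.Poll(time.Second, time.Minute, cond); err != nil {
-//		// the controller never settled within the timeout
-//	}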
-
-const (
-	kubectlAnnotationPrefix    = "kubectl.kubernetes.io/"
-	sourceIdAnnotation         = kubectlAnnotationPrefix + "update-source-id"
-	desiredReplicasAnnotation  = kubectlAnnotationPrefix + "desired-replicas"
-	originalReplicasAnnotation = kubectlAnnotationPrefix + "original-replicas"
-)
-
-// RollingUpdaterConfig is the configuration for a rolling deployment process.
-type RollingUpdaterConfig struct {
-	// Out is a writer for progress output.
-	Out io.Writer
-	// OldRC is an existing controller to be replaced.
-	OldRc *api.ReplicationController
-	// NewRc is a controller that will take ownership of updated pods (will be
-	// created if needed).
-	NewRc *api.ReplicationController
-	// UpdatePeriod is the time to wait between individual pod updates.
-	UpdatePeriod time.Duration
-	// Interval is the time to wait between polling controller status after
-	// update.
-	Interval time.Duration
-	// Timeout is the time to wait for controller updates before giving up.
-	Timeout time.Duration
-	// MinReadySeconds is the number of seconds to wait after the pods are ready
-	MinReadySeconds int32
-	// CleanupPolicy defines the cleanup action to take after the deployment is
-	// complete.
-	CleanupPolicy RollingUpdaterCleanupPolicy
-	// MaxUnavailable is the maximum number of pods that can be unavailable during the update.
-	// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
-	// Absolute number is calculated from percentage by rounding up.
-	// This can not be 0 if MaxSurge is 0.
-	// By default, a fixed value of 1 is used.
-	// Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods
-	// immediately when the rolling update starts. Once new pods are ready, old RC
-	// can be scaled down further, followed by scaling up the new RC, ensuring
-	// that the total number of pods available at all times during the update is at
-	// least 70% of desired pods.
-	MaxUnavailable intstr.IntOrString
-	// MaxSurge is the maximum number of pods that can be scheduled above the desired number of pods.
-	// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
-	// This can not be 0 if MaxUnavailable is 0.
-	// Absolute number is calculated from percentage by rounding up.
-	// By default, a value of 1 is used.
-	// Example: when this is set to 30%, the new RC can be scaled up immediately
-	// when the rolling update starts, such that the total number of old and new pods do not exceed
-	// 130% of desired pods. Once old pods have been killed, new RC can be scaled up
-	// further, ensuring that total number of pods running at any time during
-// the update is at most 130% of desired pods.
-	MaxSurge intstr.IntOrString
-	// OnProgress is invoked if set during each scale cycle, to allow the caller to perform additional logic or
-	// abort the scale. If an error is returned the cleanup method will not be invoked. The percentage value
-	// is a synthetic "progress" calculation that represents the approximate percentage completion.
-	OnProgress func(oldRc, newRc *api.ReplicationController, percentage int) error
-}
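-
-// A worked example of the surge/unavailable arithmetic above (added for
-// illustration, not part of the original file): with desired = 10,
-// MaxSurge = "30%" and MaxUnavailable = "30%", both resolve to 3, so the
-// update may run up to 10+3 = 13 pods in total while always keeping at
-// least 10-3 = 7 pods available.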
-
-// RollingUpdaterCleanupPolicy is a cleanup action to take after the
-// deployment is complete.
-type RollingUpdaterCleanupPolicy string
-
-const (
-	// DeleteRollingUpdateCleanupPolicy means delete the old controller.
-	DeleteRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Delete"
-	// PreserveRollingUpdateCleanupPolicy means keep the old controller.
-	PreserveRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Preserve"
-	// RenameRollingUpdateCleanupPolicy means delete the old controller, and rename
-	// the new controller to the name of the old controller.
-	RenameRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Rename"
-)
-
-// RollingUpdater provides methods for updating replicated pods in a predictable,
-// fault-tolerant way.
-type RollingUpdater struct {
-	rcClient    coreclient.ReplicationControllersGetter
-	podClient   coreclient.PodsGetter
-	scaleClient scaleclient.ScalesGetter
-	// Namespace for resources
-	ns string
-	// scaleAndWait scales a controller and returns its updated state.
-	scaleAndWait func(rc *api.ReplicationController, retry *kubectl.RetryParams, wait *kubectl.RetryParams) (*api.ReplicationController, error)
-	// getOrCreateTargetController gets and validates an existing controller or
-	// makes a new one.
-	getOrCreateTargetController func(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error)
-	// cleanup performs post deployment cleanup tasks for newRc and oldRc.
-	cleanup func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error
-	// getReadyPods returns the amount of old and new ready pods.
-	getReadyPods func(oldRc, newRc *api.ReplicationController, minReadySeconds int32) (int32, int32, error)
-	// nowFn returns the current time used to calculate the minReadySeconds
-	nowFn func() metav1.Time
-}
-
-// NewRollingUpdater creates a RollingUpdater from a client.
-func NewRollingUpdater(namespace string, rcClient coreclient.ReplicationControllersGetter, podClient coreclient.PodsGetter, sc scaleclient.ScalesGetter) *RollingUpdater {
-	updater := &RollingUpdater{
-		rcClient:    rcClient,
-		podClient:   podClient,
-		scaleClient: sc,
-		ns:          namespace,
-	}
-	// Inject real implementations.
-	updater.scaleAndWait = updater.scaleAndWaitWithScaler
-	updater.getOrCreateTargetController = updater.getOrCreateTargetControllerWithClient
-	updater.getReadyPods = updater.readyPods
-	updater.cleanup = updater.cleanupWithClients
-	updater.nowFn = metav1.Now
-	return updater
-}
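-
-// A hedged test-seam sketch (illustration only, not part of the original
-// file): because Update only calls through the injected function fields, a
-// test can stub scaling without an API server:
-//
-//	u := NewRollingUpdater("default", rcClient, podClient, scales)
-//	u.scaleAndWait = func(rc *api.ReplicationController, _, _ *kubectl.RetryParams) (*api.ReplicationController, error) {
-//		return rc, nil // pretend the scale settled immediately
-//	}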
-
-// Update all pods for a ReplicationController (oldRc) by creating a new
-// controller (newRc) with 0 replicas, and synchronously scaling oldRc and
-// newRc until oldRc has 0 replicas and newRc has the original # of desired
-// replicas. Cleanup occurs based on a RollingUpdaterCleanupPolicy.
-//
-// Each interval, the updater will attempt to make progress however it can
-// without violating any availability constraints defined by the config. This
-// means the amount scaled up or down each interval will vary based on the
-// timeliness of readiness and the updater will always try to make progress,
-// even slowly.
-//
-// If an update from newRc to oldRc is already in progress, we attempt to
-// drive it to completion. If an error occurs at any step of the update, the
-// error will be returned.
-//
-// A scaling event (either up or down) is considered progress; if no progress
-// is made within the config.Timeout, an error is returned.
-//
-// TODO: make this handle performing a rollback of a partially completed
-// rollout.
-func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
-	out := config.Out
-	oldRc := config.OldRc
-	if oldRc.Spec.Replicas == nil {
-		one := int32(1)
-		oldRc.Spec.Replicas = &one
-	}
-	scaleRetryParams := kubectl.NewRetryParams(config.Interval, config.Timeout)
-
-	// Find an existing controller (for continuing an interrupted update) or
-	// create a new one if necessary.
-	sourceId := fmt.Sprintf("%s:%s", oldRc.Name, oldRc.UID)
-	newRc, existed, err := r.getOrCreateTargetController(config.NewRc, sourceId)
-	if err != nil {
-		return err
-	}
-	if newRc.Spec.Replicas == nil {
-		one := int32(1)
-		newRc.Spec.Replicas = &one
-	}
-	if existed {
-		fmt.Fprintf(out, "Continuing update with existing controller %s.\n", newRc.Name)
-	} else {
-		fmt.Fprintf(out, "Created %s\n", newRc.Name)
-	}
-	// Extract the desired replica count from the controller.
-	desiredAnnotation, err := strconv.Atoi(newRc.Annotations[desiredReplicasAnnotation])
-	if err != nil {
-		return fmt.Errorf("unable to parse annotation for %s: %s=%s",
-			newRc.Name, desiredReplicasAnnotation, newRc.Annotations[desiredReplicasAnnotation])
-	}
-	desired := int32(desiredAnnotation)
-	// Extract the original replica count from the old controller, adding the
-	// annotation if it doesn't yet exist.
-	_, hasOriginalAnnotation := oldRc.Annotations[originalReplicasAnnotation]
-	if !hasOriginalAnnotation {
-		existing, err := r.rcClient.ReplicationControllers(oldRc.Namespace).Get(oldRc.Name, metav1.GetOptions{})
-		if err != nil {
-			return err
-		}
-		if existing.Spec.Replicas != nil {
-			originReplicas := strconv.Itoa(int(*existing.Spec.Replicas))
-			applyUpdate := func(rc *api.ReplicationController) {
-				if rc.Annotations == nil {
-					rc.Annotations = map[string]string{}
-				}
-				rc.Annotations[originalReplicasAnnotation] = originReplicas
-			}
-			if oldRc, err = updateRcWithRetries(r.rcClient, existing.Namespace, existing, applyUpdate); err != nil {
-				return err
-			}
-		}
-	}
-	// maxSurge is the maximum scaling increment and maxUnavailable is the
-	// maximum number of pods that can be unavailable during a rollout.
-	maxSurge, maxUnavailable, err := deploymentutil.ResolveFenceposts(&config.MaxSurge, &config.MaxUnavailable, desired)
-	if err != nil {
-		return err
-	}
-	// Validate maximums.
-	if desired > 0 && maxUnavailable == 0 && maxSurge == 0 {
-		return fmt.Errorf("one of maxSurge or maxUnavailable must be specified")
-	}
-	// The minimum number of pods that must remain available throughout the
-	// update, calculated for internal convenience.
-	minAvailable := int32(integer.IntMax(0, int(desired-maxUnavailable)))
-	// If the desired new scale is 0, then the max unavailable is necessarily
-	// the effective scale of the old RC regardless of the configuration
-	// (equivalent to 100% maxUnavailable).
-	if desired == 0 {
-		maxUnavailable = *oldRc.Spec.Replicas
-		minAvailable = 0
-	}
-
-	fmt.Fprintf(out, "Scaling up %s from %d to %d, scaling down %s from %d to 0 (keep %d pods available, don't exceed %d pods)\n",
-		newRc.Name, *newRc.Spec.Replicas, desired, oldRc.Name, *oldRc.Spec.Replicas, minAvailable, desired+maxSurge)
-
-	// give a caller incremental notification and allow them to exit early
-	goal := desired - *newRc.Spec.Replicas
-	if goal < 0 {
-		goal = -goal
-	}
-	progress := func(complete bool) error {
-		if config.OnProgress == nil {
-			return nil
-		}
-		progress := desired - *newRc.Spec.Replicas
-		if progress < 0 {
-			progress = -progress
-		}
-		percentage := 100
-		if !complete && goal > 0 {
-			percentage = int((goal - progress) * 100 / goal)
-		}
-		return config.OnProgress(oldRc, newRc, percentage)
-	}
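-
-	// Worked example (added for illustration, not part of the original file):
-	// with desired = 2 and a new RC starting at 0 replicas, goal = 2; after the
-	// first scale-up to 1 replica, progress = |2-1| = 1, so the caller sees
-	// percentage = (2-1)*100/2 = 50.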
-
-	// Scale newRc and oldRc until newRc has the desired number of replicas and
-	// oldRc has 0 replicas.
-	progressDeadline := time.Now().UnixNano() + config.Timeout.Nanoseconds()
-	for *newRc.Spec.Replicas != desired || *oldRc.Spec.Replicas != 0 {
-		// Store the existing replica counts for progress timeout tracking.
-		newReplicas := *newRc.Spec.Replicas
-		oldReplicas := *oldRc.Spec.Replicas
-
-		// Scale up as much as possible.
-		scaledRc, err := r.scaleUp(newRc, oldRc, desired, maxSurge, maxUnavailable, scaleRetryParams, config)
-		if err != nil {
-			return err
-		}
-		newRc = scaledRc
-
-		// notify the caller if necessary
-		if err := progress(false); err != nil {
-			return err
-		}
-
-		// Wait between scaling operations for things to settle.
-		time.Sleep(config.UpdatePeriod)
-
-		// Scale down as much as possible.
-		scaledRc, err = r.scaleDown(newRc, oldRc, desired, minAvailable, maxUnavailable, maxSurge, config)
-		if err != nil {
-			return err
-		}
-		oldRc = scaledRc
-
-		// notify the caller if necessary
-		if err := progress(false); err != nil {
-			return err
-		}
-
-		// If we are making progress, continue to advance the progress deadline.
-		// Otherwise, time out with an error.
-		progressMade := (*newRc.Spec.Replicas != newReplicas) || (*oldRc.Spec.Replicas != oldReplicas)
-		if progressMade {
-			progressDeadline = time.Now().UnixNano() + config.Timeout.Nanoseconds()
-		} else if time.Now().UnixNano() > progressDeadline {
-			return fmt.Errorf("timed out waiting for any update progress to be made")
-		}
-	}
-
-	// notify the caller if necessary
-	if err := progress(true); err != nil {
-		return err
-	}
-
-	// Housekeeping and cleanup policy execution.
-	return r.cleanup(oldRc, newRc, config)
-}
-
-// scaleUp scales up newRc to desired by whatever increment is possible given
-// the configured surge threshold. scaleUp will safely no-op as necessary when
-// it detects redundancy or other relevant conditions.
-func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, desired, maxSurge, maxUnavailable int32, scaleRetryParams *kubectl.RetryParams, config *RollingUpdaterConfig) (*api.ReplicationController, error) {
-	// If we're already at the desired, do nothing.
-	if *newRc.Spec.Replicas == desired {
-		return newRc, nil
-	}
-
-	// Scale up as far as we can based on the surge limit.
-	increment := (desired + maxSurge) - (*oldRc.Spec.Replicas + *newRc.Spec.Replicas)
-	// If the old is already scaled down, go ahead and scale all the way up.
-	if *oldRc.Spec.Replicas == 0 {
-		increment = desired - *newRc.Spec.Replicas
-	}
-	// We can't scale up without violating the surge limit, so do nothing.
-	if increment <= 0 {
-		return newRc, nil
-	}
-	// Increase the replica count, and deal with fenceposts.
-	*newRc.Spec.Replicas += increment
-	if *newRc.Spec.Replicas > desired {
-		*newRc.Spec.Replicas = desired
-	}
-	// Perform the scale-up.
-	fmt.Fprintf(config.Out, "Scaling %s up to %d\n", newRc.Name, *newRc.Spec.Replicas)
-	scaledRc, err := r.scaleAndWait(newRc, scaleRetryParams, scaleRetryParams)
-	if err != nil {
-		return nil, err
-	}
-	return scaledRc, nil
-}
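-
-// Worked example for scaleUp (illustration only, not part of the original
-// file): with desired = 10, maxSurge = 3, old at 10 and new at 0, the
-// increment is (10+3) - (10+0) = 3, so the new RC is scaled 0 -> 3; once the
-// old RC reaches 0, the increment becomes desired - new and the new RC jumps
-// straight to 10.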
-
-// scaleDown scales down oldRc to 0 at whatever decrement possible given the
-// thresholds defined on the config. scaleDown will safely no-op as necessary
-// when it detects redundancy or other relevant conditions.
-func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desired, minAvailable, maxUnavailable, maxSurge int32, config *RollingUpdaterConfig) (*api.ReplicationController, error) {
-	// Already scaled down; do nothing.
-	if *oldRc.Spec.Replicas == 0 {
-		return oldRc, nil
-	}
-	// Get ready pods. We shouldn't block here; if both old and new pods are
-	// unavailable, the rolling update process would stall indefinitely.
-	// Timeout-wise we are already covered by the progress check.
-	_, newAvailable, err := r.getReadyPods(oldRc, newRc, config.MinReadySeconds)
-	if err != nil {
-		return nil, err
-	}
-	// The old controller is considered as part of the total because we want to
-	// maintain minimum availability even with a volatile old controller.
-	// Scale down as much as possible while maintaining minimum availability
-	allPods := *oldRc.Spec.Replicas + *newRc.Spec.Replicas
-	newUnavailable := *newRc.Spec.Replicas - newAvailable
-	decrement := allPods - minAvailable - newUnavailable
-	// The decrement normally shouldn't drop below 0 because the available count
-	// always starts below the old replica count, but the old replica count can
-	// decrement due to externalities like pod deaths in the replica set. This
-	// will be considered a transient condition; do nothing and try again later
-	// with new readiness values.
-	//
-	// If the most we can scale is 0, it means we can't scale down without
-	// violating the minimum. Do nothing and try again later when conditions may
-	// have changed.
-	if decrement <= 0 {
-		return oldRc, nil
-	}
-	// Reduce the replica count, and deal with fenceposts.
-	*oldRc.Spec.Replicas -= decrement
-	if *oldRc.Spec.Replicas < 0 {
-		*oldRc.Spec.Replicas = 0
-	}
-	// If the new is already fully scaled and available up to the desired size, go
-	// ahead and scale old all the way down.
-	if *newRc.Spec.Replicas == desired && newAvailable == desired {
-		*oldRc.Spec.Replicas = 0
-	}
-	// Perform the scale-down.
-	fmt.Fprintf(config.Out, "Scaling %s down to %d\n", oldRc.Name, *oldRc.Spec.Replicas)
-	retryWait := &kubectl.RetryParams{Interval: config.Interval, Timeout: config.Timeout}
-	scaledRc, err := r.scaleAndWait(oldRc, retryWait, retryWait)
-	if err != nil {
-		return nil, err
-	}
-	return scaledRc, nil
-}
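-
-// Worked example for scaleDown, continuing the numbers above (illustration
-// only, not part of the original file): with old = 10, new = 3 and all 3 new
-// pods ready, minAvailable = 7, so allPods = 13, newUnavailable = 0 and
-// decrement = 13 - 7 - 0 = 6, scaling the old RC 10 -> 4.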
-
-// scaleAndWaitWithScaler scales a controller using a Scaler and a real client.
-func (r *RollingUpdater) scaleAndWaitWithScaler(rc *api.ReplicationController, retry, wait *kubectl.RetryParams) (*api.ReplicationController, error) {
-	scaler := kubectl.NewScaler(r.scaleClient)
-	if err := scaler.Scale(rc.Namespace, rc.Name, uint(*rc.Spec.Replicas), &kubectl.ScalePrecondition{Size: -1}, retry, wait, schema.GroupResource{Resource: "replicationcontrollers"}); err != nil {
-		return nil, err
-	}
-	return r.rcClient.ReplicationControllers(rc.Namespace).Get(rc.Name, metav1.GetOptions{})
-}
-
-// readyPods returns the old and new ready counts for their pods.
-// If a pod is observed as being ready, it's considered ready even
-// if it later becomes notReady.
-func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController, minReadySeconds int32) (int32, int32, error) {
-	controllers := []*api.ReplicationController{oldRc, newRc}
-	oldReady := int32(0)
-	newReady := int32(0)
-	if r.nowFn == nil {
-		r.nowFn = metav1.Now
-	}
-
-	for i := range controllers {
-		controller := controllers[i]
-		selector := labels.Set(controller.Spec.Selector).AsSelector()
-		options := metav1.ListOptions{LabelSelector: selector.String()}
-		pods, err := r.podClient.Pods(controller.Namespace).List(options)
-		if err != nil {
-			return 0, 0, err
-		}
-		for _, pod := range pods.Items {
-			// Do not count deleted pods as ready
-			if pod.DeletionTimestamp != nil {
-				continue
-			}
-			if !podutil.IsPodAvailable(&pod, minReadySeconds, r.nowFn()) {
-				continue
-			}
-			switch controller.Name {
-			case oldRc.Name:
-				oldReady++
-			case newRc.Name:
-				newReady++
-			}
-		}
-	}
-	return oldReady, newReady, nil
-}
-
-// getOrCreateTargetControllerWithClient looks for an existing controller with
-// sourceId. If found, the existing controller is returned with true
-// indicating that the controller already exists. If the controller isn't
-// found, a new one is created and returned along with false indicating the
-// controller was created.
-//
-// Existing controllers are validated to ensure their sourceIdAnnotation
-// matches sourceId; if there's a mismatch, an error is returned.
-func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) {
-	existingRc, err := r.existingController(controller)
-	if err != nil {
-		if !errors.IsNotFound(err) {
-			// There was an error trying to find the controller; don't assume we
-			// should create it.
-			return nil, false, err
-		}
-		if controller.Spec.Replicas == nil || *controller.Spec.Replicas <= 0 {
-			return nil, false, fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d\n", controller.Name, controller.Spec.Replicas)
-		}
-		// The controller wasn't found, so create it.
-		if controller.Annotations == nil {
-			controller.Annotations = map[string]string{}
-		}
-		controller.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", *controller.Spec.Replicas)
-		controller.Annotations[sourceIdAnnotation] = sourceId
-		zero := int32(0)
-		controller.Spec.Replicas = &zero
-		newRc, err := r.rcClient.ReplicationControllers(r.ns).Create(controller)
-		return newRc, false, err
-	}
-	// Validate and use the existing controller.
-	annotations := existingRc.Annotations
-	source := annotations[sourceIdAnnotation]
-	_, ok := annotations[desiredReplicasAnnotation]
-	if source != sourceId || !ok {
-		return nil, false, fmt.Errorf("missing/unexpected annotations for controller %s, expected %s : %s", controller.Name, sourceId, annotations)
-	}
-	return existingRc, true, nil
-}
-
-// existingController verifies if the controller already exists
-func (r *RollingUpdater) existingController(controller *api.ReplicationController) (*api.ReplicationController, error) {
-	// with no rc name but a generate name set, there is no existing rc
-	if len(controller.Name) == 0 && len(controller.GenerateName) > 0 {
-		return nil, errors.NewNotFound(api.Resource("replicationcontrollers"), controller.Name)
-	}
-	// controller name is required to get rc back
-	return r.rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name, metav1.GetOptions{})
-}
-
-// cleanupWithClients performs cleanup tasks after the rolling update. Update
-// process related annotations are removed from oldRc and newRc. The
-// CleanupPolicy on config is executed.
-func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error {
-	// Clean up annotations
-	var err error
-	newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(newRc.Name, metav1.GetOptions{})
-	if err != nil {
-		return err
-	}
-	applyUpdate := func(rc *api.ReplicationController) {
-		delete(rc.Annotations, sourceIdAnnotation)
-		delete(rc.Annotations, desiredReplicasAnnotation)
-	}
-	if newRc, err = updateRcWithRetries(r.rcClient, r.ns, newRc, applyUpdate); err != nil {
-		return err
-	}
-
-	if err = wait.Poll(config.Interval, config.Timeout, ControllerHasDesiredReplicas(r.rcClient, newRc)); err != nil {
-		return err
-	}
-	newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(newRc.Name, metav1.GetOptions{})
-	if err != nil {
-		return err
-	}
-
-	switch config.CleanupPolicy {
-	case DeleteRollingUpdateCleanupPolicy:
-		// delete old rc
-		fmt.Fprintf(config.Out, "Update succeeded. Deleting %s\n", oldRc.Name)
-		return r.rcClient.ReplicationControllers(r.ns).Delete(oldRc.Name, nil)
-	case RenameRollingUpdateCleanupPolicy:
-		// delete old rc
-		fmt.Fprintf(config.Out, "Update succeeded. Deleting old controller: %s\n", oldRc.Name)
-		if err := r.rcClient.ReplicationControllers(r.ns).Delete(oldRc.Name, nil); err != nil {
-			return err
-		}
-		fmt.Fprintf(config.Out, "Renaming %s to %s\n", newRc.Name, oldRc.Name)
-		return Rename(r.rcClient, newRc, oldRc.Name)
-	case PreserveRollingUpdateCleanupPolicy:
-		return nil
-	default:
-		return nil
-	}
-}
-
-func Rename(c coreclient.ReplicationControllersGetter, rc *api.ReplicationController, newName string) error {
-	oldName := rc.Name
-	rc.Name = newName
-	rc.ResourceVersion = ""
-	// First delete the oldName RC and orphan its pods.
-	propagation := metav1.DeletePropagationOrphan
-	err := c.ReplicationControllers(rc.Namespace).Delete(oldName, &metav1.DeleteOptions{PropagationPolicy: &propagation})
-	if err != nil && !errors.IsNotFound(err) {
-		return err
-	}
-	err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
-		_, err := c.ReplicationControllers(rc.Namespace).Get(oldName, metav1.GetOptions{})
-		if err == nil {
-			return false, nil
-		} else if errors.IsNotFound(err) {
-			return true, nil
-		} else {
-			return false, err
-		}
-	})
-	if err != nil {
-		return err
-	}
-	// Then create the same RC with the new name.
-	_, err = c.ReplicationControllers(rc.Namespace).Create(rc)
-	return err
-}
-
-type NewControllerConfig struct {
-	Namespace        string
-	OldName, NewName string
-	Image            string
-	Container        string
-	DeploymentKey    string
-	PullPolicy       api.PullPolicy
-}
-
-type updateRcFunc func(controller *api.ReplicationController)
-
-// updateRcWithRetries retries updating the given rc on conflict with the following steps:
-// 1. Get latest resource
-// 2. applyUpdate
-// 3. Update the resource
-func updateRcWithRetries(rcClient coreclient.ReplicationControllersGetter, namespace string, rc *api.ReplicationController, applyUpdate updateRcFunc) (*api.ReplicationController, error) {
-	// Deep copy the rc in case we failed on Get during retry loop
-	oldRc := rc.DeepCopy()
-	err := retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) {
-		// Apply the update, then attempt to push it to the apiserver.
-		applyUpdate(rc)
-		if rc, e = rcClient.ReplicationControllers(namespace).Update(rc); e == nil {
-			// rc contains the latest controller post update
-			return
-		}
-		updateErr := e
-		// Update the controller with the latest resource version, if the update failed we
-		// can't trust rc so use oldRc.Name.
-		if rc, e = rcClient.ReplicationControllers(namespace).Get(oldRc.Name, metav1.GetOptions{}); e != nil {
-			// The Get failed: Value in rc cannot be trusted.
-			rc = oldRc
-		}
-		// Only return the error from update
-		return updateErr
-	})
-	// If the error is non-nil, the returned controller cannot be trusted; if it
-	// is nil, the returned controller contains the applied update.
-	return rc, err
-}
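-
-// exampleAnnotate is a hedged sketch added for illustration; it is not part
-// of the original file. It shows the retry helper in use: all mutation goes
-// inside applyUpdate, since that function may run several times on conflict.
-// The annotation key is hypothetical.
-func exampleAnnotate(rcClient coreclient.ReplicationControllersGetter, rc *api.ReplicationController) (*api.ReplicationController, error) {
-	return updateRcWithRetries(rcClient, rc.Namespace, rc, func(rc *api.ReplicationController) {
-		if rc.Annotations == nil {
-			rc.Annotations = map[string]string{}
-		}
-		rc.Annotations["example.openshift.io/touched"] = "true"
-	})
-}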
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/support/acceptor.go b/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/support/acceptor.go
deleted file mode 100644
index 1106a63a7ed4..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/support/acceptor.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package support
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"time"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/fields"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/apimachinery/pkg/watch"
-	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-	"k8s.io/client-go/tools/cache"
-	watchtools "k8s.io/client-go/tools/watch"
-)
-
-// NewAcceptAvailablePods makes a new acceptAvailablePods from a real client.
-func NewAcceptAvailablePods(
-	out io.Writer,
-	kclient corev1client.ReplicationControllersGetter,
-	timeout time.Duration,
-) *acceptAvailablePods {
-	return &acceptAvailablePods{
-		out:     out,
-		kclient: kclient,
-		timeout: timeout,
-	}
-}
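-
-// A hedged usage sketch (illustration only, not part of the original file):
-// the deployer normally wires this acceptor in through getUpdateAcceptor, but
-// it can also be driven directly:
-//
-//	acceptor := NewAcceptAvailablePods(os.Stdout, kclient, 2*time.Minute)
-//	if err := acceptor.Accept(rc); err != nil {
-//		// pods did not become available within the timeout
-//	}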
-
-// acceptAvailablePods will accept a replication controller if all the pods
-// for the replication controller become available.
-type acceptAvailablePods struct {
-	out     io.Writer
-	kclient corev1client.ReplicationControllersGetter
-	// timeout is how long to wait for pods to become available from ready state.
-	timeout time.Duration
-}
-
-// Accept accepts a replication controller once all of its pods are available.
-func (c *acceptAvailablePods) Accept(rc *corev1.ReplicationController) error {
-	allReplicasAvailable := func(r *corev1.ReplicationController) bool {
-		return r.Status.AvailableReplicas == *r.Spec.Replicas
-	}
-
-	if allReplicasAvailable(rc) {
-		return nil
-	}
-
-	fieldSelector := fields.OneTermEqualSelector("metadata.name", rc.Name).String()
-	lw := &cache.ListWatch{
-		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-			options.FieldSelector = fieldSelector
-			return c.kclient.ReplicationControllers(rc.Namespace).List(options)
-		},
-		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-			options.FieldSelector = fieldSelector
-			return c.kclient.ReplicationControllers(rc.Namespace).Watch(options)
-		},
-	}
-
-	preconditionFunc := func(store cache.Store) (bool, error) {
-		item, exists, err := store.Get(&metav1.ObjectMeta{Namespace: rc.Namespace, Name: rc.Name})
-		if err != nil {
-			return true, err
-		}
-		if !exists {
-			// We need to make sure we see the object in the cache before we start waiting for events
-			// or we would be waiting for the timeout if such an object didn't exist.
-			return true, fmt.Errorf("%s '%s/%s' not found", corev1.Resource("replicationcontrollers"), rc.Namespace, rc.Name)
-		}
-
-	// Check that the object's UID matches, to handle cases of recreation.
-		storeRc, ok := item.(*corev1.ReplicationController)
-		if !ok {
-			return true, fmt.Errorf("unexpected store item type: %#v", item)
-		}
-		if rc.UID != storeRc.UID {
-			return true, fmt.Errorf("%s '%s/%s' no longer exists, expected UID %q, got UID %q", corev1.Resource("replicationcontrollers"), rc.Namespace, rc.Name, rc.UID, storeRc.UID)
-		}
-
-		return false, nil
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
-	defer cancel()
-	_, err := watchtools.UntilWithSync(ctx, lw, &corev1.ReplicationController{}, preconditionFunc, func(event watch.Event) (bool, error) {
-		switch event.Type {
-		case watch.Added, watch.Modified:
-			newRc, ok := event.Object.(*corev1.ReplicationController)
-			if !ok {
-				return true, fmt.Errorf("unknown event object %#v", event.Object)
-			}
-			return allReplicasAvailable(newRc), nil
-
-		case watch.Deleted:
-			return true, fmt.Errorf("replicationController got deleted %#v", event.Object)
-
-		case watch.Error:
-			return true, fmt.Errorf("unexpected error %#v", event.Object)
-
-		default:
-			return true, fmt.Errorf("unexpected event type: %T", event.Type)
-		}
-	})
-	// Handle acceptance failure.
-	if err == wait.ErrWaitTimeout {
-		return fmt.Errorf("pods for rc '%s/%s' took longer than %.f seconds to become available", rc.Namespace, rc.Name, c.timeout.Seconds())
-	}
-
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
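
Accept above is a bounded watch with a precondition: fail fast if the RC vanished or was recreated under a new UID, otherwise consume Added/Modified events until every desired replica reports available, and treat deletion as a hard error. A dependency-free sketch of the event-handling core (the event and field names are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

type eventType int

const (
	added eventType = iota
	modified
	deleted
)

type rcEvent struct {
	kind      eventType
	spec      int32 // desired replicas
	available int32 // available replicas
}

// acceptLoop mirrors the acceptor's watch handler: stop successfully once all
// replicas are available, fail hard if the RC disappears mid-watch.
func acceptLoop(events []rcEvent) error {
	for _, e := range events {
		switch e.kind {
		case added, modified:
			if e.available == e.spec {
				return nil
			}
		case deleted:
			return errors.New("replication controller deleted while waiting")
		}
	}
	return errors.New("timed out before all replicas became available")
}

func main() {
	err := acceptLoop([]rcEvent{
		{modified, 3, 1},
		{modified, 3, 3},
	})
	fmt.Println("accept result:", err) // accept result: <nil>
}
```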
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/support/doc.go b/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/support/doc.go
deleted file mode 100644
index f9539ad474e2..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/support/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package support is a library of code useful to any strategy.
-package support
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/support/lifecycle.go b/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/support/lifecycle.go
deleted file mode 100644
index b679a9c5e836..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/support/lifecycle.go
+++ /dev/null
@@ -1,499 +0,0 @@
-package support
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"strings"
-	"sync"
-	"time"
-
-	corev1 "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/fields"
-	"k8s.io/apimachinery/pkg/runtime"
-	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/client-go/kubernetes"
-	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-	"k8s.io/client-go/tools/cache"
-	watchtools "k8s.io/client-go/tools/watch"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-	imageapiv1 "github.com/openshift/api/image/v1"
-	imageclienttyped "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
-	"github.com/openshift/library-go/pkg/build/naming"
-
-	"github.com/openshift/library-go/pkg/apps/appsserialization"
-	"github.com/openshift/library-go/pkg/apps/appsutil"
-	strategyutil "github.com/openshift/oc/pkg/cli/deployer/strategy/util"
-)
-
-const (
-	// hookContainerName is the name used for the container that runs inside hook pods.
-	hookContainerName = "lifecycle"
-	// deploymentPodTypeLabel is a label that contains the type of the deployment pod.
-	deploymentPodTypeLabel = "openshift.io/deployer-pod.type"
-	// deploymentAnnotation is an annotation on a deployer Pod. The annotation value is the name
-	// of the deployment (a ReplicationController) on which the deployer Pod acts.
-	deploymentAnnotation = "openshift.io/deployment.name"
-)
-
-// HookExecutor knows how to execute a deployment lifecycle hook.
-type HookExecutor interface {
-	Execute(hook *appsv1.LifecycleHook, rc *corev1.ReplicationController, suffix, label string) error
-}
-
-// hookExecutor implements the HookExecutor interface.
-var _ HookExecutor = &hookExecutor{}
-
-// hookExecutor executes a deployment lifecycle hook.
-type hookExecutor struct {
-	// pods provides client to pods
-	pods corev1client.PodsGetter
-	// tags allows setting image stream tags
-	tags imageclienttyped.ImageStreamTagsGetter
-	// out is where hook pod logs should be written to.
-	out io.Writer
-	// recorder is used to emit events from hooks
-	events corev1client.EventsGetter
-	// getPodLogs knows how to get logs from a pod and is used for testing
-	getPodLogs func(*corev1.Pod) (io.ReadCloser, error)
-}
-
-// NewHookExecutor makes a HookExecutor from a client.
-func NewHookExecutor(kubeClient kubernetes.Interface, imageClient imageclienttyped.ImageStreamTagsGetter, out io.Writer) HookExecutor {
-	executor := &hookExecutor{
-		tags:   imageClient,
-		pods:   kubeClient.CoreV1(),
-		events: kubeClient.CoreV1(),
-		out:    out,
-	}
-	executor.getPodLogs = func(pod *corev1.Pod) (io.ReadCloser, error) {
-		opts := &corev1.PodLogOptions{
-			Container:  hookContainerName,
-			Follow:     true,
-			Timestamps: false,
-		}
-		return executor.pods.Pods(pod.Namespace).GetLogs(pod.Name, opts).Stream()
-	}
-	return executor
-}
-
-// Execute executes the hook in the context of the deployment. The suffix is used to
-// distinguish the kind of hook (e.g. pre, post).
-func (e *hookExecutor) Execute(hook *appsv1.LifecycleHook, rc *corev1.ReplicationController, suffix, label string) error {
-	var err error
-	switch {
-	case len(hook.TagImages) > 0:
-		tagEventMessages := []string{}
-		for _, t := range hook.TagImages {
-			image, ok := findContainerImage(rc, t.ContainerName)
-			if ok {
-				tagEventMessages = append(tagEventMessages, fmt.Sprintf("image %q as %q", image, t.To.Name))
-			}
-		}
-		strategyutil.RecordConfigEvent(e.events, rc, kapi.EventTypeNormal, "Started",
-			fmt.Sprintf("Running %s-hook (TagImages) %s for rc %s/%s", label, strings.Join(tagEventMessages, ","), rc.Namespace, rc.Name))
-		err = e.tagImages(hook, rc, suffix, label)
-	case hook.ExecNewPod != nil:
-		strategyutil.RecordConfigEvent(e.events, rc, kapi.EventTypeNormal, "Started",
-			fmt.Sprintf("Running %s-hook (%q) for rc %s/%s", label, strings.Join(hook.ExecNewPod.Command, " "), rc.Namespace, rc.Name))
-		err = e.executeExecNewPod(hook, rc, suffix, label)
-	}
-
-	if err == nil {
-		strategyutil.RecordConfigEvent(e.events, rc, kapi.EventTypeNormal, "Completed",
-			fmt.Sprintf("The %s-hook for rc %s/%s completed successfully", label, rc.Namespace, rc.Name))
-		return nil
-	}
-
-	// Retry failures are treated the same as Abort.
-	switch hook.FailurePolicy {
-	case appsv1.LifecycleHookFailurePolicyAbort, appsv1.LifecycleHookFailurePolicyRetry:
-		strategyutil.RecordConfigEvent(e.events, rc, kapi.EventTypeWarning, "Failed",
-			fmt.Sprintf("The %s-hook failed: %v, aborting rollout of %s/%s", label, err, rc.Namespace, rc.Name))
-		return fmt.Errorf("the %s hook failed: %v, aborting rollout of %s/%s", label, err, rc.Namespace, rc.Name)
-	case appsv1.LifecycleHookFailurePolicyIgnore:
-		strategyutil.RecordConfigEvent(e.events, rc, kapi.EventTypeWarning, "Failed",
-			fmt.Sprintf("The %s-hook failed: %v (ignore), rollout of %s/%s will continue", label, err, rc.Namespace, rc.Name))
-		return nil
-	default:
-		return err
-	}
-}
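
Note the failure-policy semantics above: by the time Execute sees a failure, Retry has already been delegated to the kubelet via the hook pod's restart policy, so a failure that survives retries aborts the rollout just like Abort, while Ignore downgrades it to a warning and lets the rollout continue. A compact, dependency-free sketch of that outcome handling (names are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

type failurePolicy string

const (
	policyAbort  failurePolicy = "Abort"
	policyRetry  failurePolicy = "Retry"
	policyIgnore failurePolicy = "Ignore"
)

// resolveHookFailure mirrors Execute's switch: Retry failures (already
// retried by the kubelet) abort like Abort, Ignore swallows the error.
func resolveHookFailure(policy failurePolicy, hookErr error) error {
	if hookErr == nil {
		return nil
	}
	switch policy {
	case policyAbort, policyRetry:
		return fmt.Errorf("hook failed, aborting rollout: %w", hookErr)
	case policyIgnore:
		return nil
	default:
		return hookErr
	}
}

func main() {
	err := resolveHookFailure(policyIgnore, errors.New("exec hook exited 1"))
	fmt.Println(err) // <nil>: rollout continues despite the failed hook
}
```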
-
-// findContainerImage returns the image of the container with the given name from a replication controller.
-func findContainerImage(rc *corev1.ReplicationController, containerName string) (string, bool) {
-	if rc.Spec.Template == nil {
-		return "", false
-	}
-	for _, container := range rc.Spec.Template.Spec.Containers {
-		if container.Name == containerName {
-			return container.Image, true
-		}
-	}
-	return "", false
-}
-
-// tagImages tags images as part of the lifecycle of an rc. It uses an ImageStreamTag client
-// which will provision an ImageStream if it doesn't already exist.
-func (e *hookExecutor) tagImages(hook *appsv1.LifecycleHook, rc *corev1.ReplicationController, suffix, label string) error {
-	var errs []error
-	for _, action := range hook.TagImages {
-		value, ok := findContainerImage(rc, action.ContainerName)
-		if !ok {
-			errs = append(errs, fmt.Errorf("unable to find image for container %q: container not found", action.ContainerName))
-			continue
-		}
-		namespace := action.To.Namespace
-		if len(namespace) == 0 {
-			namespace = rc.Namespace
-		}
-		if _, err := e.tags.ImageStreamTags(namespace).Update(&imageapiv1.ImageStreamTag{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      action.To.Name,
-				Namespace: namespace,
-			},
-			Tag: &imageapiv1.TagReference{
-				From: &corev1.ObjectReference{
-					Kind: "DockerImage",
-					Name: value,
-				},
-			},
-		}); err != nil {
-			errs = append(errs, err)
-			continue
-		}
-		fmt.Fprintf(e.out, "--> %s: Tagged %q into %s/%s\n", label, value, action.To.Namespace, action.To.Name)
-	}
-
-	return utilerrors.NewAggregate(errs)
-}
-
-// executeExecNewPod executes an ExecNewPod hook by creating a new pod based on
-// the hook parameters and replication controller. The pod is then synchronously
-// watched until the pod completes, and if the pod failed, an error is returned.
-//
-// The hook pod inherits the following from the container the hook refers to:
-//
-//   * Environment (hook keys take precedence)
-//   * Working directory
-//   * Resources
-func (e *hookExecutor) executeExecNewPod(hook *appsv1.LifecycleHook, rc *corev1.ReplicationController, suffix, label string) error {
-	config, err := appsserialization.DecodeDeploymentConfig(rc)
-	if err != nil {
-		return err
-	}
-
-	deployerPod, err := e.pods.Pods(rc.Namespace).Get(appsutil.DeployerPodNameForDeployment(rc.Name), metav1.GetOptions{})
-	if err != nil {
-		return err
-	}
-	var startTime time.Time
-	// if the deployer pod has not yet had its status updated, it means the execution of the pod is racing with the kubelet
-	// status update. Until kubernetes/kubernetes#36813 is implemented, this check will remain racy. Set to Now() expecting
-	// that the kubelet is unlikely to be very far behind.
-	if deployerPod.Status.StartTime != nil {
-		startTime = deployerPod.Status.StartTime.Time
-	} else {
-		startTime = time.Now()
-	}
-
-	// Build a pod spec from the hook config and replication controller.
-	podSpec, err := createHookPodManifest(hook, rc, &config.Spec.Strategy, suffix, startTime)
-	if err != nil {
-		return err
-	}
-
-	// Track whether the pod has already run to completion and avoid showing logs
-	// or the Success message twice.
-	completed, created := false, false
-
-	// Try to create the pod.
-	pod, err := e.pods.Pods(rc.Namespace).Create(podSpec)
-	if err != nil {
-		if !apierrors.IsAlreadyExists(err) {
-			return fmt.Errorf("couldn't create lifecycle pod for %s: %v", rc.Name, err)
-		}
-		completed = true
-		pod = podSpec
-		pod.Namespace = rc.Namespace
-	} else {
-		created = true
-		fmt.Fprintf(e.out, "--> %s: Running hook pod ...\n", label)
-	}
-
-	var updatedPod *corev1.Pod
-	restarts := int32(0)
-	alreadyRead := false
-	wg := &sync.WaitGroup{}
-	wg.Add(1)
-
-	listWatcher := &cache.ListWatch{
-		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-			options.FieldSelector = fields.OneTermEqualSelector("metadata.name", pod.Name).String()
-			return e.pods.Pods(pod.Namespace).List(options)
-		},
-		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-			options.FieldSelector = fields.OneTermEqualSelector("metadata.name", pod.Name).String()
-			return e.pods.Pods(pod.Namespace).Watch(options)
-		},
-	}
-	// make sure that the pod exists and wasn't deleted early
-	preconditionFunc := func(store cache.Store) (bool, error) {
-		_, exists, err := store.Get(&metav1.ObjectMeta{Namespace: pod.Namespace, Name: pod.Name})
-		if err != nil {
-			return true, err
-		}
-		if !exists {
-			// We need to make sure we see the object in the cache before we start waiting for events
-			// or we would be waiting for the timeout if such an object didn't exist.
-			return true, apierrors.NewNotFound(corev1.Resource("pods"), pod.Name)
-		}
-
-		return false, nil
-	}
-	// Wait for the hook pod to reach a terminal phase. Start reading logs as
-	// soon as the pod enters a usable phase.
-	_, err = watchtools.UntilWithSync(
-		context.TODO(),
-		listWatcher,
-		&corev1.Pod{},
-		preconditionFunc,
-		func(event watch.Event) (bool, error) {
-			switch event.Type {
-			case watch.Error:
-				return false, apierrors.FromObject(event.Object)
-			case watch.Added, watch.Modified:
-				updatedPod = event.Object.(*corev1.Pod)
-			case watch.Deleted:
-				err := fmt.Errorf("%s: pod/%s[%s] unexpectedly deleted", label, pod.Name, pod.Namespace)
-				fmt.Fprintf(e.out, "%v\n", err)
-				return false, err
-
-			}
-
-			switch updatedPod.Status.Phase {
-			case corev1.PodRunning:
-				completed = false
-
-				// We should read logs only the first time, or again on a container restart when retrying.
-				canRetry, restartCount := canRetryReading(updatedPod, restarts)
-				if alreadyRead && !canRetry {
-					break
-				}
-				// The hook container has restarted; note in the logs that we are retrying.
-				// TODO: Maybe log the container id
-				if restarts != restartCount {
-					wg.Add(1)
-					restarts = restartCount
-					fmt.Fprintf(e.out, "--> %s: Retrying hook pod (retry #%d)\n", label, restartCount)
-				}
-				alreadyRead = true
-				go e.readPodLogs(pod, wg)
-
-			case corev1.PodSucceeded, corev1.PodFailed:
-				if completed {
-					if updatedPod.Status.Phase == corev1.PodSucceeded {
-						fmt.Fprintf(e.out, "--> %s: Hook pod already succeeded\n", label)
-					}
-					wg.Done()
-					return true, nil
-				}
-				if !created {
-					fmt.Fprintf(e.out, "--> %s: Hook pod is already running ...\n", label)
-				}
-				if !alreadyRead {
-					go e.readPodLogs(pod, wg)
-				}
-				return true, nil
-			default:
-				completed = false
-			}
-
-			return false, nil
-		},
-	)
-	if err != nil {
-		return err
-	}
-
-	// The pod is finished, wait for all logs to be consumed before returning.
-	wg.Wait()
-	if updatedPod.Status.Phase == corev1.PodFailed {
-		fmt.Fprintf(e.out, "--> %s: Failed\n", label)
-		return fmt.Errorf(updatedPod.Status.Message)
-	}
-	// Only show this message if we created the pod ourselves, or we saw
-	// the pod in a running or pending state.
-	if !completed {
-		fmt.Fprintf(e.out, "--> %s: Success\n", label)
-	}
-	return nil
-}
-
-// readPodLogs streams logs from pod to out. It signals wg when
-// done.
-func (e *hookExecutor) readPodLogs(pod *corev1.Pod, wg *sync.WaitGroup) {
-	defer wg.Done()
-	logStream, err := e.getPodLogs(pod)
-	if err != nil || logStream == nil {
-		fmt.Fprintf(e.out, "warning: Unable to retrieve hook logs from %s: %v\n", pod.Name, err)
-		return
-	}
-	// Read logs.
-	defer logStream.Close()
-	if _, err := io.Copy(e.out, logStream); err != nil {
-		fmt.Fprintf(e.out, "\nwarning: Unable to read all logs from %s, continuing: %v\n", pod.Name, err)
-	}
-}
-
-func createHookPodManifest(hook *appsv1.LifecycleHook, rc *corev1.ReplicationController, strategy *appsv1.DeploymentStrategy,
-	hookType string,
-	startTime time.Time) (*corev1.Pod, error) {
-
-	exec := hook.ExecNewPod
-
-	var baseContainer *corev1.Container
-
-	for _, container := range rc.Spec.Template.Spec.Containers {
-		if container.Name == exec.ContainerName {
-			baseContainer = &container
-			break
-		}
-	}
-	if baseContainer == nil {
-		return nil, fmt.Errorf("no container named '%s' found in rc template", exec.ContainerName)
-	}
-
-	// Build a merged environment; hook environment takes precedence over base
-	// container environment
-	envMap := map[string]corev1.EnvVar{}
-	mergedEnv := []corev1.EnvVar{}
-	for _, env := range baseContainer.Env {
-		envMap[env.Name] = env
-	}
-	for _, env := range exec.Env {
-		envMap[env.Name] = env
-	}
-	for k, v := range envMap {
-		mergedEnv = append(mergedEnv, corev1.EnvVar{Name: k, Value: v.Value, ValueFrom: v.ValueFrom})
-	}
-	mergedEnv = append(mergedEnv, corev1.EnvVar{Name: "OPENSHIFT_DEPLOYMENT_NAME", Value: rc.Name})
-	mergedEnv = append(mergedEnv, corev1.EnvVar{Name: "OPENSHIFT_DEPLOYMENT_NAMESPACE", Value: rc.Namespace})
-
-	// Assigning to a variable since its address is required
-	defaultActiveDeadline := appsutil.MaxDeploymentDurationSeconds
-	if strategy.ActiveDeadlineSeconds != nil {
-		defaultActiveDeadline = *(strategy.ActiveDeadlineSeconds)
-	}
-	maxDeploymentDurationSeconds := defaultActiveDeadline - int64(time.Since(startTime).Seconds())
-
-	// Let the kubelet manage retries if requested
-	restartPolicy := corev1.RestartPolicyNever
-	if hook.FailurePolicy == appsv1.LifecycleHookFailurePolicyRetry {
-		restartPolicy = corev1.RestartPolicyOnFailure
-	}
-
-	// Transfer any requested volumes to the hook pod.
-	volumes := []corev1.Volume{}
-	volumeNames := sets.NewString()
-	for _, volume := range rc.Spec.Template.Spec.Volumes {
-		for _, name := range exec.Volumes {
-			if volume.Name == name {
-				volumes = append(volumes, volume)
-				volumeNames.Insert(volume.Name)
-			}
-		}
-	}
-	// Transfer any volume mounts associated with transferred volumes.
-	volumeMounts := []corev1.VolumeMount{}
-	for _, mount := range baseContainer.VolumeMounts {
-		if volumeNames.Has(mount.Name) {
-			volumeMounts = append(volumeMounts, corev1.VolumeMount{
-				Name:      mount.Name,
-				ReadOnly:  mount.ReadOnly,
-				MountPath: mount.MountPath,
-				SubPath:   mount.SubPath,
-			})
-		}
-	}
-
-	// Transfer image pull secrets from the pod spec.
-	imagePullSecrets := []corev1.LocalObjectReference{}
-	for _, pullSecret := range rc.Spec.Template.Spec.ImagePullSecrets {
-		imagePullSecrets = append(imagePullSecrets, corev1.LocalObjectReference{Name: pullSecret.Name})
-	}
-
-	gracePeriod := int64(10)
-	podSecurityContextCopy := rc.Spec.Template.Spec.SecurityContext.DeepCopy()
-	securityContextCopy := baseContainer.SecurityContext.DeepCopy()
-
-	pod := &corev1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      naming.GetPodName(rc.Name, hookType),
-			Namespace: rc.Namespace,
-			Annotations: map[string]string{
-				deploymentAnnotation: rc.Name,
-			},
-			Labels: map[string]string{
-				appsv1.DeployerPodForDeploymentLabel: rc.Name,
-				deploymentPodTypeLabel:               hookType,
-			},
-		},
-		Spec: corev1.PodSpec{
-			Containers: []corev1.Container{
-				{
-					Name:            hookContainerName,
-					Image:           baseContainer.Image,
-					ImagePullPolicy: baseContainer.ImagePullPolicy,
-					Command:         exec.Command,
-					WorkingDir:      baseContainer.WorkingDir,
-					Env:             mergedEnv,
-					Resources:       baseContainer.Resources,
-					VolumeMounts:    volumeMounts,
-					SecurityContext: securityContextCopy,
-				},
-			},
-			SecurityContext:       podSecurityContextCopy,
-			Volumes:               volumes,
-			ActiveDeadlineSeconds: &maxDeploymentDurationSeconds,
-			// Set the node selector on the hook pod so that it is scheduled
-			// on the same set of nodes as the rc pods.
-			NodeSelector:                  rc.Spec.Template.Spec.NodeSelector,
-			RestartPolicy:                 restartPolicy,
-			ImagePullSecrets:              imagePullSecrets,
-			TerminationGracePeriodSeconds: &gracePeriod,
-		},
-	}
-
-	// add in DC specified labels and annotations
-	for k, v := range strategy.Labels {
-		if _, ok := pod.Labels[k]; ok {
-			continue
-		}
-		pod.Labels[k] = v
-	}
-	for k, v := range strategy.Annotations {
-		if _, ok := pod.Annotations[k]; ok {
-			continue
-		}
-		pod.Annotations[k] = v
-	}
-
-	return pod, nil
-}
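
createHookPodManifest merges the base container's environment with the hook's, with hook entries winning, and then appends the OPENSHIFT_DEPLOYMENT_NAME/NAMESPACE variables. Because the merge goes through a map, the resulting order is unspecified, which is why the tests below sort env vars before comparing. A dependency-free sketch of the precedence rule that keeps first-seen order for readability (`envVar` is an illustrative stand-in for corev1.EnvVar):

```go
package main

import "fmt"

type envVar struct{ name, value string }

// mergeEnv mirrors the hook manifest builder: later sources overwrite earlier
// ones, so hook-provided variables take precedence over the base container's.
func mergeEnv(base, hook []envVar) []envVar {
	byName := map[string]envVar{}
	order := []string{}
	for _, e := range append(append([]envVar{}, base...), hook...) {
		if _, seen := byName[e.name]; !seen {
			order = append(order, e.name)
		}
		byName[e.name] = e
	}
	merged := make([]envVar, 0, len(order))
	for _, n := range order {
		merged = append(merged, byName[n])
	}
	return merged
}

func main() {
	base := []envVar{{"ENV1", "VAL1"}, {"PATH", "/bin"}}
	hook := []envVar{{"ENV1", "overridden"}}
	fmt.Println(mergeEnv(base, hook)) // [{ENV1 overridden} {PATH /bin}]
}
```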
-
-// canRetryReading returns whether the deployment strategy can retry reading logs
-// from the given (hook) pod and the number of restarts that pod has.
-func canRetryReading(pod *corev1.Pod, restarts int32) (bool, int32) {
-	if len(pod.Status.ContainerStatuses) == 0 {
-		return false, int32(0)
-	}
-	restartCount := pod.Status.ContainerStatuses[0].RestartCount
-	return pod.Spec.RestartPolicy == corev1.RestartPolicyOnFailure && restartCount > restarts, restartCount
-}
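
Two details in this file are easy to miss: the hook pod's ActiveDeadlineSeconds is the strategy's deadline (or the default) minus however long the deployer pod has already been running, and canRetryReading only permits re-reading logs when the restart policy is OnFailure and the restart count has advanced. A small sketch of both calculations (constant and names are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

const maxDeploymentDurationSeconds = int64(21600) // mirrors appsutil's default

// remainingDeadline mirrors the hook pod deadline: the configured (or
// default) deadline minus time the deployer pod has already consumed.
func remainingDeadline(configured *int64, startTime time.Time) int64 {
	deadline := maxDeploymentDurationSeconds
	if configured != nil {
		deadline = *configured
	}
	return deadline - int64(time.Since(startTime).Seconds())
}

// canRetry mirrors canRetryReading: retry log streaming only for OnFailure
// pods whose first container restarted since we last looked.
func canRetry(restartPolicyOnFailure bool, restartCount, seen int32) bool {
	return restartPolicyOnFailure && restartCount > seen
}

func main() {
	start := time.Now().Add(-5 * time.Second)
	fmt.Println(remainingDeadline(nil, start)) // ~21595
	fmt.Println(canRetry(true, 2, 1))          // true
}
```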
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/support/lifecycle_test.go b/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/support/lifecycle_test.go
deleted file mode 100644
index 9850275b4383..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/support/lifecycle_test.go
+++ /dev/null
@@ -1,639 +0,0 @@
-package support
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"io/ioutil"
-	"reflect"
-	"sort"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/openshift/library-go/pkg/build/naming"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/diff"
-	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/client-go/kubernetes/fake"
-	clientgotesting "k8s.io/client-go/testing"
-	kapihelper "k8s.io/kubernetes/pkg/apis/core/helper"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-
-	"github.com/openshift/library-go/pkg/apps/appsutil"
-	"github.com/openshift/oc/pkg/cli/deployer/strategy/util/appstest"
-)
-
-func nowFunc() *metav1.Time {
-	return &metav1.Time{Time: time.Now().Add(-5 * time.Second)}
-}
-
-func newTestClient(config *appsv1.DeploymentConfig) *fake.Clientset {
-	client := &fake.Clientset{}
-	// when creating a lifecycle pod, we query the deployer pod for the start time to
-	// calculate the active deadline seconds for the lifecycle pod.
-	client.AddReactor("get", "pods", func(a clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-		action := a.(clientgotesting.GetAction)
-		if strings.HasPrefix(action.GetName(), config.Name) && strings.HasSuffix(action.GetName(), "-deploy") {
-			return true, &corev1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "deployer",
-				},
-				Status: corev1.PodStatus{
-					StartTime: nowFunc(),
-				},
-			}, nil
-		}
-		return true, nil, nil
-	})
-	return client
-}
-
-func TestHookExecutor_executeExecNewCreatePodFailure(t *testing.T) {
-	hook := &appsv1.LifecycleHook{
-		FailurePolicy: appsv1.LifecycleHookFailurePolicyAbort,
-		ExecNewPod: &appsv1.ExecNewPodHook{
-			ContainerName: "container1",
-		},
-	}
-	dc := appstest.OkDeploymentConfig(1)
-	deployment, _ := appsutil.MakeDeployment(dc)
-	client := newTestClient(dc)
-	client.AddReactor("create", "pods", func(a clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-		return true, nil, errors.New("could not create the pod")
-	})
-	executor := &hookExecutor{
-		pods: client.CoreV1(),
-	}
-
-	if err := executor.executeExecNewPod(hook, deployment, "hook", "test"); err == nil {
-		t.Fatalf("expected an error")
-	}
-}
-
-func TestHookExecutor_executeExecNewPodSucceeded(t *testing.T) {
-	hook := &appsv1.LifecycleHook{
-		FailurePolicy: appsv1.LifecycleHookFailurePolicyAbort,
-		ExecNewPod: &appsv1.ExecNewPodHook{
-			ContainerName: "container1",
-		},
-	}
-
-	config := appstest.OkDeploymentConfig(1)
-	deployment, _ := appsutil.MakeDeployment(config)
-	deployment.Spec.Template.Spec.NodeSelector = map[string]string{"labelKey1": "labelValue1", "labelKey2": "labelValue2"}
-
-	client := newTestClient(config)
-	podCreated := make(chan struct{})
-
-	var createdPod *corev1.Pod
-	client.AddReactor("create", "pods", func(a clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-		defer close(podCreated)
-		action := a.(clientgotesting.CreateAction)
-		object := action.GetObject()
-		createdPod = object.(*corev1.Pod)
-		return true, createdPod, nil
-	})
-	podsWatch := watch.NewFake()
-	client.AddWatchReactor("pods", clientgotesting.DefaultWatchReactor(podsWatch, nil))
-
-	podLogs := &bytes.Buffer{}
-	// Simulate creation of the lifecycle pod
-	go func() {
-		<-podCreated
-		podsWatch.Add(createdPod)
-		updatedPod := createdPod.DeepCopy()
-		updatedPod.Status.Phase = corev1.PodSucceeded
-		podsWatch.Modify(updatedPod)
-	}()
-
-	executor := &hookExecutor{
-		pods: client.CoreV1(),
-		out:  podLogs,
-		getPodLogs: func(*corev1.Pod) (io.ReadCloser, error) {
-			return ioutil.NopCloser(strings.NewReader("test")), nil
-		},
-	}
-
-	err := executor.executeExecNewPod(hook, deployment, "hook", "test")
-
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-
-	if e, a := "--> test: Running hook pod ...\ntest--> test: Success\n", podLogs.String(); e != a {
-		t.Fatalf("expected pod logs to be %q, got %q", e, a)
-	}
-
-	if e, a := deployment.Spec.Template.Spec.NodeSelector, createdPod.Spec.NodeSelector; !reflect.DeepEqual(e, a) {
-		t.Fatalf("expected pod NodeSelector %v, got %v", e, a)
-	}
-
-	if createdPod.Spec.ActiveDeadlineSeconds == nil {
-		t.Fatalf("expected ActiveDeadlineSeconds to be set on the deployment hook executor pod")
-	}
-
-	if *createdPod.Spec.ActiveDeadlineSeconds >= appsutil.MaxDeploymentDurationSeconds {
-		t.Fatalf("expected ActiveDeadlineSeconds %+v to be lower than %+v", *createdPod.Spec.ActiveDeadlineSeconds, appsutil.MaxDeploymentDurationSeconds)
-	}
-}
-
-func TestHookExecutor_executeExecNewPodFailed(t *testing.T) {
-	hook := &appsv1.LifecycleHook{
-		FailurePolicy: appsv1.LifecycleHookFailurePolicyAbort,
-		ExecNewPod: &appsv1.ExecNewPodHook{
-			ContainerName: "container1",
-		},
-	}
-
-	config := appstest.OkDeploymentConfig(1)
-	deployment, _ := appsutil.MakeDeployment(config)
-
-	client := newTestClient(config)
-	podCreated := make(chan struct{})
-
-	var createdPod *corev1.Pod
-	client.AddReactor("create", "pods", func(a clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
-		defer close(podCreated)
-		action := a.(clientgotesting.CreateAction)
-		object := action.GetObject()
-		createdPod = object.(*corev1.Pod)
-		return true, createdPod, nil
-	})
-	podsWatch := watch.NewFake()
-	client.AddWatchReactor("pods", clientgotesting.DefaultWatchReactor(podsWatch, nil))
-
-	go func() {
-		<-podCreated
-		podsWatch.Add(createdPod)
-		updatedPod := createdPod.DeepCopy()
-		updatedPod.Status.Phase = corev1.PodFailed
-		podsWatch.Modify(updatedPod)
-	}()
-
-	executor := &hookExecutor{
-		pods: client.CoreV1(),
-		out:  ioutil.Discard,
-		getPodLogs: func(*corev1.Pod) (io.ReadCloser, error) {
-			return ioutil.NopCloser(strings.NewReader("test")), nil
-		},
-	}
-
-	err := executor.executeExecNewPod(hook, deployment, "hook", "test")
-	if err == nil {
-		t.Fatalf("expected an error, got none")
-	}
-	t.Logf("got expected error: %T", err)
-}
-
-func TestHookExecutor_makeHookPodInvalidContainerRef(t *testing.T) {
-	hook := &appsv1.LifecycleHook{
-		FailurePolicy: appsv1.LifecycleHookFailurePolicyAbort,
-		ExecNewPod: &appsv1.ExecNewPodHook{
-			ContainerName: "undefined",
-		},
-	}
-
-	config := appstest.OkDeploymentConfig(1)
-	strategy := appsv1.DeploymentStrategy{
-		Type:           appsv1.DeploymentStrategyTypeRecreate,
-		RecreateParams: &appsv1.RecreateDeploymentStrategyParams{},
-	}
-	deployment, _ := appsutil.MakeDeployment(config)
-
-	_, err := createHookPodManifest(hook, deployment, &strategy, "hook", nowFunc().Time)
-	if err == nil {
-		t.Fatalf("expected an error")
-	}
-}
-
-func TestHookExecutor_makeHookPod(t *testing.T) {
-	deploymentName := "deployment-1"
-	deploymentNamespace := "test"
-	maxDeploymentDurationSeconds := appsutil.MaxDeploymentDurationSeconds
-	gracePeriod := int64(10)
-
-	tests := []struct {
-		name                string
-		hook                *appsv1.LifecycleHook
-		expected            *corev1.Pod
-		strategyLabels      map[string]string
-		strategyAnnotations map[string]string
-	}{
-		{
-			name: "overrides",
-			hook: &appsv1.LifecycleHook{
-				FailurePolicy: appsv1.LifecycleHookFailurePolicyAbort,
-				ExecNewPod: &appsv1.ExecNewPodHook{
-					ContainerName: "container1",
-					Command:       []string{"overridden"},
-					Env: []corev1.EnvVar{
-						{
-							Name:  "name",
-							Value: "value",
-						},
-						{
-							Name:  "ENV1",
-							Value: "overridden",
-						},
-					},
-					Volumes: []string{"volume-2"},
-				},
-			},
-			expected: &corev1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      naming.GetPodName(deploymentName, "hook"),
-					Namespace: "test",
-					Labels: map[string]string{
-						appsv1.DeployerPodForDeploymentLabel: deploymentName,
-						deploymentPodTypeLabel:               "hook",
-					},
-					Annotations: map[string]string{
-						appsv1.DeploymentAnnotation: deploymentName,
-					},
-				},
-				Spec: corev1.PodSpec{
-					RestartPolicy: corev1.RestartPolicyNever,
-					Volumes: []corev1.Volume{
-						{
-							Name:         "volume-2",
-							VolumeSource: corev1.VolumeSource{},
-						},
-					},
-					ActiveDeadlineSeconds: &maxDeploymentDurationSeconds,
-					Containers: []corev1.Container{
-						{
-							Name:    "lifecycle",
-							Image:   "registry:8080/repo1:ref1",
-							Command: []string{"overridden"},
-							Env: []corev1.EnvVar{
-								{
-									Name:  "name",
-									Value: "value",
-								},
-								{
-									Name:  "ENV1",
-									Value: "overridden",
-								},
-								{
-									Name:  "OPENSHIFT_DEPLOYMENT_NAME",
-									Value: deploymentName,
-								},
-								{
-									Name:  "OPENSHIFT_DEPLOYMENT_NAMESPACE",
-									Value: deploymentNamespace,
-								},
-							},
-							ImagePullPolicy: corev1.PullIfNotPresent,
-							Resources: corev1.ResourceRequirements{
-								Limits: corev1.ResourceList{
-									corev1.ResourceCPU:    resource.MustParse("10"),
-									corev1.ResourceMemory: resource.MustParse("10M"),
-								},
-							},
-							VolumeMounts: []corev1.VolumeMount{
-								{
-									Name:      "volume-2",
-									ReadOnly:  true,
-									MountPath: "/mnt/volume-2",
-								},
-							},
-						},
-					},
-					TerminationGracePeriodSeconds: &gracePeriod,
-					ImagePullSecrets: []corev1.LocalObjectReference{
-						{
-							Name: "secret-1",
-						},
-					},
-				},
-			},
-		},
-		{
-			name: "no overrides",
-			hook: &appsv1.LifecycleHook{
-				FailurePolicy: appsv1.LifecycleHookFailurePolicyAbort,
-				ExecNewPod: &appsv1.ExecNewPodHook{
-					ContainerName: "container1",
-				},
-			},
-			expected: &corev1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      naming.GetPodName(deploymentName, "hook"),
-					Namespace: "test",
-					Labels: map[string]string{
-						"openshift.io/deployer-pod.type":     "hook",
-						appsv1.DeployerPodForDeploymentLabel: deploymentName,
-					},
-					Annotations: map[string]string{
-						appsv1.DeploymentAnnotation: deploymentName,
-					},
-				},
-				Spec: corev1.PodSpec{
-					RestartPolicy:         corev1.RestartPolicyNever,
-					ActiveDeadlineSeconds: &maxDeploymentDurationSeconds,
-					Volumes:               []corev1.Volume{},
-					Containers: []corev1.Container{
-						{
-							Name:  "lifecycle",
-							Image: "registry:8080/repo1:ref1",
-							Env: []corev1.EnvVar{
-								{
-									Name:  "ENV1",
-									Value: "VAL1",
-								},
-								{
-									Name:  "OPENSHIFT_DEPLOYMENT_NAME",
-									Value: deploymentName,
-								},
-								{
-									Name:  "OPENSHIFT_DEPLOYMENT_NAMESPACE",
-									Value: deploymentNamespace,
-								},
-							},
-							ImagePullPolicy: corev1.PullIfNotPresent,
-							VolumeMounts:    []corev1.VolumeMount{},
-							Resources: corev1.ResourceRequirements{
-								Limits: corev1.ResourceList{
-									corev1.ResourceCPU:    resource.MustParse("10"),
-									corev1.ResourceMemory: resource.MustParse("10M"),
-								},
-							},
-						},
-					},
-					TerminationGracePeriodSeconds: &gracePeriod,
-					ImagePullSecrets: []corev1.LocalObjectReference{
-						{
-							Name: "secret-1",
-						},
-					},
-				},
-			},
-		},
-		{
-			name: "labels and annotations",
-			hook: &appsv1.LifecycleHook{
-				FailurePolicy: appsv1.LifecycleHookFailurePolicyAbort,
-				ExecNewPod: &appsv1.ExecNewPodHook{
-					ContainerName: "container1",
-				},
-			},
-			expected: &corev1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      naming.GetPodName(deploymentName, "hook"),
-					Namespace: "test",
-					Labels: map[string]string{
-						"openshift.io/deployer-pod.type":     "hook",
-						appsv1.DeployerPodForDeploymentLabel: deploymentName,
-						"label1":                             "value1",
-					},
-					Annotations: map[string]string{
-						appsv1.DeploymentAnnotation: deploymentName,
-						"annotation2":               "value2",
-					},
-				},
-				Spec: corev1.PodSpec{
-					RestartPolicy:         corev1.RestartPolicyNever,
-					ActiveDeadlineSeconds: &maxDeploymentDurationSeconds,
-					Volumes:               []corev1.Volume{},
-					Containers: []corev1.Container{
-						{
-							Name:  "lifecycle",
-							Image: "registry:8080/repo1:ref1",
-							Env: []corev1.EnvVar{
-								{
-									Name:  "ENV1",
-									Value: "VAL1",
-								},
-								{
-									Name:  "OPENSHIFT_DEPLOYMENT_NAME",
-									Value: deploymentName,
-								},
-								{
-									Name:  "OPENSHIFT_DEPLOYMENT_NAMESPACE",
-									Value: deploymentNamespace,
-								},
-							},
-							ImagePullPolicy: corev1.PullIfNotPresent,
-							VolumeMounts:    []corev1.VolumeMount{},
-							Resources: corev1.ResourceRequirements{
-								Limits: corev1.ResourceList{
-									corev1.ResourceCPU:    resource.MustParse("10"),
-									corev1.ResourceMemory: resource.MustParse("10M"),
-								},
-							},
-						},
-					},
-					TerminationGracePeriodSeconds: &gracePeriod,
-					ImagePullSecrets: []corev1.LocalObjectReference{
-						{
-							Name: "secret-1",
-						},
-					},
-				},
-			},
-			strategyLabels: map[string]string{
-				appsv1.DeployerPodForDeploymentLabel: "ignoredValue",
-				"label1":                             "value1",
-			},
-			strategyAnnotations: map[string]string{"annotation2": "value2"},
-		},
-		{
-			name: "allways pull image",
-			hook: &appsv1.LifecycleHook{
-				FailurePolicy: appsv1.LifecycleHookFailurePolicyAbort,
-				ExecNewPod: &appsv1.ExecNewPodHook{
-					ContainerName: "container2",
-				},
-			},
-			expected: &corev1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      naming.GetPodName(deploymentName, "hook"),
-					Namespace: "test",
-					Labels: map[string]string{
-						deploymentPodTypeLabel:               "hook",
-						appsv1.DeployerPodForDeploymentLabel: deploymentName,
-					},
-					Annotations: map[string]string{
-						appsv1.DeploymentAnnotation: deploymentName,
-					},
-				},
-				Spec: corev1.PodSpec{
-					RestartPolicy:         corev1.RestartPolicyNever,
-					ActiveDeadlineSeconds: &maxDeploymentDurationSeconds,
-					Volumes:               []corev1.Volume{},
-					Containers: []corev1.Container{
-						{
-							Name:  "lifecycle",
-							Image: "registry:8080/repo1:ref2",
-							Env: []corev1.EnvVar{
-								{
-									Name:  "OPENSHIFT_DEPLOYMENT_NAME",
-									Value: deploymentName,
-								},
-								{
-									Name:  "OPENSHIFT_DEPLOYMENT_NAMESPACE",
-									Value: deploymentNamespace,
-								},
-							},
-							ImagePullPolicy: corev1.PullAlways,
-							VolumeMounts:    []corev1.VolumeMount{},
-						},
-					},
-					TerminationGracePeriodSeconds: &gracePeriod,
-					ImagePullSecrets: []corev1.LocalObjectReference{
-						{
-							Name: "secret-1",
-						},
-					},
-				},
-			},
-		},
-	}
-
-	for _, test := range tests {
-		t.Logf("evaluating test: %s", test.name)
-		config, deployment := deployment("deployment", "test", test.strategyLabels, test.strategyAnnotations)
-		newStrategy := config.Spec.Strategy
-		pod, err := createHookPodManifest(test.hook, deployment, &newStrategy, "hook", nowFunc().Time)
-		if err != nil {
-			t.Fatalf("unexpected error: %s", err)
-		}
-		for _, c := range pod.Spec.Containers {
-			sort.Sort(envByNameAsc(c.Env))
-		}
-		for _, c := range test.expected.Spec.Containers {
-			sort.Sort(envByNameAsc(c.Env))
-		}
-
-		if *pod.Spec.ActiveDeadlineSeconds >= *test.expected.Spec.ActiveDeadlineSeconds {
-			t.Errorf("expected pod ActiveDeadlineSeconds %+v to be lower than %+v", *pod.Spec.ActiveDeadlineSeconds, *test.expected.Spec.ActiveDeadlineSeconds)
-		}
-		// Copy the ActiveDeadlineSeconds; the deployer pod has already been running for 5 seconds.
-		test.expected.Spec.ActiveDeadlineSeconds = pod.Spec.ActiveDeadlineSeconds
-		if !kapihelper.Semantic.DeepEqual(pod, test.expected) {
-			t.Errorf("unexpected pod diff: %v", diff.ObjectReflectDiff(pod, test.expected))
-		}
-	}
-}
-
-func TestHookExecutor_makeHookPodRestart(t *testing.T) {
-	hook := &appsv1.LifecycleHook{
-		FailurePolicy: appsv1.LifecycleHookFailurePolicyRetry,
-		ExecNewPod: &appsv1.ExecNewPodHook{
-			ContainerName: "container1",
-		},
-	}
-
-	config := appstest.OkDeploymentConfig(1)
-	deployment, _ := appsutil.MakeDeployment(config)
-	newStrategy := config.Spec.Strategy
-	pod, err := createHookPodManifest(hook, deployment, &newStrategy, "hook", nowFunc().Time)
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-
-	if e, a := corev1.RestartPolicyOnFailure, pod.Spec.RestartPolicy; string(e) != string(a) {
-		t.Errorf("expected pod restart policy %s, got %s", e, a)
-	}
-}
-
-func deployment(name, namespace string, strategyLabels, strategyAnnotations map[string]string) (*appsv1.DeploymentConfig, *corev1.ReplicationController) {
-	config := &appsv1.DeploymentConfig{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: namespace,
-		},
-		Status: appsv1.DeploymentConfigStatus{
-			LatestVersion: 1,
-		},
-		Spec: appsv1.DeploymentConfigSpec{
-			Replicas: 1,
-			Selector: map[string]string{"a": "b"},
-			Strategy: appsv1.DeploymentStrategy{
-				Type: appsv1.DeploymentStrategyTypeRecreate,
-				Resources: corev1.ResourceRequirements{
-					Limits: corev1.ResourceList{
-						corev1.ResourceName(corev1.ResourceCPU):    resource.MustParse("10"),
-						corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("10G"),
-					},
-				},
-				Labels:      strategyLabels,
-				Annotations: strategyAnnotations,
-			},
-			Template: &corev1.PodTemplateSpec{
-				Spec: corev1.PodSpec{
-					Containers: []corev1.Container{
-						{
-							Name:  "container1",
-							Image: "registry:8080/repo1:ref1",
-							Env: []corev1.EnvVar{
-								{
-									Name:  "ENV1",
-									Value: "VAL1",
-								},
-							},
-							ImagePullPolicy: corev1.PullIfNotPresent,
-							Resources: corev1.ResourceRequirements{
-								Limits: corev1.ResourceList{
-									corev1.ResourceCPU:    resource.MustParse("10"),
-									corev1.ResourceMemory: resource.MustParse("10M"),
-								},
-							},
-							VolumeMounts: []corev1.VolumeMount{
-								{
-									Name:      "volume-2",
-									ReadOnly:  true,
-									MountPath: "/mnt/volume-2",
-								},
-							},
-						},
-						{
-							Name:            "container2",
-							Image:           "registry:8080/repo1:ref2",
-							ImagePullPolicy: corev1.PullAlways,
-						},
-					},
-					Volumes: []corev1.Volume{
-						{
-							Name: "volume-1",
-						},
-						{
-							Name: "volume-2",
-						},
-					},
-					RestartPolicy: corev1.RestartPolicyAlways,
-					DNSPolicy:     corev1.DNSClusterFirst,
-					ImagePullSecrets: []corev1.LocalObjectReference{
-						{
-							Name: "secret-1",
-						},
-					},
-				},
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{"a": "b"},
-				},
-			},
-		},
-	}
-	deployment, _ := appsutil.MakeDeployment(config)
-	deployment.Namespace = namespace
-	return config, deployment
-}
-
-type envByNameAsc []corev1.EnvVar
-
-func (a envByNameAsc) Len() int {
-	return len(a)
-}
-func (a envByNameAsc) Swap(i, j int) {
-	a[i], a[j] = a[j], a[i]
-}
-func (a envByNameAsc) Less(i, j int) bool {
-	return a[i].Name < a[j].Name
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/util/appstest/testutil.go b/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/util/appstest/testutil.go
deleted file mode 100644
index 443adff42093..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/util/appstest/testutil.go
+++ /dev/null
@@ -1,268 +0,0 @@
-package appstest
-
-import (
-	"testing"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-)
-
-const (
-	ImageStreamName      = "test-image-stream"
-	ImageID              = "0000000000000000000000000000000000000000000000000000000000000001"
-	DockerImageReference = "registry:5000/openshift/test-image-stream@sha256:0000000000000000000000000000000000000000000000000000000000000001"
-)
-
-func OkDeploymentConfig(version int64) *appsv1.DeploymentConfig {
-	return &appsv1.DeploymentConfig{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "config",
-			Namespace: corev1.NamespaceDefault,
-			SelfLink:  "/apis/apps.openshift.io/v1/deploymentConfig/config",
-		},
-		Spec:   OkDeploymentConfigSpec(),
-		Status: OkDeploymentConfigStatus(version),
-	}
-}
-
-func OkDeploymentConfigSpec() appsv1.DeploymentConfigSpec {
-	return appsv1.DeploymentConfigSpec{
-		Replicas: 1,
-		Selector: OkSelector(),
-		Strategy: OkStrategy(),
-		Template: OkPodTemplate(),
-		Triggers: []appsv1.DeploymentTriggerPolicy{
-			OkImageChangeTrigger(),
-			OkConfigChangeTrigger(),
-		},
-	}
-}
-
-func OkDeploymentConfigStatus(version int64) appsv1.DeploymentConfigStatus {
-	return appsv1.DeploymentConfigStatus{
-		LatestVersion: version,
-	}
-}
-
-func OkImageChangeDetails() *appsv1.DeploymentDetails {
-	return &appsv1.DeploymentDetails{
-		Causes: []appsv1.DeploymentCause{{
-			Type: appsv1.DeploymentTriggerOnImageChange,
-			ImageTrigger: &appsv1.DeploymentCauseImageTrigger{
-				From: corev1.ObjectReference{
-					Name: ImageStreamName + ":latest",
-					Kind: "ImageStreamTag",
-				}}}}}
-}
-
-func OkConfigChangeDetails() *appsv1.DeploymentDetails {
-	return &appsv1.DeploymentDetails{
-		Causes: []appsv1.DeploymentCause{{
-			Type: appsv1.DeploymentTriggerOnConfigChange,
-		}}}
-}
-
-func OkStrategy() appsv1.DeploymentStrategy {
-	return appsv1.DeploymentStrategy{
-		Type: appsv1.DeploymentStrategyTypeRecreate,
-		Resources: corev1.ResourceRequirements{
-			Limits: corev1.ResourceList{
-				corev1.ResourceName(corev1.ResourceCPU):    resource.MustParse("10"),
-				corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("10G"),
-			},
-		},
-		RecreateParams: &appsv1.RecreateDeploymentStrategyParams{
-			TimeoutSeconds: mkintp(20),
-		},
-		ActiveDeadlineSeconds: mkintp(21600),
-	}
-}
-
-func OkCustomStrategy() appsv1.DeploymentStrategy {
-	return appsv1.DeploymentStrategy{
-		Type:         appsv1.DeploymentStrategyTypeCustom,
-		CustomParams: OkCustomParams(),
-		Resources: corev1.ResourceRequirements{
-			Limits: corev1.ResourceList{
-				corev1.ResourceName(corev1.ResourceCPU):    resource.MustParse("10"),
-				corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("10G"),
-			},
-		},
-	}
-}
-
-func OkCustomParams() *appsv1.CustomDeploymentStrategyParams {
-	return &appsv1.CustomDeploymentStrategyParams{
-		Image: "openshift/origin-deployer",
-		Environment: []corev1.EnvVar{
-			{
-				Name:  "ENV1",
-				Value: "VAL1",
-			},
-		},
-		Command: []string{"/bin/echo", "hello", "world"},
-	}
-}
-
-func mkintp(i int) *int64 {
-	v := int64(i)
-	return &v
-}
-
-func OkRollingStrategy() appsv1.DeploymentStrategy {
-	return appsv1.DeploymentStrategy{
-		Type: appsv1.DeploymentStrategyTypeRolling,
-		RollingParams: &appsv1.RollingDeploymentStrategyParams{
-			UpdatePeriodSeconds: mkintp(1),
-			IntervalSeconds:     mkintp(1),
-			TimeoutSeconds:      mkintp(20),
-		},
-		Resources: corev1.ResourceRequirements{
-			Limits: corev1.ResourceList{
-				corev1.ResourceName(corev1.ResourceCPU):    resource.MustParse("10"),
-				corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("10G"),
-			},
-		},
-	}
-}
-
-func OkSelector() map[string]string {
-	return map[string]string{"a": "b"}
-}
-
-func OkPodTemplate() *corev1.PodTemplateSpec {
-	one := int64(1)
-	return &corev1.PodTemplateSpec{
-		Spec: corev1.PodSpec{
-			Containers: []corev1.Container{
-				{
-					Name:  "container1",
-					Image: "registry:8080/repo1:ref1",
-					Env: []corev1.EnvVar{
-						{
-							Name:  "ENV1",
-							Value: "VAL1",
-						},
-					},
-					ImagePullPolicy:          corev1.PullIfNotPresent,
-					TerminationMessagePath:   "/dev/termination-log",
-					TerminationMessagePolicy: corev1.TerminationMessageReadFile,
-				},
-				{
-					Name:                     "container2",
-					Image:                    "registry:8080/repo1:ref2",
-					ImagePullPolicy:          corev1.PullIfNotPresent,
-					TerminationMessagePath:   "/dev/termination-log",
-					TerminationMessagePolicy: corev1.TerminationMessageReadFile,
-				},
-			},
-			RestartPolicy:                 corev1.RestartPolicyAlways,
-			DNSPolicy:                     corev1.DNSClusterFirst,
-			TerminationGracePeriodSeconds: &one,
-			SchedulerName:                 corev1.DefaultSchedulerName,
-			SecurityContext:               &corev1.PodSecurityContext{},
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Labels: OkSelector(),
-		},
-	}
-}
-
-func OkPodTemplateChanged() *corev1.PodTemplateSpec {
-	template := OkPodTemplate()
-	template.Spec.Containers[0].Image = DockerImageReference
-	return template
-}
-
-func OkPodTemplateMissingImage(missing ...string) *corev1.PodTemplateSpec {
-	set := sets.NewString(missing...)
-	template := OkPodTemplate()
-	for i, c := range template.Spec.Containers {
-		if set.Has(c.Name) {
-			// remember that range yields copies, so we have to reference the slice entry explicitly
-			template.Spec.Containers[i].Image = ""
-		}
-	}
-	return template
-}
-
-func OkConfigChangeTrigger() appsv1.DeploymentTriggerPolicy {
-	return appsv1.DeploymentTriggerPolicy{
-		Type: appsv1.DeploymentTriggerOnConfigChange,
-	}
-}
-
-func OkImageChangeTrigger() appsv1.DeploymentTriggerPolicy {
-	return appsv1.DeploymentTriggerPolicy{
-		Type: appsv1.DeploymentTriggerOnImageChange,
-		ImageChangeParams: &appsv1.DeploymentTriggerImageChangeParams{
-			Automatic: true,
-			ContainerNames: []string{
-				"container1",
-			},
-			From: corev1.ObjectReference{
-				Kind: "ImageStreamTag",
-				Name: ImageStreamName + ":latest",
-			},
-		},
-	}
-}
-
-func OkTriggeredImageChange() appsv1.DeploymentTriggerPolicy {
-	ict := OkImageChangeTrigger()
-	ict.ImageChangeParams.LastTriggeredImage = DockerImageReference
-	return ict
-}
-
-func OkNonAutomaticICT() appsv1.DeploymentTriggerPolicy {
-	ict := OkImageChangeTrigger()
-	ict.ImageChangeParams.Automatic = false
-	return ict
-}
-
-func OkTriggeredNonAutomatic() appsv1.DeploymentTriggerPolicy {
-	ict := OkNonAutomaticICT()
-	ict.ImageChangeParams.LastTriggeredImage = DockerImageReference
-	return ict
-}
-
-func TestDeploymentConfig(config *appsv1.DeploymentConfig) *appsv1.DeploymentConfig {
-	config.Spec.Test = true
-	return config
-}
-
-func RemoveTriggerTypes(config *appsv1.DeploymentConfig, triggerTypes ...appsv1.DeploymentTriggerType) {
-	types := sets.NewString()
-	for _, triggerType := range triggerTypes {
-		types.Insert(string(triggerType))
-	}
-
-	remaining := []appsv1.DeploymentTriggerPolicy{}
-	for _, trigger := range config.Spec.Triggers {
-		if types.Has(string(trigger.Type)) {
-			continue
-		}
-		remaining = append(remaining, trigger)
-	}
-
-	config.Spec.Triggers = remaining
-}
-
-func RoundTripConfig(t *testing.T, config *appsv1.DeploymentConfig) *appsv1.DeploymentConfig {
-	versioned, err := scheme.Scheme.ConvertToVersion(config, appsv1.SchemeGroupVersion)
-	if err != nil {
-		t.Errorf("unexpected conversion error: %v", err)
-		return nil
-	}
-	defaulted, err := scheme.Scheme.ConvertToVersion(versioned, appsv1.SchemeGroupVersion)
-	if err != nil {
-		t.Errorf("unexpected conversion error: %v", err)
-		return nil
-	}
-	return defaulted.(*appsv1.DeploymentConfig)
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/util/events.go b/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/util/events.go
deleted file mode 100644
index fc47307b7e3c..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/strategy/util/events.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package util
-
-import (
-	"fmt"
-	"io"
-	"time"
-
-	"k8s.io/klog"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-	"k8s.io/client-go/tools/reference"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-
-	"github.com/openshift/library-go/pkg/apps/appsserialization"
-	"github.com/openshift/library-go/pkg/apps/appsutil"
-)
-
-// RecordConfigEvent records an event for the deployment config referenced by the
-// deployment.
-func RecordConfigEvent(client corev1client.EventsGetter, deployment *corev1.ReplicationController, eventType, reason,
-	msg string) {
-	t := metav1.Time{Time: time.Now()}
-	var obj runtime.Object = deployment
-	if config, err := appsserialization.DecodeDeploymentConfig(deployment); err == nil {
-		obj = config
-	} else {
-		klog.Errorf("Unable to decode deployment config from %s/%s: %v", deployment.Namespace, deployment.Name, err)
-	}
-	ref, err := reference.GetReference(scheme.Scheme, obj)
-	if err != nil {
-		klog.Errorf("Unable to get reference for %#v: %v", obj, err)
-		return
-	}
-	event := &corev1.Event{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
-			Namespace: ref.Namespace,
-		},
-		InvolvedObject: *ref,
-		Reason:         reason,
-		Message:        msg,
-		Source: corev1.EventSource{
-			Component: appsutil.DeployerPodNameFor(deployment),
-		},
-		FirstTimestamp: t,
-		LastTimestamp:  t,
-		Count:          1,
-		Type:           eventType,
-	}
-	if _, err := client.Events(ref.Namespace).Create(event); err != nil {
-		klog.Errorf("Could not create event '%#v': %v", event, err)
-	}
-}
-
-// RecordConfigWarnings records all warning events from the replication controller to the
-// associated deployment config.
-func RecordConfigWarnings(client corev1client.EventsGetter, rc *corev1.ReplicationController, out io.Writer) {
-	if rc == nil {
-		return
-	}
-	events, err := client.Events(rc.Namespace).Search(scheme.Scheme, rc)
-	if err != nil {
-		fmt.Fprintf(out, "--> Error listing events for replication controller %s: %v\n", rc.Name, err)
-		return
-	}
-	// TODO: Do we need to sort the events?
-	for _, e := range events.Items {
-		if e.Type == corev1.EventTypeWarning {
-			fmt.Fprintf(out, "-->  %s: %s %s\n", e.Reason, rc.Name, e.Message)
-			RecordConfigEvent(client, rc, e.Type, e.Reason, e.Message)
-		}
-	}
-}
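
The event name built above, the involved object's name plus the hex UnixNano timestamp, follows the same convention client-go's event recorder uses, keeping names unique per object in practice. A one-function sketch of that scheme:

```go
package main

import (
	"fmt"
	"time"
)

// eventName mirrors RecordConfigEvent's naming: object name plus the
// creation timestamp in hexadecimal nanoseconds.
func eventName(refName string, t time.Time) string {
	return fmt.Sprintf("%v.%x", refName, t.UnixNano())
}

func main() {
	fmt.Println(eventName("config-1", time.Unix(0, 1566222349451597341)))
}
```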
diff --git a/vendor/github.com/openshift/oc/pkg/cli/deployer/version.go b/vendor/github.com/openshift/oc/pkg/cli/deployer/version.go
deleted file mode 100644
index 092c0db51b7c..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/deployer/version.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package deployer
-
-import (
-	"fmt"
-	"io"
-
-	"github.com/spf13/cobra"
-
-	"k8s.io/apimachinery/pkg/version"
-)
-
-// NewCmdVersion provides a shim around version for
-// non-client packages that require version information
-func NewCmdVersion(fullName string, versionInfo version.Info, out io.Writer) *cobra.Command {
-	cmd := &cobra.Command{
-		Use:   "version",
-		Short: "Display version",
-		Long:  "Display version",
-		Run: func(cmd *cobra.Command, args []string) {
-			fmt.Fprintf(out, "%s %v\n", fullName, versionInfo)
-		},
-	}
-
-	return cmd
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/experimental/dockergc/client.go b/vendor/github.com/openshift/oc/pkg/cli/experimental/dockergc/client.go
deleted file mode 100644
index 4a2f206e2d3e..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/experimental/dockergc/client.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package dockergc
-
-import (
-	"context"
-	"time"
-
-	dockertypes "github.com/docker/docker/api/types"
-	dockerapi "github.com/docker/docker/client"
-)
-
-type dockerClient struct {
-	// timeout is the timeout of short running docker operations.
-	timeout time.Duration
-	// docker API client
-	client *dockerapi.Client
-}
-
-func newDockerClient(timeout time.Duration) (*dockerClient, error) {
-	client, err := dockerapi.NewEnvClient()
-	if err != nil {
-		return nil, err
-	}
-	return &dockerClient{
-		client:  client,
-		timeout: timeout,
-	}, nil
-}
-
-func clientErr(ctx context.Context, err error) error {
-	if ctx.Err() != nil {
-		return ctx.Err()
-	}
-	return err
-}
-
-func (c *dockerClient) getTimeoutContext() (context.Context, context.CancelFunc) {
-	return context.WithTimeout(context.Background(), c.timeout)
-}
-
-func (c *dockerClient) Info() (*dockertypes.Info, error) {
-	ctx, cancel := c.getTimeoutContext()
-	defer cancel()
-	info, err := c.client.Info(ctx)
-	if err := clientErr(ctx, err); err != nil {
-		return nil, err
-	}
-	return &info, nil
-}
-
-func (c *dockerClient) ContainerList(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) {
-	ctx, cancel := c.getTimeoutContext()
-	defer cancel()
-	containers, err := c.client.ContainerList(ctx, options)
-	if err := clientErr(ctx, err); err != nil {
-		return nil, err
-	}
-	return containers, nil
-}
-
-func (c *dockerClient) ContainerRemove(id string, opts dockertypes.ContainerRemoveOptions) error {
-	ctx, cancel := c.getTimeoutContext()
-	defer cancel()
-	err := c.client.ContainerRemove(ctx, id, opts)
-	return clientErr(ctx, err)
-}
-
-func (c *dockerClient) ImageList(opts dockertypes.ImageListOptions) ([]dockertypes.ImageSummary, error) {
-	ctx, cancel := c.getTimeoutContext()
-	defer cancel()
-	images, err := c.client.ImageList(ctx, opts)
-	if err := clientErr(ctx, err); err != nil {
-		return nil, err
-	}
-	return images, nil
-}
-
-func (c *dockerClient) ImageRemove(image string, opts dockertypes.ImageRemoveOptions) error {
-	ctx, cancel := c.getTimeoutContext()
-	defer cancel()
-	_, err := c.client.ImageRemove(ctx, image, opts)
-	return clientErr(ctx, err)
-}
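
Every wrapper above applies the same pattern: a fresh per-call timeout context, and clientErr preferring ctx.Err() so that timeouts surface as context.DeadlineExceeded rather than whatever transport error the client returned. A generic, dependency-free sketch of that pattern (the callWithTimeout helper is illustrative):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// callWithTimeout mirrors the dockerClient wrappers: run op under a fresh
// deadline and, if the context expired, report that instead of op's error.
func callWithTimeout(timeout time.Duration, op func(context.Context) error) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	err := op(ctx)
	if ctx.Err() != nil {
		return ctx.Err()
	}
	return err
}

func main() {
	err := callWithTimeout(10*time.Millisecond, func(ctx context.Context) error {
		select {
		case <-time.After(time.Second): // stands in for a slow docker API call
			return nil
		case <-ctx.Done():
			return errors.New("transport aborted")
		}
	})
	fmt.Println(err) // context deadline exceeded
}
```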
diff --git a/vendor/github.com/openshift/oc/pkg/cli/experimental/dockergc/dockergc.go b/vendor/github.com/openshift/oc/pkg/cli/experimental/dockergc/dockergc.go
deleted file mode 100644
index 367e144e6fb6..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/experimental/dockergc/dockergc.go
+++ /dev/null
@@ -1,273 +0,0 @@
-package dockergc
-
-import (
-	"fmt"
-	"os"
-	"os/exec"
-	"sort"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/spf13/cobra"
-	"k8s.io/klog"
-
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	dockertypes "github.com/docker/docker/api/types"
-	dockerfilters "github.com/docker/docker/api/types/filters"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const (
-	DefaultImageGCHighThresholdPercent = int32(80)
-	DefaultImageGCLowThresholdPercent  = int32(60)
-)
-
-var (
-	DefaultMinimumGCAge = metav1.Duration{Duration: time.Hour}
-
-	dockerTimeout = time.Duration(2 * time.Minute)
-)
-
-// dockerGCConfigCmdOptions are options supported by the dockergc admin command.
-type dockerGCConfigCmdOptions struct {
-	// DryRun is true if the command was invoked with --dry-run=true
-	DryRun bool
-	// MinimumGCAge is the minimum age for a container or unused image before
-	// it is garbage collected.
-	MinimumGCAge metav1.Duration
-	// ImageGCHighThresholdPercent is the percent of disk usage after which
-	// image garbage collection is always run.
-	ImageGCHighThresholdPercent int32
-	// ImageGCLowThresholdPercent is the percent of disk usage before which
-	// image garbage collection is never run. Lowest disk usage to garbage
-	// collect to.
-	ImageGCLowThresholdPercent int32
-}
-
-var (
-	dockerGC_long = templates.LongDesc(`
-		Perform garbage collection to free space in docker storage
-
-		If the OpenShift node is configured to use a container runtime other than docker,
-		docker will still be used to do builds.  However, OpenShift itself may not
-		manage the docker storage since it is not the container runtime for pods.
-
-		This utility allows garbage collection to be done on the docker storage.
-
-		Only the overlay2 docker storage driver is supported at this time.`)
-
-	dockerGC_example = templates.Examples(`
-	  # Perform garbage collection with the default settings
-	  %[1]s %[2]s`)
-)
-
-func NewCmdDockerGCConfig(f kcmdutil.Factory, parentName, name string, streams genericclioptions.IOStreams) *cobra.Command {
-	options := &dockerGCConfigCmdOptions{
-		DryRun:                      false,
-		MinimumGCAge:                DefaultMinimumGCAge,
-		ImageGCHighThresholdPercent: DefaultImageGCHighThresholdPercent,
-		ImageGCLowThresholdPercent:  DefaultImageGCLowThresholdPercent,
-	}
-	cmd := &cobra.Command{
-		Use:     fmt.Sprintf("%s [NAME]", name),
-		Short:   "Perform garbage collection to free space in docker storage",
-		Long:    dockerGC_long,
-		Example: fmt.Sprintf(dockerGC_example, parentName, name),
-		Run: func(cmd *cobra.Command, args []string) {
-			err := Run(f, options, cmd, args)
-			if err == kcmdutil.ErrExit {
-				os.Exit(1)
-			}
-			kcmdutil.CheckErr(err)
-		},
-	}
-
-	cmd.Flags().DurationVar(&options.MinimumGCAge.Duration, "minimum-ttl-duration", options.MinimumGCAge.Duration, "Minimum age for a container or unused image before it is garbage collected.  Examples: '300ms', '10s' or '2h45m'.")
-	cmd.Flags().Int32Var(&options.ImageGCHighThresholdPercent, "image-gc-high-threshold", options.ImageGCHighThresholdPercent, "The percent of disk usage after which image garbage collection is always run.")
-	cmd.Flags().Int32Var(&options.ImageGCLowThresholdPercent, "image-gc-low-threshold", options.ImageGCLowThresholdPercent, "The percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.")
-	cmd.Flags().BoolVar(&options.DryRun, "dry-run", options.DryRun, "Run in single-pass mode with no effect.")
-
-	return cmd
-}
-
-// parseInfo parses df output to return capacity and usage in bytes
-func parseInfo(str string) (int64, int64, error) {
-	fields := strings.Fields(str)
-	if len(fields) != 4 {
-		return 0, 0, fmt.Errorf("unable to parse df output")
-	}
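-	// `df -k --output=size,used` prints a header row ("1K-blocks  Used") followed
-	// by the two values, so fields[2] and fields[3] hold the size and usage in KB.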
-	value, err := strconv.ParseInt(fields[2], 10, 64)
-	if err != nil {
-		return 0, 0, err
-	}
-	capacityKBytes := int64(value)
-	value, err = strconv.ParseInt(fields[3], 10, 64)
-	if err != nil {
-		return 0, 0, err
-	}
-	usageKBytes := int64(value)
-	return capacityKBytes * 1024, usageKBytes * 1024, nil
-}
-
-// getRootDirInfo returns the capacity and usage in bytes for the docker root directory
-func getRootDirInfo(rootDir string) (int64, int64, error) {
-	cmd := exec.Command("df", "-k", "--output=size,used", rootDir)
-	output, err := cmd.Output()
-	if err != nil {
-		return 0, 0, err
-	}
-	return parseInfo(string(output))
-}
-
-func bytesToMB(bytes int64) int64 {
-	return bytes / 1024 / 1024
-}
-
-type oldestContainersFirst []dockertypes.Container
-
-func (s oldestContainersFirst) Len() int           { return len(s) }
-func (s oldestContainersFirst) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-func (s oldestContainersFirst) Less(i, j int) bool { return s[i].Created < s[j].Created }
-
-type oldestImagesFirst []dockertypes.ImageSummary
-
-func (s oldestImagesFirst) Len() int           { return len(s) }
-func (s oldestImagesFirst) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-func (s oldestImagesFirst) Less(i, j int) bool { return s[i].Created < s[j].Created }
-
-// parseDockerTimestamp parses a timestamp returned by the Docker API from string to time.Time
-func parseDockerTimestamp(s string) (time.Time, error) {
-	// Timestamp returned by Docker is in time.RFC3339Nano format.
-	return time.Parse(time.RFC3339Nano, s)
-}
-
-func doGarbageCollection(client *dockerClient, options *dockerGCConfigCmdOptions, rootDir string) error {
-	klog.Infof("gathering disk usage data")
-	capacityBytes, usageBytes, err := getRootDirInfo(rootDir)
-	if err != nil {
-		return err
-	}
-
-	highThresholdBytes := capacityBytes * int64(options.ImageGCHighThresholdPercent) / 100
-	lowThresholdBytes := capacityBytes * int64(options.ImageGCLowThresholdPercent) / 100
-	if usageBytes < highThresholdBytes {
-		klog.Infof("usage is under high threshold (%vMB < %vMB)", bytesToMB(usageBytes), bytesToMB(highThresholdBytes))
-		return nil
-	}
-
-	attemptToFreeBytes := usageBytes - lowThresholdBytes
-	freedBytes := int64(0)
-	klog.Infof("usage exceeds high threshold (%vMB > %vMB), attempting to free %vMB", bytesToMB(usageBytes), bytesToMB(highThresholdBytes), bytesToMB(attemptToFreeBytes))
-
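-	// Free space by removing exited containers first, then unused images,
-	// oldest first, stopping once enough bytes have been freed to drop below
-	// the low threshold.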
-	// containers
-	exitedFilter := dockerfilters.NewArgs()
-	exitedFilter.Add("status", "exited")
-	containers, err := client.ContainerList(dockertypes.ContainerListOptions{All: true, Filters: exitedFilter})
-	if err != nil {
-		return err
-	}
-	klog.Infof("%d exited containers found", len(containers))
-	sort.Sort(oldestContainersFirst(containers))
-	for _, c := range containers {
-		if freedBytes > attemptToFreeBytes {
-			klog.Infof("usage is below low threshold, freed %vMB", bytesToMB(freedBytes))
-			return nil
-		}
-		age := time.Now().Sub(time.Unix(c.Created, 0))
-		if age < options.MinimumGCAge.Duration {
-			klog.Infof("remaining containers are too young")
-			break
-		}
-		klog.Infof("removing container %v (size: %v, age: %v)", c.ID, c.SizeRw, age)
-		var err error
-		if !options.DryRun {
-			err = client.ContainerRemove(c.ID, dockertypes.ContainerRemoveOptions{RemoveVolumes: true})
-		}
-		if err != nil {
-			klog.Infof("unable to remove container: %v", err)
-		} else {
-			freedBytes += c.SizeRw
-		}
-	}
-
-	// images
-	images, err := client.ImageList(dockertypes.ImageListOptions{})
-	if err != nil {
-		return err
-	}
-	sort.Sort(oldestImagesFirst(images))
-	klog.Infof("%d images found", len(images))
-	for _, i := range images {
-		if freedBytes > attemptToFreeBytes {
-			klog.Infof("usage is below low threshold, freed %vMB", bytesToMB(freedBytes))
-			return nil
-		}
-		// filter openshift infra images
-		if len(i.RepoTags) > 0 {
-			if strings.HasPrefix(i.RepoTags[0], "registry.ops.openshift.com/openshift3") ||
-				strings.HasPrefix(i.RepoTags[0], "docker.io/openshift") {
-				klog.Infof("skipping infra image: %v", i.RepoTags[0])
-				continue
-			}
-		}
-		// filter young images
-		age := time.Now().Sub(time.Unix(i.Created, 0))
-		if age < options.MinimumGCAge.Duration {
-			klog.Infof("remaining images are too young")
-			break
-		}
-		klog.Infof("removing image %v (size: %v, age: %v)", i.ID, i.Size, age)
-		var err error
-		if !options.DryRun {
-			err = client.ImageRemove(i.ID, dockertypes.ImageRemoveOptions{PruneChildren: true})
-		}
-		if err != nil {
-			klog.Infof("unable to remove image: %v", err)
-		} else {
-			freedBytes += i.Size
-		}
-	}
-	klog.Infof("unable to get below low threshold, %vMB freed", bytesToMB(freedBytes))
-
-	return nil
-}
-
-// Run runs the dockergc command.
-func Run(f kcmdutil.Factory, options *dockerGCConfigCmdOptions, cmd *cobra.Command, args []string) error {
-	klog.Infof("docker build garbage collection daemon")
-	if options.DryRun {
-		klog.Infof("Running in dry-run mode")
-	}
-	klog.Infof("MinimumGCAge: %v, ImageGCHighThresholdPercent: %v, ImageGCLowThresholdPercent: %v", options.MinimumGCAge, options.ImageGCHighThresholdPercent, options.ImageGCLowThresholdPercent)
-	client, err := newDockerClient(dockerTimeout)
-	if err != nil {
-		return err
-	}
-
-	info, err := client.Info()
-	if err != nil {
-		return err
-	}
-	if info.Driver != "overlay2" {
-		return fmt.Errorf("%s storage driver is not supported", info.Driver)
-	}
-	rootDir := info.DockerRootDir
-	if rootDir == "" {
-		return fmt.Errorf("unable to determine docker root directory")
-	}
-
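-	// run one garbage collection pass per minute; in dry-run mode a single
-	// pass is performed and the command exits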
-	for {
-		err := doGarbageCollection(client, options, rootDir)
-		if err != nil {
-			klog.Errorf("garbage collection attempt failed: %v", err)
-		}
-		if options.DryRun {
-			return nil
-		}
-		<-time.After(time.Minute)
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/expose/expose.go b/vendor/github.com/openshift/oc/pkg/cli/expose/expose.go
deleted file mode 100644
index 37d4966a8b0a..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/expose/expose.go
+++ /dev/null
@@ -1,204 +0,0 @@
-package expose
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/resource"
-	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/expose"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	"github.com/openshift/oc/pkg/cli/create/route"
-)
-
-var (
-	exposeLong = templates.LongDesc(`
-		Expose containers internally as services or externally via routes
-
-		There is also the ability to expose a deployment configuration, replication controller, service, or pod
-		as a new service on a specified port. If no labels are specified, the new object will re-use the
-		labels from the object it exposes.`)
-
-	exposeExample = templates.Examples(`
-	  # Create a route based on service nginx. The new route will re-use nginx's labels
-	  %[1]s expose service nginx
-
-	  # Create a route and specify your own label and route name
-	  %[1]s expose service nginx -l name=myroute --name=fromdowntown
-
-	  # Create a route and specify a hostname
-	  %[1]s expose service nginx --hostname=www.example.com
-
-	  # Create a route with wildcard
-	  %[1]s expose service nginx --hostname=x.example.com --wildcard-policy=Subdomain
-	  This would be equivalent to *.example.com. NOTE: only hosts are matched by the wildcard, subdomains would not be included.
-
-	  # Expose a deployment configuration as a service and use the specified port
-	  %[1]s expose dc ruby-hello-world --port=8080
-
-	  # Expose a service as a route in the specified path
-	  %[1]s expose service nginx --path=/nginx
-
-	  # Expose a service using different generators
-	  %[1]s expose service nginx --name=exposed-svc --port=12201 --protocol="TCP" --generator="service/v2"
-	  %[1]s expose service nginx --name=my-route --port=12201 --generator="route/v1"
-
-	  Exposing a service using the "route/v1" generator (default) will create a new exposed route with the "--name" provided
-	  (or the name of the service otherwise). You may not specify a "--protocol" or "--target-port" option when using this generator.`)
-)
-
-type ExposeOptions struct {
-	Hostname       string
-	Path           string
-	WildcardPolicy string
-
-	Namespace        string
-	EnforceNamespace bool
-	CoreClient       corev1client.CoreV1Interface
-	Builder          func() *resource.Builder
-	Args             []string
-	Generator        string
-	Filenames        []string
-	Port             string
-	Protocol         string
-}
-
-func NewExposeOptions() *ExposeOptions {
-	return &ExposeOptions{}
-}
-
-// NewCmdExpose is a wrapper for the Kubernetes cli expose command
-func NewCmdExpose(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewExposeOptions()
-
-	cmd := expose.NewCmdExposeService(f, streams)
-	cmd.Short = "Expose a replicated application as a service or route"
-	cmd.Long = exposeLong
-	cmd.Example = fmt.Sprintf(exposeExample, fullName)
-	// Default generator to an empty string so we can get more flexibility
-	// when setting defaults based on input resources
-	cmd.Flags().Set("generator", "")
-	cmd.Flag("generator").Usage = "The name of the API generator to use. Defaults to \"route/v1\". Available generators include \"service/v1\", \"service/v2\", and \"route/v1\". \"service/v1\" will automatically name the port \"default\", while \"service/v2\" will leave it unnamed."
-	cmd.Flag("generator").DefValue = ""
-	// Default protocol to an empty string so we can get more flexibility
-	// when validating the use of it (invalid for routes)
-	cmd.Flags().Set("protocol", "")
-	cmd.Flag("protocol").DefValue = ""
-	cmd.Flag("protocol").Changed = false
-	cmd.Flag("port").Usage = "The port that the resource should serve on."
-	defRun := cmd.Run
-	cmd.Run = func(cmd *cobra.Command, args []string) {
-		kcmdutil.CheckErr(o.Complete(cmd, f, args))
-		kcmdutil.CheckErr(o.Validate(cmd))
-		defRun(cmd, args)
-	}
-
-	cmd.Flags().StringVar(&o.Hostname, "hostname", o.Hostname, "Set a hostname for the new route")
-	cmd.Flags().StringVar(&o.Path, "path", o.Path, "Set a path for the new route")
-	cmd.Flags().StringVar(&o.WildcardPolicy, "wildcard-policy", o.WildcardPolicy, "Sets the WildcardPolicy for the hostname, the default is \"None\". Valid values are \"None\" and \"Subdomain\"")
-
-	return cmd
-}
-
-func (o *ExposeOptions) Complete(cmd *cobra.Command, f kcmdutil.Factory, args []string) error {
-	var err error
-	o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	config, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-
-	o.CoreClient, err = corev1client.NewForConfig(config)
-	if err != nil {
-		return err
-	}
-
-	o.Builder = f.NewBuilder
-	o.Args = args
-	o.Generator = kcmdutil.GetFlagString(cmd, "generator")
-	o.Filenames = kcmdutil.GetFlagStringSlice(cmd, "filename")
-	o.Port = kcmdutil.GetFlagString(cmd, "port")
-	o.Protocol = kcmdutil.GetFlagString(cmd, "protocol")
-
-	return nil
-}
-
-// Validate adds one layer of validation prior to calling the upstream
-// expose command.
-func (o *ExposeOptions) Validate(cmd *cobra.Command) error {
-	r := o.Builder().
-		WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
-		ContinueOnError().
-		NamespaceParam(o.Namespace).DefaultNamespace().
-		FilenameParam(o.EnforceNamespace, &resource.FilenameOptions{Recursive: false, Filenames: o.Filenames}).
-		ResourceTypeOrNameArgs(false, o.Args...).
-		Flatten().
-		Do()
-	infos, err := r.Infos()
-	if err != nil {
-		return err
-	}
-
-	if len(o.WildcardPolicy) > 0 && (o.WildcardPolicy != "Subdomain" && o.WildcardPolicy != "None") {
-		return fmt.Errorf("only \"Subdomain\" or \"None\" are supported for wildcard-policy")
-	}
-
-	if len(infos) > 1 {
-		return fmt.Errorf("multiple resources provided: %v", o.Args)
-	}
-	info := infos[0]
-	mapping := info.ResourceMapping()
-
-	switch mapping.GroupVersionKind.GroupKind() {
-	case kapi.Kind("Service"):
-		switch o.Generator {
-		case "service/v1", "service/v2":
-			// Set default protocol back for generating services
-			if len(o.Protocol) == 0 {
-				cmd.Flags().Set("protocol", "TCP")
-			}
-		case "":
-			// Default exposing services as a route
-			cmd.Flags().Set("generator", "route/v1")
-			fallthrough
-		case "route/v1":
-			// The upstream generator will incorrectly choose service.Port instead of service.TargetPort
-			// for the route TargetPort when no port is present.  Passing forcePort=true
-			// causes UnsecuredRoute to always set a Port so the upstream default is not used.
-			route, err := route.UnsecuredRoute(o.CoreClient, o.Namespace, info.Name, info.Name, o.Port, true)
-			if err != nil {
-				return err
-			}
-			if route.Spec.Port != nil {
-				cmd.Flags().Set("port", route.Spec.Port.TargetPort.String())
-			}
-		}
-
-	default:
-		switch o.Generator {
-		case "route/v1":
-			return fmt.Errorf("cannot expose a %s as a route", mapping.GroupVersionKind.Kind)
-		case "":
-			// Default exposing everything except services as a service
-			cmd.Flags().Set("generator", "service/v2")
-			fallthrough
-		case "service/v1", "service/v2":
-			// Set default protocol back for generating services
-			if len(kcmdutil.GetFlagString(cmd, "protocol")) == 0 {
-				cmd.Flags().Set("protocol", "TCP")
-			}
-		}
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/extract/extract.go b/vendor/github.com/openshift/oc/pkg/cli/extract/extract.go
deleted file mode 100644
index 7b51d955ea6b..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/extract/extract.go
+++ /dev/null
@@ -1,225 +0,0 @@
-package extract
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-
-	"github.com/spf13/cobra"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/resource"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-)
-
-var (
-	extractLong = templates.LongDesc(`
-		Extract files out of secrets and config maps
-
-		The extract command makes it easy to download the contents of a config map or secret into a directory.
-		Each key in the config map or secret is created as a separate file with the name of the key, as it
-		is when you mount a secret or config map into a container.
-
-		You may extract the contents of a secret or config map to standard out by passing '-' to --to. The
-		name of each key will be written to standard error.
-
-		You can limit which keys are extracted with the --keys=NAME flag, or set the directory to extract to
-		with --to=DIRECTORY.`)
-
-	extractExample = templates.Examples(`
-	  # extract the secret "test" to the current directory
-	  %[1]s extract secret/test
-
-	  # extract the config map "nginx" to the /tmp directory
-	  %[1]s extract configmap/nginx --to=/tmp
-
-	  # extract the config map "nginx" to STDOUT
-	  %[1]s extract configmap/nginx --to=-
-
-	  # extract only the key "nginx.conf" from config map "nginx" to the /tmp directory
-	  %[1]s extract configmap/nginx --to=/tmp --keys=nginx.conf`)
-)
-
-type ExtractOptions struct {
-	Filenames       []string
-	OnlyKeys        []string
-	TargetDirectory string
-	Overwrite       bool
-
-	Namespace         string
-	ExplicitNamespace bool
-	Resources         []string
-	Builder           func() *resource.Builder
-
-	ExtractFileContentsFn func(runtime.Object) (map[string][]byte, bool, error)
-
-	genericclioptions.IOStreams
-}
-
-func NewExtractOptions(targetDirectory string, streams genericclioptions.IOStreams) *ExtractOptions {
-	return &ExtractOptions{
-		IOStreams:       streams,
-		TargetDirectory: targetDirectory,
-	}
-}
-
-func NewCmdExtract(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewExtractOptions(".", streams)
-
-	cmd := &cobra.Command{
-		Use:     "extract RESOURCE/NAME [--to=DIRECTORY] [--keys=KEY ...]",
-		Short:   "Extract secrets or config maps to disk",
-		Long:    extractLong,
-		Example: fmt.Sprintf(extractExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Validate())
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-	cmd.Flags().BoolVar(&o.Overwrite, "confirm", o.Overwrite, "If true, overwrite files that already exist.")
-	cmd.Flags().StringVar(&o.TargetDirectory, "to", o.TargetDirectory, "Directory to extract files to.")
-	cmd.Flags().StringSliceVarP(&o.Filenames, "filename", "f", o.Filenames, "Filename, directory, or URL to file identifying the resource to extract.")
-	cmd.MarkFlagFilename("filename")
-	cmd.Flags().StringSliceVar(&o.OnlyKeys, "keys", o.OnlyKeys, "An optional list of keys to extract (default is all keys).")
-	return cmd
-}
-
-func (o *ExtractOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	o.ExtractFileContentsFn = extractFileContents
-
-	var err error
-	o.Namespace, o.ExplicitNamespace, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	o.Resources = args
-	o.Builder = f.NewBuilder
-
-	return nil
-}
-
-func (o *ExtractOptions) Validate() error {
-	if o.TargetDirectory != "-" {
-		// determine if output location is valid before continuing
-		if _, err := os.Stat(o.TargetDirectory); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func name(info *resource.Info) string {
-	return fmt.Sprintf("%s/%s", info.Mapping.Resource, info.Name)
-}
-
-func (o *ExtractOptions) Run() error {
-	r := o.Builder().
-		WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
-		NamespaceParam(o.Namespace).DefaultNamespace().
-		FilenameParam(o.ExplicitNamespace, &resource.FilenameOptions{Recursive: false, Filenames: o.Filenames}).
-		ResourceNames("", o.Resources...).
-		ContinueOnError().
-		Flatten().Do()
-
-	if err := r.Err(); err != nil {
-		return err
-	}
-
-	count := 0
-	contains := sets.NewString(o.OnlyKeys...)
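-	// an empty --keys list means every key is extracted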
-	err := r.Visit(func(info *resource.Info, err error) error {
-		if err != nil {
-			return fmt.Errorf("%s: %v", name(info), err)
-		}
-		contents, ok, err := o.ExtractFileContentsFn(info.Object)
-		if err != nil {
-			return fmt.Errorf("%s: %v", name(info), err)
-		}
-		if !ok {
-			fmt.Fprintf(o.ErrOut, "warning: %s does not support extraction\n", name(info))
-			return nil
-		}
-		count++
-		var errs []error
-		for k, v := range contents {
-			if contains.Len() == 0 || contains.Has(k) {
-				switch {
-				case o.TargetDirectory == "-":
-					fmt.Fprintf(o.ErrOut, "# %s\n", k)
-					o.Out.Write(v)
-					if !bytes.HasSuffix(v, []byte("\n")) {
-						fmt.Fprintln(o.Out)
-					}
-				default:
-					target := filepath.Join(o.TargetDirectory, k)
-					if err := o.writeToDisk(target, v); err != nil {
-						if os.IsExist(err) {
-							err = fmt.Errorf("file exists, pass --confirm to overwrite")
-						}
-						errs = append(errs, fmt.Errorf("%s: %v", k, err))
-					}
-				}
-			}
-		}
-		if len(errs) > 0 {
-			return fmt.Errorf(kcmdutil.MultipleErrors("error: ", errs))
-		}
-		return nil
-	})
-	if err != nil {
-		return err
-	}
-	if count == 0 {
-		return fmt.Errorf("you must specify at least one resource to extract")
-	}
-	return nil
-}
-
-func (o *ExtractOptions) writeToDisk(path string, data []byte) error {
-	if o.Overwrite {
-		if err := ioutil.WriteFile(path, data, 0600); err != nil {
-			return err
-		}
-	} else {
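-		// O_EXCL makes the open fail if the file already exists, so existing
-		// files are never overwritten unless --confirm was passed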
-		f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
-		if err != nil {
-			return err
-		}
-		if _, err := io.Copy(f, bytes.NewBuffer(data)); err != nil {
-			f.Close()
-			return err
-		}
-		if err := f.Close(); err != nil {
-			return err
-		}
-	}
-	fmt.Fprintf(o.Out, "%s\n", path)
-	return nil
-}
-
-// extractFileContents returns a map of keys to contents, false if the object does not support such an
-// operation, or an error.
-func extractFileContents(obj runtime.Object) (map[string][]byte, bool, error) {
-	switch t := obj.(type) {
-	case *corev1.Secret:
-		return t.Data, true, nil
-	case *corev1.ConfigMap:
-		out := make(map[string][]byte)
-		for k, v := range t.Data {
-			out[k] = []byte(v)
-		}
-		return out, true, nil
-	default:
-		return nil, false, nil
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/idle/idle.go b/vendor/github.com/openshift/oc/pkg/cli/idle/idle.go
deleted file mode 100644
index 0dd48a390ce1..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/idle/idle.go
+++ /dev/null
@@ -1,726 +0,0 @@
-package idle
-
-import (
-	"bufio"
-	"encoding/json"
-	"fmt"
-	"io"
-	"os"
-	"time"
-
-	"github.com/spf13/cobra"
-
-	autoscalingv1 "k8s.io/api/autoscaling/v1"
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/api/meta"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/strategicpatch"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/resource"
-	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/rest"
-	"k8s.io/client-go/scale"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	operatorv1 "github.com/openshift/api/operator/v1"
-	unidlingapi "github.com/openshift/api/unidling/v1alpha1"
-	appsclient "github.com/openshift/client-go/apps/clientset/versioned"
-	operatorclient "github.com/openshift/client-go/operator/clientset/versioned"
-	"github.com/openshift/library-go/pkg/unidling/unidlingclient"
-)
-
-var (
-	idleLong = templates.LongDesc(`
-		Idle scalable resources
-
-		Idling discovers the scalable resources (such as deployment configs and replication controllers)
-		associated with a series of services by examining the endpoints of the service.
-		Each service is then marked as idled, the associated resources are recorded, and the resources
-		are scaled down to zero replicas.
-
-		Upon receiving network traffic, the services (and any associated routes) will "wake up" the
-		associated resources by scaling them back up to their previous scale.`)
-
-	idleExample = templates.Examples(`
-		# Idle the scalable controllers associated with the services listed in to-idle.txt
-		$ %[1]s idle --resource-names-file to-idle.txt`)
-)
-
-type IdleOptions struct {
-	dryRun        bool
-	filename      string
-	all           bool
-	selector      string
-	allNamespaces bool
-	resources     []string
-
-	cmdFullName string
-
-	ClientForMappingFn func(*meta.RESTMapping) (resource.RESTClient, error)
-	ClientConfig       *rest.Config
-	ClientSet          kubernetes.Interface
-	AppClient          appsclient.Interface
-	OperatorClient     operatorclient.Interface
-	ScaleClient        scale.ScalesGetter
-	Mapper             meta.RESTMapper
-
-	Builder   func() *resource.Builder
-	Namespace string
-	nowTime   time.Time
-
-	genericclioptions.IOStreams
-}
-
-func NewIdleOptions(name string, streams genericclioptions.IOStreams) *IdleOptions {
-	return &IdleOptions{
-		IOStreams:   streams,
-		cmdFullName: name,
-	}
-}
-
-// NewCmdIdle implements the OpenShift cli idle command
-func NewCmdIdle(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewIdleOptions(fullName, streams)
-
-	cmd := &cobra.Command{
-		Use:     "idle (SERVICE_ENDPOINTS... | -l label | --all | --resource-names-file FILENAME)",
-		Short:   "Idle scalable resources",
-		Long:    idleLong,
-		Example: fmt.Sprintf(idleExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.RunIdle())
-		},
-	}
-
-	cmd.Flags().BoolVar(&o.dryRun, "dry-run", false, "If true, only print the annotations that would be written, without annotating or idling the relevant objects")
-	cmd.Flags().StringVar(&o.filename, "resource-names-file", o.filename, "file containing a list of services whose scalable resources to idle")
-	cmd.Flags().StringVarP(&o.selector, "selector", "l", o.selector, "Selector (label query) to use to select services")
-	cmd.Flags().BoolVar(&o.all, "all", o.all, "if true, select all services in the namespace")
-	cmd.Flags().BoolVarP(&o.allNamespaces, "all-namespaces", "A", o.allNamespaces, "if true, select services across all namespaces")
-	cmd.MarkFlagFilename("resource-names-file")
-
-	// TODO: take the `-o name` argument, and only print out names instead of the summary
-
-	return cmd
-}
-
-func (o *IdleOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	var err error
-	o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	o.nowTime = time.Now().UTC()
-
-	// NB: our filename arg is different from usual, since it's just a list of service names
-	if o.filename != "" && (o.selector != "" || len(args) > 0 || o.all) {
-		return fmt.Errorf("resource names, selectors, and the all flag may not be be specified if a filename is specified")
-	}
-
-	o.ClientConfig, err = f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-
-	o.ClientSet, err = kubernetes.NewForConfig(o.ClientConfig)
-	if err != nil {
-		return err
-	}
-
-	o.ScaleClient, err = scaleClient(f)
-	if err != nil {
-		return err
-	}
-
-	o.Mapper, err = f.ToRESTMapper()
-	if err != nil {
-		return err
-	}
-
-	o.AppClient, err = appsclient.NewForConfig(o.ClientConfig)
-	if err != nil {
-		return err
-	}
-
-	o.OperatorClient, err = operatorclient.NewForConfig(o.ClientConfig)
-	if err != nil {
-		return err
-	}
-
-	o.ClientForMappingFn = f.ClientForMapping
-	o.Builder = f.NewBuilder
-
-	o.resources = args
-
-	return nil
-}
-
-// scaleClient returns a ScalesGetter built from the given REST client getter
-func scaleClient(restClientGetter genericclioptions.RESTClientGetter) (scale.ScalesGetter, error) {
-	discoveryClient, err := restClientGetter.ToDiscoveryClient()
-	if err != nil {
-		return nil, err
-	}
-
-	clientConfig, err := restClientGetter.ToRESTConfig()
-	if err != nil {
-		return nil, err
-	}
-
-	restClient, err := rest.RESTClientFor(clientConfig)
-	if err != nil {
-		return nil, err
-	}
-	resolver := scale.NewDiscoveryScaleKindResolver(discoveryClient)
-	mapper, err := restClientGetter.ToRESTMapper()
-	if err != nil {
-		return nil, err
-	}
-
-	return scale.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil
-}
-
-// scanLinesFromFile loads lines from either standard in or a file
-func scanLinesFromFile(filename string) ([]string, error) {
-	var targetsInput io.Reader
-	if filename == "-" {
-		targetsInput = os.Stdin
-	} else if filename == "" {
-		return nil, fmt.Errorf("you must specify an list of resources to idle")
-	} else {
-		inputFile, err := os.Open(filename)
-		if err != nil {
-			return nil, err
-		}
-		defer inputFile.Close()
-		targetsInput = inputFile
-	}
-
-	lines := []string{}
-
-	// grab the raw resources from the file
-	lineScanner := bufio.NewScanner(targetsInput)
-	for lineScanner.Scan() {
-		line := lineScanner.Text()
-		if line == "" {
-			// skip empty lines
-			continue
-		}
-		lines = append(lines, line)
-	}
-	if err := lineScanner.Err(); err != nil {
-		return nil, err
-	}
-
-	return lines, nil
-}
-
-// idleUpdateInfo contains the required info to annotate an endpoints object
-// with the scalable resources that it should unidle
-type idleUpdateInfo struct {
-	obj       *corev1.Endpoints
-	scaleRefs map[unidlingapi.CrossGroupObjectReference]struct{}
-}
-
-// calculateIdlableAnnotationsByService calculates the list of objects involved in the idling process from a list of services.
-// Using the list of services, it figures out the associated scalable objects, and returns a map from the endpoints object for the services to
-// the list of scalable resources associated with that endpoints object, as well as a map from each CrossGroupObjectReference
-// that should be scaled to zero to the name of the associated service.
-func (o *IdleOptions) calculateIdlableAnnotationsByService(infoVisitor func(resource.VisitorFunc) error) (map[types.NamespacedName]idleUpdateInfo, map[namespacedCrossGroupObjectReference]types.NamespacedName, error) {
-	podsLoaded := make(map[corev1.ObjectReference]*corev1.Pod)
-	getPod := func(ref corev1.ObjectReference) (*corev1.Pod, error) {
-		if pod, ok := podsLoaded[ref]; ok {
-			return pod, nil
-		}
-		pod, err := o.ClientSet.CoreV1().Pods(ref.Namespace).Get(ref.Name, metav1.GetOptions{})
-		if err != nil {
-			return nil, err
-		}
-
-		podsLoaded[ref] = pod
-
-		return pod, nil
-	}
-
-	controllersLoaded := make(map[namespacedOwnerReference]metav1.Object)
-	helpers := make(map[schema.GroupKind]*resource.Helper)
-	getController := func(ref namespacedOwnerReference) (metav1.Object, error) {
-		if controller, ok := controllersLoaded[ref]; ok {
-			return controller, nil
-		}
-		gv, err := schema.ParseGroupVersion(ref.APIVersion)
-		if err != nil {
-			return nil, err
-		}
-		// just get the unversioned version of this
-		gk := schema.GroupKind{Group: gv.Group, Kind: ref.Kind}
-		helper, ok := helpers[gk]
-		if !ok {
-			var mapping *meta.RESTMapping
-			mapping, err = o.Mapper.RESTMapping(schema.GroupKind{Group: gv.Group, Kind: ref.Kind}, "")
-			if err != nil {
-				return nil, err
-			}
-			var client resource.RESTClient
-			client, err = o.ClientForMappingFn(mapping)
-			if err != nil {
-				return nil, err
-			}
-			helper = resource.NewHelper(client, mapping)
-			helpers[gk] = helper
-		}
-
-		var controller runtime.Object
-		controller, err = helper.Get(ref.namespace, ref.Name, false)
-		if err != nil {
-			return nil, err
-		}
-
-		controllerMeta, err := meta.Accessor(controller)
-		if err != nil {
-			return nil, err
-		}
-
-		controllersLoaded[ref] = controllerMeta
-
-		return controllerMeta, nil
-	}
-
-	targetScaleRefs := make(map[namespacedCrossGroupObjectReference]types.NamespacedName)
-	endpointsInfo := make(map[types.NamespacedName]idleUpdateInfo)
-
-	err := infoVisitor(func(info *resource.Info, err error) error {
-		if err != nil {
-			return err
-		}
-
-		endpoints, isEndpoints := info.Object.(*corev1.Endpoints)
-		if !isEndpoints {
-			return fmt.Errorf("you must specify endpoints, not %v (view available endpoints with \"%s get endpoints\").", info.Mapping.Resource, o.cmdFullName)
-		}
-
-		endpointsName := types.NamespacedName{
-			Namespace: endpoints.Namespace,
-			Name:      endpoints.Name,
-		}
-		scaleRefs, err := findScalableResourcesForEndpoints(endpoints, getPod, getController)
-		if err != nil {
-			return fmt.Errorf("unable to calculate scalable resources for service %s/%s: %v", endpoints.Namespace, endpoints.Name, err)
-		}
-
-		nonNamespacedScaleRefs := make(map[unidlingapi.CrossGroupObjectReference]struct{}, len(scaleRefs))
-
-		for ref := range scaleRefs {
-			nonNamespacedScaleRefs[ref.CrossGroupObjectReference] = struct{}{}
-			targetScaleRefs[ref] = endpointsName
-		}
-
-		idleInfo := idleUpdateInfo{
-			obj:       endpoints,
-			scaleRefs: nonNamespacedScaleRefs,
-		}
-
-		endpointsInfo[endpointsName] = idleInfo
-
-		return nil
-	})
-
-	return endpointsInfo, targetScaleRefs, err
-}
-
-func makeCrossGroupObjRef(ref *metav1.OwnerReference) (unidlingapi.CrossGroupObjectReference, error) {
-	gv, err := schema.ParseGroupVersion(ref.APIVersion)
-	if err != nil {
-		return unidlingapi.CrossGroupObjectReference{}, err
-	}
-
-	return unidlingapi.CrossGroupObjectReference{
-		Kind:  ref.Kind,
-		Name:  ref.Name,
-		Group: gv.Group,
-	}, nil
-}
-
-// namespacedOwnerReference is an OwnerReference with Namespace info,
-// so we can differentiate objects across namespaces.
-type namespacedOwnerReference struct {
-	metav1.OwnerReference
-	namespace string
-}
-
-// namespacedCrossGroupObjectReference is a CrossGroupObjectReference
-// with namespace information attached, so that we can track relevant
-// objects in different namespaces with the same name
-type namespacedCrossGroupObjectReference struct {
-	unidlingapi.CrossGroupObjectReference
-	namespace string
-}
-
-// normalizedNSOwnerRef converts an OwnerReference into a namespacedOwnerReference,
-// and ensures that it's comparable to other owner references (clearing pointer fields, etc)
-func normalizedNSOwnerRef(namespace string, ownerRef *metav1.OwnerReference) namespacedOwnerReference {
-	ref := namespacedOwnerReference{
-		namespace:      namespace,
-		OwnerReference: *ownerRef,
-	}
-
-	ref.Controller = nil
-	ref.BlockOwnerDeletion = nil
-
-	return ref
-}
-
-// findScalableResourcesForEndpoints takes an Endpoints object and looks for the associated
-// scalable objects by checking each address in each subset to see if it has a pod
-// reference, then following that pod reference to find the owning controller,
-// and returning the unique set of controllers found this way.
-func findScalableResourcesForEndpoints(endpoints *corev1.Endpoints, getPod func(corev1.ObjectReference) (*corev1.Pod, error), getController func(namespacedOwnerReference) (metav1.Object, error)) (map[namespacedCrossGroupObjectReference]struct{}, error) {
-	// To find all RCs and DCs for an endpoint, we first figure out which pods are pointed to by that endpoint...
-	podRefs := map[corev1.ObjectReference]*corev1.Pod{}
-	for _, subset := range endpoints.Subsets {
-		for _, addr := range subset.Addresses {
-			if addr.TargetRef != nil && addr.TargetRef.Kind == "Pod" {
-				pod, err := getPod(*addr.TargetRef)
-				if err != nil && !errors.IsNotFound(err) {
-					return nil, fmt.Errorf("unable to find controller for pod %s/%s: %v", addr.TargetRef.Namespace, addr.TargetRef.Name, err)
-				}
-
-				if pod != nil {
-					podRefs[*addr.TargetRef] = pod
-				}
-			}
-		}
-	}
-
-	// ... then, for each pod, we check the controller, and find the set of unique controllers...
-	immediateControllerRefs := make(map[namespacedOwnerReference]struct{})
-	for _, pod := range podRefs {
-		controllerRef := metav1.GetControllerOf(pod)
-		if controllerRef == nil {
-			return nil, fmt.Errorf("unable to find controller for pod %s/%s: no creator reference listed", pod.Namespace, pod.Name)
-		}
-		ref := normalizedNSOwnerRef(pod.Namespace, controllerRef)
-		immediateControllerRefs[ref] = struct{}{}
-	}
-
-	// ... finally, for each controller, we load it, and see if there is a corresponding owner (to cover cases like DCs, Deployments, etc)
-	controllerRefs := make(map[namespacedCrossGroupObjectReference]struct{})
-	for controllerRef := range immediateControllerRefs {
-		controller, err := getController(controllerRef)
-		if err != nil && errors.IsNotFound(err) {
-			return nil, fmt.Errorf("unable to load %s %q: %v", controllerRef.Kind, controllerRef.Name, err)
-		}
-
-		if controller != nil {
-			parentControllerRef := metav1.GetControllerOf(controller)
-			var crossGroupObjRef unidlingapi.CrossGroupObjectReference
-			if parentControllerRef == nil {
-				// if this is just a plain RC, use it
-				crossGroupObjRef, err = makeCrossGroupObjRef(&controllerRef.OwnerReference)
-			} else {
-				crossGroupObjRef, err = makeCrossGroupObjRef(parentControllerRef)
-			}
-
-			if err != nil {
-				return nil, fmt.Errorf("unable to load the creator of %s %q: %v", controllerRef.Kind, controllerRef.Name, err)
-			}
-			controllerRefs[namespacedCrossGroupObjectReference{
-				CrossGroupObjectReference: crossGroupObjRef,
-				namespace:                 controllerRef.namespace,
-			}] = struct{}{}
-		}
-	}
-
-	return controllerRefs, nil
-}
-
-// pairScalesWithScaleRefs takes some subresource references, a map of new scales for those subresource references,
-// and annotations from an existing object.  It merges the scales and references found in the existing annotations
-// with the new data (using the new scale in case of conflict if present and not 0, and the old scale otherwise),
-// and returns a slice of RecordedScaleReferences suitable for using as the new annotation value.
-func pairScalesWithScaleRefs(serviceName types.NamespacedName, annotations map[string]string, rawScaleRefs map[unidlingapi.CrossGroupObjectReference]struct{}, scales map[namespacedCrossGroupObjectReference]int32) ([]unidlingapi.RecordedScaleReference, error) {
-	oldTargetsRaw, hasOldTargets := annotations[unidlingapi.UnidleTargetAnnotation]
-
-	scaleRefs := make([]unidlingapi.RecordedScaleReference, 0, len(rawScaleRefs))
-
-	// initialize the list of new annotations
-	for rawScaleRef := range rawScaleRefs {
-		scaleRefs = append(scaleRefs, unidlingapi.RecordedScaleReference{
-			CrossGroupObjectReference: rawScaleRef,
-			Replicas:                  0,
-		})
-	}
-
-	// if the new preserved scale would be 0, see if we have an old scale that we can use instead
-	if hasOldTargets {
-		var oldTargets []unidlingapi.RecordedScaleReference
-		oldTargetsSet := make(map[unidlingapi.CrossGroupObjectReference]int)
-		if err := json.Unmarshal([]byte(oldTargetsRaw), &oldTargets); err != nil {
-			return nil, fmt.Errorf("unable to extract existing scale information from endpoints %s: %v", serviceName.String(), err)
-		}
-
-		for i, target := range oldTargets {
-			oldTargetsSet[target.CrossGroupObjectReference] = i
-		}
-
-		// figure out which new targets were already present...
-		for _, newScaleRef := range scaleRefs {
-			if oldTargetInd, ok := oldTargetsSet[newScaleRef.CrossGroupObjectReference]; ok {
-				namespacedScaleRef := namespacedCrossGroupObjectReference{
-					CrossGroupObjectReference: newScaleRef.CrossGroupObjectReference,
-					namespace:                 serviceName.Namespace,
-				}
-				if newScale, ok := scales[namespacedScaleRef]; !ok || newScale == 0 {
-					scales[namespacedScaleRef] = oldTargets[oldTargetInd].Replicas
-				}
-				delete(oldTargetsSet, newScaleRef.CrossGroupObjectReference)
-			}
-		}
-
-		// ...and add in any existing targets not already on the new list to the new list
-		for _, ind := range oldTargetsSet {
-			scaleRefs = append(scaleRefs, oldTargets[ind])
-		}
-	}
-
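-	// fill in the final replica count for each reference: prefer the live
-	// scale, then any previously recorded scale, and fall back to 1 so that
-	// unidling always restores at least one replica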
-	for i := range scaleRefs {
-		scaleRef := &scaleRefs[i]
-		namespacedScaleRef := namespacedCrossGroupObjectReference{
-			CrossGroupObjectReference: scaleRef.CrossGroupObjectReference,
-			namespace:                 serviceName.Namespace,
-		}
-		newScale, ok := scales[namespacedScaleRef]
-		if !ok || newScale == 0 {
-			newScale = 1
-			if scaleRef.Replicas != 0 {
-				newScale = scaleRef.Replicas
-			}
-		}
-
-		scaleRef.Replicas = newScale
-	}
-
-	return scaleRefs, nil
-}
-
-// setIdleAnnotations sets the unidle-target annotation to the marshaled list of scale references and records the idled-at timestamp
-func setIdleAnnotations(annotations map[string]string, scaleRefs []unidlingapi.RecordedScaleReference, nowTime time.Time) error {
-	var scaleRefsBytes []byte
-	var err error
-	if scaleRefsBytes, err = json.Marshal(scaleRefs); err != nil {
-		return err
-	}
-
-	annotations[unidlingapi.UnidleTargetAnnotation] = string(scaleRefsBytes)
-	annotations[unidlingapi.IdledAtAnnotation] = nowTime.Format(time.RFC3339)
-
-	return nil
-}
-
-// patchObj calculates a strategic merge patch between the given new object and the existing marshaled object, and applies it
-func patchObj(obj runtime.Object, metadata metav1.Object, oldData []byte, mapping *meta.RESTMapping, clientForMapping resource.RESTClient) (runtime.Object, error) {
-	versionedObj, err := scheme.Scheme.ConvertToVersion(obj, schema.GroupVersions{mapping.GroupVersionKind.GroupVersion()})
-	if err != nil {
-		return nil, err
-	}
-	newData, err := json.Marshal(versionedObj)
-	if err != nil {
-		return nil, err
-	}
-
-	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, versionedObj)
-	if err != nil {
-		return nil, err
-	}
-
-	helper := resource.NewHelper(clientForMapping, mapping)
-
-	return helper.Patch(metadata.GetNamespace(), metadata.GetName(), types.StrategicMergePatchType, patchBytes, &metav1.PatchOptions{})
-}
-
-type scaleInfo struct {
-	namespace string
-	scale     *autoscalingv1.Scale
-	obj       runtime.Object
-}
-
-// RunIdle runs the idling command logic, taking a list of resources or services in a file, scaling the associated
-// scalable resources to zero, and annotating the associated endpoints objects with the scalable resources to unidle
-// when they receive traffic.
-func (o *IdleOptions) RunIdle() error {
-	clusterNetwork, err := o.OperatorClient.OperatorV1().Networks().Get("cluster", metav1.GetOptions{})
-	if err == nil {
-		sdnType := clusterNetwork.Spec.DefaultNetwork.Type
-
-		if sdnType == operatorv1.NetworkTypeOpenShiftSDN {
-			fmt.Fprintln(o.ErrOut, "WARNING: idling when network policies are in place may cause connections to bypass network policy entirely")
-		}
-	}
-
-	b := o.Builder().
-		WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
-		ContinueOnError().
-		NamespaceParam(o.Namespace).DefaultNamespace().AllNamespaces(o.allNamespaces).
-		Flatten().
-		SingleResourceType()
-
-	if len(o.filename) > 0 {
-		targetServiceNames, err := scanLinesFromFile(o.filename)
-		if err != nil {
-			return err
-		}
-		b.ResourceNames("endpoints", targetServiceNames...)
-	} else {
-		// NB: this is a bit weird because the resource builder will complain if we use ResourceTypes and ResourceNames when len(args) > 0
-		if o.selector != "" {
-			b.LabelSelectorParam(o.selector).ResourceTypes("endpoints")
-		}
-
-		b.ResourceNames("endpoints", o.resources...)
-
-		if o.all {
-			b.ResourceTypes("endpoints").SelectAllParam(o.all)
-		}
-	}
-
-	hadError := false
-	nowTime := time.Now().UTC()
-
-	dryRunText := ""
-	if o.dryRun {
-		dryRunText = "(dry run)"
-	}
-
-	// figure out which endpoints and resources we need to idle
-	byService, byScalable, err := o.calculateIdlableAnnotationsByService(b.Do().Visit)
-
-	if err != nil {
-		if len(byService) == 0 || len(byScalable) == 0 {
-			return fmt.Errorf("no valid scalable resources found to idle: %v", err)
-		}
-		fmt.Fprintf(o.ErrOut, "warning: continuing on for valid scalable resources, but an error occurred while finding scalable resources to idle: %v", err)
-	}
-
-	scaleAnnotater := unidlingclient.NewScaleAnnotater(o.ScaleClient, o.Mapper, o.AppClient.AppsV1(), o.ClientSet.CoreV1(), func(currentReplicas int32, annotations map[string]string) {
-		annotations[unidlingapi.IdledAtAnnotation] = nowTime.UTC().Format(time.RFC3339)
-		annotations[unidlingapi.PreviousScaleAnnotation] = fmt.Sprintf("%v", currentReplicas)
-	})
-
-	replicas := make(map[namespacedCrossGroupObjectReference]int32, len(byScalable))
-	toScale := make(map[namespacedCrossGroupObjectReference]scaleInfo)
-
-	// first, collect the scale info
-	for scaleRef, svcName := range byScalable {
-		obj, scale, err := scaleAnnotater.GetObjectWithScale(svcName.Namespace, scaleRef.CrossGroupObjectReference)
-		if err != nil {
-			fmt.Fprintf(o.ErrOut, "error: unable to get scale for %s %s/%s, not marking that scalable as idled: %v\n", scaleRef.Kind, svcName.Namespace, scaleRef.Name, err)
-			svcInfo := byService[svcName]
-			delete(svcInfo.scaleRefs, scaleRef.CrossGroupObjectReference)
-			hadError = true
-			continue
-		}
-		replicas[scaleRef] = scale.Spec.Replicas
-		toScale[scaleRef] = scaleInfo{scale: scale, obj: obj, namespace: svcName.Namespace}
-	}
-
-	// annotate the endpoints objects to indicate which scalable resources need to be unidled on traffic
-	for serviceName, info := range byService {
-		if info.obj.Annotations == nil {
-			info.obj.Annotations = make(map[string]string)
-		}
-		refsWithScale, err := pairScalesWithScaleRefs(serviceName, info.obj.Annotations, info.scaleRefs, replicas)
-		if err != nil {
-			fmt.Fprintf(o.ErrOut, "error: unable to mark service %q as idled: %v", serviceName.String(), err)
-			continue
-		}
-
-		if !o.dryRun {
-			if len(info.scaleRefs) == 0 {
-				fmt.Fprintf(o.ErrOut, "error: unable to mark the service %q as idled.\n", serviceName.String())
-				fmt.Fprintf(o.ErrOut, "Make sure that the service is not already marked as idled and that it is associated with resources that can be scaled.\n")
-				fmt.Fprintf(o.ErrOut, "See 'oc idle -h' for help and examples.\n")
-				hadError = true
-				continue
-			}
-
-			metadata, err := meta.Accessor(info.obj)
-			if err != nil {
-				fmt.Fprintf(o.ErrOut, "error: unable to mark service %q as idled: %v", serviceName.String(), err)
-				hadError = true
-				continue
-			}
-
-			gvks, _, err := scheme.Scheme.ObjectKinds(info.obj)
-			if err != nil {
-				fmt.Fprintf(o.ErrOut, "error: unable to mark service %q as idled: %v", serviceName.String(), err)
-				hadError = true
-				continue
-			}
-			// we need a versioned obj to properly marshal to JSON, so that we can compute the patch
-			mapping, err := o.Mapper.RESTMapping(gvks[0].GroupKind(), gvks[0].Version)
-			if err != nil {
-				fmt.Fprintf(o.ErrOut, "error: unable to mark service %q as idled: %v", serviceName.String(), err)
-				hadError = true
-				continue
-			}
-
-			oldData, err := json.Marshal(info.obj)
-			if err != nil {
-				fmt.Fprintf(o.ErrOut, "error: unable to mark service %q as idled: %v", serviceName.String(), err)
-				hadError = true
-				continue
-			}
-
-			clientForMapping, err := o.ClientForMappingFn(mapping)
-			if err != nil {
-				fmt.Fprintf(o.ErrOut, "error: unable to mark service %q as idled: %v\n", serviceName.String(), err)
-				hadError = true
-				continue
-			}
-
-			if err = setIdleAnnotations(info.obj.Annotations, refsWithScale, nowTime); err != nil {
-				fmt.Fprintf(o.ErrOut, "error: unable to mark service %q as idled: %v", serviceName.String(), err)
-				hadError = true
-				continue
-			}
-			if _, err := patchObj(info.obj, metadata, oldData, mapping, clientForMapping); err != nil {
-				fmt.Fprintf(o.ErrOut, "error: unable to mark service %q as idled: %v", serviceName.String(), err)
-				hadError = true
-				continue
-			}
-		}
-
-		fmt.Fprintf(o.Out, "The service %q has been marked as idled %s\n", serviceName.String(), dryRunText)
-
-		for _, scaleRef := range refsWithScale {
-			fmt.Fprintf(o.Out, "The service will unidle %s \"%s/%s\" to %v replicas once it receives traffic %s\n", scaleRef.Kind, serviceName.Namespace, scaleRef.Name, scaleRef.Replicas, dryRunText)
-		}
-	}
-
-	// actually "idle" the scalable resources by scaling them down to zero
-	// (scale down to zero *after* we've applied the annotation so that we don't miss any traffic)
-	for scaleRef, info := range toScale {
-		if !o.dryRun {
-			info.scale.Spec.Replicas = 0
-			scaleUpdater := unidlingclient.NewScaleUpdater(scheme.DefaultJSONEncoder(), info.namespace, o.AppClient.AppsV1(), o.ClientSet.CoreV1())
-			if err := scaleAnnotater.UpdateObjectScale(scaleUpdater, info.namespace, scaleRef.CrossGroupObjectReference, info.obj, info.scale); err != nil {
-				fmt.Fprintf(o.ErrOut, "error: unable to scale %s %s/%s to 0, but still listed as target for unidling: %v\n", scaleRef.Kind, info.namespace, scaleRef.Name, err)
-				hadError = true
-				continue
-			}
-		}
-
-		fmt.Fprintf(o.Out, "%s \"%s/%s\" has been idled %s\n", scaleRef.Kind, info.namespace, scaleRef.Name, dryRunText)
-	}
-
-	if hadError {
-		return kcmdutil.ErrExit
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/idle/idle_test.go b/vendor/github.com/openshift/oc/pkg/cli/idle/idle_test.go
deleted file mode 100644
index 6612e36b9151..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/idle/idle_test.go
+++ /dev/null
@@ -1,325 +0,0 @@
-package idle
-
-import (
-	"encoding/json"
-	"fmt"
-	"testing"
-
-	unidlingapi "github.com/openshift/api/unidling/v1alpha1"
-
-	corev1 "k8s.io/api/core/v1"
-	kerrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	ktypes "k8s.io/apimachinery/pkg/types"
-)
-
-func makePod(name string, rc metav1.Object, namespace string, t *testing.T) corev1.Pod {
-	pod := corev1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: namespace,
-		},
-	}
-	pod.OwnerReferences = append(pod.OwnerReferences,
-		*metav1.NewControllerRef(rc, corev1.SchemeGroupVersion.WithKind("ReplicationController")))
-
-	return pod
-}
-
-func makeRC(name string, dc metav1.Object, namespace string, t *testing.T) *corev1.ReplicationController {
-	rc := corev1.ReplicationController{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:        name,
-			Namespace:   namespace,
-			Annotations: make(map[string]string),
-		},
-	}
-
-	if dc != nil {
-		rc.OwnerReferences = append(rc.OwnerReferences, *metav1.NewControllerRef(dc,
-			schema.GroupVersion{Group: "apps.openshift.io", Version: "__internal"}.WithKind("DeploymentConfig")))
-	}
-
-	return &rc
-}
-
-func makePodRef(name, namespace string) *corev1.ObjectReference {
-	return &corev1.ObjectReference{
-		Kind:      "Pod",
-		Name:      name,
-		Namespace: namespace,
-	}
-}
-
-func makeRCRef(name string) *metav1.OwnerReference {
-	return metav1.NewControllerRef(&metav1.ObjectMeta{Name: name},
-		corev1.SchemeGroupVersion.WithKind("ReplicationController"))
-}
-
-func TestFindIdlablesForEndpoints(t *testing.T) {
-	endpoints := &corev1.Endpoints{
-		Subsets: []corev1.EndpointSubset{
-			{
-				Addresses: []corev1.EndpointAddress{
-					{
-						TargetRef: makePodRef("somepod1", "somens1"),
-					},
-					{
-						TargetRef: makePodRef("somepod2", "somens1"),
-					},
-					{
-						TargetRef: &corev1.ObjectReference{
-							Kind:      "Cheese",
-							Name:      "cheddar",
-							Namespace: "somens",
-						},
-					},
-				},
-			},
-			{
-				Addresses: []corev1.EndpointAddress{
-					{},
-					{
-						TargetRef: makePodRef("somepod3", "somens1"),
-					},
-					{
-						TargetRef: makePodRef("somepod4", "somens1"),
-					},
-					{
-						TargetRef: makePodRef("somepod5", "somens1"),
-					},
-					{
-						TargetRef: makePodRef("missingpod", "somens1"),
-					},
-				},
-			},
-			{
-				Addresses: []corev1.EndpointAddress{
-					{},
-					{
-						TargetRef: makePodRef("somepod1", "somens2"),
-					},
-				},
-			},
-		},
-	}
-
-	controllers := map[string]metav1.Object{
-		"somens1/somerc1": makeRC("somerc1", &metav1.ObjectMeta{Name: "somedc1"}, "somens1", t),
-		"somens1/somerc2": makeRC("somerc2", nil, "somens1", t),
-		"somens1/somerc3": makeRC("somerc3", &metav1.ObjectMeta{Name: "somedc2"}, "somens1", t),
-		"somens1/somerc4": makeRC("somerc4", &metav1.ObjectMeta{Name: "somedc2"}, "somens1", t),
-		// make sure we test having multiple namespaces with identically-named RCs
-		"somens2/somerc2": makeRC("somerc2", nil, "somens2", t),
-	}
-
-	pods := map[corev1.ObjectReference]corev1.Pod{
-		*makePodRef("somepod1", "somens1"): makePod("somepod1", controllers["somens1/somerc1"], "somens1", t),
-		*makePodRef("somepod2", "somens1"): makePod("somepod2", controllers["somens1/somerc2"], "somens1", t),
-		*makePodRef("somepod3", "somens1"): makePod("somepod3", controllers["somens1/somerc1"], "somens1", t),
-		*makePodRef("somepod4", "somens1"): makePod("somepod4", controllers["somens1/somerc3"], "somens1", t),
-		*makePodRef("somepod5", "somens1"): makePod("somepod5", controllers["somens1/somerc4"], "somens1", t),
-		*makePodRef("somepod1", "somens2"): makePod("somepod5", controllers["somens2/somerc2"], "somens2", t),
-	}
-
-	getPod := func(ref corev1.ObjectReference) (*corev1.Pod, error) {
-		if pod, ok := pods[ref]; ok {
-			return &pod, nil
-		}
-		return nil, kerrors.NewNotFound(schema.GroupResource{Group: corev1.GroupName, Resource: "Pod"}, ref.Name)
-	}
-
-	getController := func(ref namespacedOwnerReference) (metav1.Object, error) {
-		if controller, ok := controllers[fmt.Sprintf("%s/%s", ref.namespace, ref.Name)]; ok {
-			return controller, nil
-		}
-
-		// NB: this GroupResource declaration plays fast and loose with various distinctions
-		// but is good enough for being an error in a test
-		return nil, kerrors.NewNotFound(schema.GroupResource{Group: corev1.GroupName, Resource: ref.Kind}, ref.Name)
-
-	}
-
-	refSet, err := findScalableResourcesForEndpoints(endpoints, getPod, getController)
-
-	if err != nil {
-		t.Fatalf("Unexpected error while finding idlables: %v", err)
-	}
-
-	expectedRefs := []namespacedCrossGroupObjectReference{
-		{
-			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
-				Kind:  "DeploymentConfig",
-				Name:  "somedc1",
-				Group: "apps.openshift.io",
-			},
-			namespace: "somens1",
-		},
-		{
-			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
-				Kind:  "DeploymentConfig",
-				Name:  "somedc2",
-				Group: "apps.openshift.io",
-			},
-			namespace: "somens1",
-		},
-		{
-			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
-				Kind:  "ReplicationController",
-				Name:  "somerc2",
-				Group: corev1.GroupName,
-			},
-			namespace: "somens1",
-		},
-		{
-			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
-				Kind:  "ReplicationController",
-				Name:  "somerc2",
-				Group: corev1.GroupName,
-			},
-			namespace: "somens2",
-		},
-	}
-
-	if len(refSet) != len(expectedRefs) {
-		t.Errorf("Expected to get somedc1, somedc2, somerc2, instead got %#v", refSet)
-	}
-
-	for _, ref := range expectedRefs {
-		if _, ok := refSet[ref]; !ok {
-			t.Errorf("expected scalable %q to be present, but was not in %v", ref.Name, refSet)
-		}
-	}
-}
-
-func TestPairScalesWithIdlables(t *testing.T) {
-	oldScaleRefs := []unidlingapi.RecordedScaleReference{
-		{
-			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
-				Kind: "ReplicationController",
-				Name: "somerc1",
-			},
-			Replicas: 5,
-		},
-		{
-			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
-				Kind: "DeploymentConfig",
-				Name: "somedc1",
-			},
-			Replicas: 3,
-		},
-	}
-
-	oldScaleRefBytes, err := json.Marshal(oldScaleRefs)
-	if err != nil {
-		t.Fatalf("Unexpected error: %v", err)
-	}
-	oldAnnotations := map[string]string{
-		unidlingapi.UnidleTargetAnnotation: string(oldScaleRefBytes),
-	}
-
-	newRawRefs := map[unidlingapi.CrossGroupObjectReference]struct{}{
-		{
-			Kind: "ReplicationController",
-			Name: "somerc1",
-		}: {},
-		{
-			Kind: "ReplicationController",
-			Name: "somerc2",
-		}: {},
-		{
-			Kind: "DeploymentConfig",
-			Name: "somedc1",
-		}: {},
-		{
-			Kind: "DeploymentConfig",
-			Name: "somedc2",
-		}: {},
-	}
-
-	scales := map[namespacedCrossGroupObjectReference]int32{
-		{
-			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
-				Kind: "ReplicationController",
-				Name: "somerc1",
-			},
-			namespace: "somens1",
-		}: 2,
-		{
-			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
-				Kind: "ReplicationController",
-				Name: "somerc1",
-			},
-			namespace: "somens2",
-		}: 3,
-		{
-			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
-				Kind: "ReplicationController",
-				Name: "somerc2",
-			},
-			namespace: "somens1",
-		}: 5,
-		{
-			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
-				Kind: "DeploymentConfig",
-				Name: "somedc1",
-			},
-			namespace: "somens1",
-		}: 0,
-		{
-			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
-				Kind: "DeploymentConfig",
-				Name: "somedc2",
-			},
-			namespace: "somens1",
-		}: 0,
-	}
-
-	newScaleRefs, err := pairScalesWithScaleRefs(ktypes.NamespacedName{Name: "somesvc", Namespace: "somens1"}, oldAnnotations, newRawRefs, scales)
-
-	expectedScaleRefs := map[unidlingapi.RecordedScaleReference]struct{}{
-		{
-			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
-				Kind: "ReplicationController",
-				Name: "somerc1",
-			},
-			Replicas: 2,
-		}: {},
-		{
-			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
-				Kind: "ReplicationController",
-				Name: "somerc2",
-			},
-			Replicas: 5,
-		}: {},
-		{
-			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
-				Kind: "DeploymentConfig",
-				Name: "somedc1",
-			},
-			Replicas: 3,
-		}: {},
-		{
-			CrossGroupObjectReference: unidlingapi.CrossGroupObjectReference{
-				Kind: "DeploymentConfig",
-				Name: "somedc2",
-			},
-			Replicas: 1,
-		}: {},
-	}
-
-	if err != nil {
-		t.Fatalf("Unexpected error while generating new annotation value: %v", err)
-	}
-
-	if len(newScaleRefs) != len(expectedScaleRefs) {
-		t.Fatalf("Expected new recorded scale references of %#v, got %#v", expectedScaleRefs, newScaleRefs)
-	}
-
-	for _, scaleRef := range newScaleRefs {
-		if _, wasPresent := expectedScaleRefs[scaleRef]; !wasPresent {
-			t.Errorf("Unexpected recorded scale reference %#v found in the output", scaleRef)
-		}
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/append/append.go b/vendor/github.com/openshift/oc/pkg/cli/image/append/append.go
deleted file mode 100644
index 4ebff98b6f01..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/append/append.go
+++ /dev/null
@@ -1,711 +0,0 @@
-package append
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"os"
-	"strconv"
-	"time"
-
-	units "github.com/docker/go-units"
-	"github.com/spf13/cobra"
-	"k8s.io/klog"
-
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/manifest/schema2"
-	"github.com/docker/distribution/reference"
-	"github.com/docker/distribution/registry/client"
-	digest "github.com/opencontainers/go-digest"
-
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	"github.com/openshift/api/image/docker10"
-	"github.com/openshift/library-go/pkg/image/dockerv1client"
-	imagereference "github.com/openshift/library-go/pkg/image/reference"
-	"github.com/openshift/library-go/pkg/image/registryclient"
-	imagemanifest "github.com/openshift/oc/pkg/cli/image/manifest"
-	"github.com/openshift/oc/pkg/cli/image/workqueue"
-	"github.com/openshift/oc/pkg/helpers/image/dockerlayer"
-	"github.com/openshift/oc/pkg/helpers/image/dockerlayer/add"
-)
-
-var (
-	desc = templates.LongDesc(`
-		Add layers to container images
-
-		Modifies an existing image by adding layers or changing configuration and then pushes that
-		image to a remote registry. Any inherited layers are streamed from registry to registry 
-		without being stored locally. The default docker credentials are used for authenticating 
-		to the registries.
-
-		Layers may be provided as arguments to the command and must each be a gzipped tar archive
-		representing a filesystem overlay to the inherited images. The archive may contain a "whiteout"
-		file (the prefix '.wh.' and the filename) which will hide files in the lower layers. All
-		supported filesystem attributes present in the archive will be used as is.
-
-		Metadata about the image (the configuration passed to the container runtime) may be altered
-		by passing a JSON string to the --image or --meta options. The --image flag changes what
-		the container runtime sees, while the --meta option allows you to change the attributes of
-		the image used by the runtime. Use --dry-run to see the result of your changes. You may
-		add the --drop-history flag to remove information from the image about the system that 
-		built the base image.
-
-		Images in manifest list format will automatically select an image that matches the current
-		operating system and architecture unless you use --filter-by-os to select a different image.
-		This flag has no effect on regular images.
-		`)
-
-	example = templates.Examples(`
-# Remove the entrypoint on the mysql:latest image
-%[1]s --from mysql:latest --to myregistry.com/myimage:latest --image '{"Entrypoint":null}'
-
-# Add a new layer to the image
-%[1]s --from mysql:latest --to myregistry.com/myimage:latest layer.tar.gz
-`)
-)
-
-type AppendImageOptions struct {
-	From, To    string
-	LayerFiles  []string
-	LayerStream io.Reader
-
-	ConfigPatch string
-	MetaPatch   string
-
-	ConfigurationCallback func(dgst, contentDigest digest.Digest, config *dockerv1client.DockerImageConfig) error
-	// ToDigest is set after a new image is uploaded
-	ToDigest digest.Digest
-
-	DropHistory bool
-	CreatedAt   string
-
-	SecurityOptions imagemanifest.SecurityOptions
-	FilterOptions   imagemanifest.FilterOptions
-	ParallelOptions imagemanifest.ParallelOptions
-
-	DryRun bool
-	Force  bool
-
-	genericclioptions.IOStreams
-}
-
-func NewAppendImageOptions(streams genericclioptions.IOStreams) *AppendImageOptions {
-	return &AppendImageOptions{
-		IOStreams:       streams,
-		ParallelOptions: imagemanifest.ParallelOptions{MaxPerRegistry: 4},
-	}
-}
-
-// New creates a new command
-func NewCmdAppendImage(name string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewAppendImageOptions(streams)
-
-	cmd := &cobra.Command{
-		Use:     "append",
-		Short:   "Add layers to images and push them to a registry",
-		Long:    desc,
-		Example: fmt.Sprintf(example, name+" append"),
-		Run: func(c *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(c, args))
-			kcmdutil.CheckErr(o.Validate())
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	flag := cmd.Flags()
-	o.SecurityOptions.Bind(flag)
-	o.FilterOptions.Bind(flag)
-	o.ParallelOptions.Bind(flag)
-
-	flag.BoolVar(&o.DryRun, "dry-run", o.DryRun, "Print the actions that would be taken and exit without writing to the destination.")
-
-	flag.StringVar(&o.From, "from", o.From, "The image to use as a base. If empty, a new scratch image is created.")
-	flag.StringVar(&o.To, "to", o.To, "The Docker repository tag to upload the appended image to.")
-
-	flag.StringVar(&o.ConfigPatch, "image", o.ConfigPatch, "A JSON patch that will be used with the output image data.")
-	flag.StringVar(&o.MetaPatch, "meta", o.MetaPatch, "A JSON patch that will be used with image base metadata (advanced config).")
-	flag.BoolVar(&o.DropHistory, "drop-history", o.DropHistory, "Fields on the image that relate to the history of how the image was created will be removed.")
-	flag.StringVar(&o.CreatedAt, "created-at", o.CreatedAt, "The creation date for this image, in RFC3339 format or milliseconds from the Unix epoch.")
-
-	flag.BoolVar(&o.Force, "force", o.Force, "If set, the command will attempt to upload all layers instead of skipping those that are already uploaded.")
-
-	return cmd
-}
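-
-// appendImageExample is a minimal, hypothetical sketch of driving the append
-// logic without the cobra wiring (cobra normally invokes Complete and Validate
-// before Run); the image references below are illustrative only.
-//
-//	func appendImageExample(streams genericclioptions.IOStreams) error {
-//		o := NewAppendImageOptions(streams)
-//		o.From = "docker.io/library/mysql:latest"
-//		o.To = "myregistry.com/myimage:latest"
-//		o.LayerFiles = []string{"layer.tar.gz"}
-//		return o.Run()
-//	}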
-
-func (o *AppendImageOptions) Complete(cmd *cobra.Command, args []string) error {
-	if err := o.FilterOptions.Complete(cmd.Flags()); err != nil {
-		return err
-	}
-
-	for _, arg := range args {
-		if arg == "-" {
-			if o.LayerStream != nil {
-				return fmt.Errorf("you may only specify '-' as an argument one time")
-			}
-			o.LayerStream = o.In
-			continue
-		}
-		fi, err := os.Stat(arg)
-		if err != nil {
-			return fmt.Errorf("invalid argument: %s", err)
-		}
-		if fi.IsDir() {
-			return fmt.Errorf("invalid argument: %s is a directory", arg)
-		}
-		o.LayerFiles = append(o.LayerFiles, arg)
-	}
-
-	return nil
-}
-
-func (o *AppendImageOptions) Validate() error {
-	return o.FilterOptions.Validate()
-}
-
-func (o *AppendImageOptions) Run() error {
-	var createdAt *time.Time
-	if len(o.CreatedAt) > 0 {
-		if d, err := strconv.ParseInt(o.CreatedAt, 10, 64); err == nil {
-			t := time.Unix(d/1000, (d%1000)*1000000).UTC()
-			createdAt = &t
-		} else {
-			t, err := time.Parse(time.RFC3339, o.CreatedAt)
-			if err != nil {
-				return fmt.Errorf("--created-at must be a relative time (2m, -5h) or an RFC3339 formatted date")
-			}
-			createdAt = &t
-		}
-	}
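-	// Both accepted forms resolve to the same instant; for example (values
-	// illustrative), --created-at=1546300800000 (milliseconds since the Unix
-	// epoch) and --created-at=2019-01-01T00:00:00Z (RFC3339) are equivalent.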
-
-	var from *imagereference.DockerImageReference
-	if len(o.From) > 0 {
-		src, err := imagereference.Parse(o.From)
-		if err != nil {
-			return err
-		}
-		if len(src.Tag) == 0 && len(src.ID) == 0 {
-			return fmt.Errorf("--from must point to an image ID or image tag")
-		}
-		from = &src
-	}
-	to, err := imagereference.Parse(o.To)
-	if err != nil {
-		return err
-	}
-	if len(to.ID) > 0 {
-		return fmt.Errorf("--to may not point to an image by ID")
-	}
-
-	ctx := context.Background()
-	fromContext, err := o.SecurityOptions.Context()
-	if err != nil {
-		return err
-	}
-	toContext := fromContext.Copy().WithActions("pull", "push")
-
-	toRepo, err := toContext.Repository(ctx, to.DockerClientDefaults().RegistryURL(), to.RepositoryName(), o.SecurityOptions.Insecure)
-	if err != nil {
-		return err
-	}
-	toManifests, err := toRepo.Manifests(ctx)
-	if err != nil {
-		return err
-	}
-
-	var (
-		base              *dockerv1client.DockerImageConfig
-		baseDigest        digest.Digest
-		baseContentDigest digest.Digest
-		layers            []distribution.Descriptor
-		fromRepo          distribution.Repository
-	)
-	if from != nil {
-		repo, err := fromContext.Repository(ctx, from.DockerClientDefaults().RegistryURL(), from.RepositoryName(), o.SecurityOptions.Insecure)
-		if err != nil {
-			return err
-		}
-		fromRepo = repo
-
-		srcManifest, manifestLocation, err := imagemanifest.FirstManifest(ctx, *from, repo, o.FilterOptions.Include)
-		if err != nil {
-			return fmt.Errorf("unable to read image %s: %v", from, err)
-		}
-		base, layers, err = imagemanifest.ManifestToImageConfig(ctx, srcManifest, repo.Blobs(ctx), manifestLocation)
-		if err != nil {
-			return fmt.Errorf("unable to parse image %s: %v", from, err)
-		}
-
-		contentDigest, err := registryclient.ContentDigestForManifest(srcManifest, manifestLocation.Manifest.Algorithm())
-		if err != nil {
-			return err
-		}
-
-		baseDigest = manifestLocation.Manifest
-		baseContentDigest = contentDigest
-
-	} else {
-		base = add.NewEmptyConfig()
-		layers = nil
-		fromRepo = scratchRepo{}
-	}
-
-	if base.Config == nil {
-		base.Config = &docker10.DockerConfig{}
-	}
-
-	if o.ConfigurationCallback != nil {
-		if err := o.ConfigurationCallback(baseDigest, baseContentDigest, base); err != nil {
-			return err
-		}
-	} else {
-		if klog.V(4) {
-			configJSON, _ := json.MarshalIndent(base, "", "  ")
-			klog.Infof("input config:\n%s\nlayers: %#v", configJSON, layers)
-		}
-
-		base.Parent = ""
-
-		if createdAt == nil {
-			t := time.Now()
-			createdAt = &t
-		}
-		base.Created = *createdAt
-
-		if o.DropHistory {
-			base.ContainerConfig = docker10.DockerConfig{}
-			base.History = nil
-			base.Container = ""
-			base.DockerVersion = ""
-			base.Config.Image = ""
-		}
-		if len(o.ConfigPatch) > 0 {
-			if err := json.Unmarshal([]byte(o.ConfigPatch), base.Config); err != nil {
-				return fmt.Errorf("unable to patch image from --image: %v", err)
-			}
-		}
-		if len(o.MetaPatch) > 0 {
-			if err := json.Unmarshal([]byte(o.MetaPatch), base); err != nil {
-				return fmt.Errorf("unable to patch image from --meta: %v", err)
-			}
-		}
-	}
-
-	if klog.V(4) {
-		configJSON, _ := json.MarshalIndent(base, "", "  ")
-		klog.Infof("output config:\n%s", configJSON)
-	}
-
-	numLayers := len(layers)
-	toBlobs := toRepo.Blobs(ctx)
-
-	for _, arg := range o.LayerFiles {
-		layers, err = appendFileAsLayer(ctx, arg, layers, base, o.DryRun, o.Out, toBlobs)
-		if err != nil {
-			return err
-		}
-	}
-	if o.LayerStream != nil {
-		layers, err = appendLayer(ctx, o.LayerStream, layers, base, o.DryRun, o.Out, toBlobs)
-		if err != nil {
-			return err
-		}
-	}
-	if len(layers) == 0 {
-		layers, err = appendLayer(ctx, bytes.NewBuffer(dockerlayer.GzippedEmptyLayer), layers, base, o.DryRun, o.Out, toBlobs)
-		if err != nil {
-			return err
-		}
-	}
-
-	// all v1 schema images must have a history whose length equals the number
-	// of non-empty blob layers, but v2 images do not require it
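-	// For example, a base config with one history entry and three layers gains
-	// two synthetic entries stamped with base.Created before upload.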
-	for i := len(base.History); i < len(layers); i++ {
-		base.History = append(base.History, dockerv1client.DockerConfigHistory{
-			Created: base.Created,
-		})
-	}
-
-	if o.DryRun {
-		toManifests = &dryRunManifestService{}
-		toBlobs = &dryRunBlobStore{layers: layers}
-	}
-
-	// upload base layers in parallel
-	stopCh := make(chan struct{})
-	defer close(stopCh)
-	q := workqueue.New(o.ParallelOptions.MaxPerRegistry, stopCh)
-	err = q.Try(func(w workqueue.Try) {
-		for i := range layers[:numLayers] {
-			layer := &layers[i]
-			index := i
-			needLayerDigest := len(base.RootFS.DiffIDs[i]) == 0
-			w.Try(func() error {
-				fromBlobs := fromRepo.Blobs(ctx)
-
-				// check whether the blob exists
-				if !o.Force {
-					if desc, err := toBlobs.Stat(ctx, layer.Digest); err == nil {
-						// ensure the correct size makes it back to the manifest
-						klog.V(4).Infof("Layer %s already exists in destination (%s)", layer.Digest, units.HumanSizeWithPrecision(float64(layer.Size), 3))
-						if layer.Size == 0 {
-							layer.Size = desc.Size
-						}
-						// we need to calculate the tar sum from the image, requiring us to pull it
-						if needLayerDigest {
-							klog.V(4).Infof("Need tar sum, streaming layer %s", layer.Digest)
-							r, err := fromBlobs.Open(ctx, layer.Digest)
-							if err != nil {
-								return fmt.Errorf("unable to access the layer %s in order to calculate its content ID: %v", layer.Digest, err)
-							}
-							defer r.Close()
-							layerDigest, _, _, _, err := add.DigestCopy(ioutil.Discard.(io.ReaderFrom), r)
-							if err != nil {
-								return fmt.Errorf("unable to calculate contentID for layer %s: %v", layer.Digest, err)
-							}
-							klog.V(4).Infof("Layer %s has tar sum %s", layer.Digest, layerDigest)
-							base.RootFS.DiffIDs[index] = layerDigest.String()
-						}
-						// TODO: due to a bug in the registry, the empty layer is always returned as existing, but
-						// an upload without it will fail - https://bugzilla.redhat.com/show_bug.cgi?id=1599028
-						if layer.Digest != dockerlayer.GzippedEmptyLayerDigest {
-							return nil
-						}
-					}
-				}
-
-				// copy the blob, calculating layer digest if needed
-				var mountFrom reference.Named
-				if from != nil && from.Registry == to.Registry {
-					mountFrom = fromRepo.Named()
-				}
-				desc, layerDigest, err := copyBlob(ctx, fromBlobs, toBlobs, *layer, o.Out, needLayerDigest, mountFrom)
-				if err != nil {
-					return fmt.Errorf("uploading the source layer %s failed: %v", layer.Digest, err)
-				}
-				if needLayerDigest {
-					base.RootFS.DiffIDs[index] = layerDigest.String()
-				}
-
-				// check output
-				if desc.Digest != layer.Digest {
-					return fmt.Errorf("when uploading blob %s, got a different returned digest %s", desc.Digest, layer.Digest)
-				}
-				// ensure the correct size makes it back to the manifest
-				if layer.Size == 0 {
-					layer.Size = desc.Size
-				}
-				return nil
-			})
-		}
-	})
-	if err != nil {
-		return err
-	}
-
-	manifest, configJSON, err := add.UploadSchema2Config(ctx, toBlobs, base, layers)
-	if err != nil {
-		return fmt.Errorf("unable to upload the new image manifest: %v", err)
-	}
-	klog.V(4).Infof("Created config JSON:\n%s", configJSON)
-	toDigest, err := imagemanifest.PutManifestInCompatibleSchema(ctx, manifest, to.Tag, toManifests, toRepo.Named(), fromRepo.Blobs(ctx), configJSON)
-	if err != nil {
-		return fmt.Errorf("unable to convert the image to a compatible schema version: %v", err)
-	}
-	o.ToDigest = toDigest
-	if !o.DryRun {
-		fmt.Fprintf(o.Out, "Pushed %s to %s\n", toDigest, to)
-	}
-	return nil
-}
-
-// copyBlob attempts to mirror a blob from one repo to another, mounting it if possible, and calculating the
-// layerDigest if needLayerDigest is true (mounting is not possible if we need to calculate a layerDigest).
-func copyBlob(ctx context.Context, fromBlobs, toBlobs distribution.BlobService, layer distribution.Descriptor, out io.Writer, needLayerDigest bool, mountFrom reference.Named) (distribution.Descriptor, digest.Digest, error) {
-	// source
-	r, err := fromBlobs.Open(ctx, layer.Digest)
-	if err != nil {
-		return distribution.Descriptor{}, "", fmt.Errorf("unable to access the source layer %s: %v", layer.Digest, err)
-	}
-	defer r.Close()
-
-	// destination
-	mountOptions := []distribution.BlobCreateOption{WithDescriptor(layer)}
-	if mountFrom != nil && !needLayerDigest {
-		source, err := reference.WithDigest(mountFrom, layer.Digest)
-		if err != nil {
-			return distribution.Descriptor{}, "", err
-		}
-		mountOptions = append(mountOptions, client.WithMountFrom(source))
-	}
-	bw, err := toBlobs.Create(ctx, mountOptions...)
-	if err != nil {
-		switch t := err.(type) {
-		case distribution.ErrBlobMounted:
-			// mount successful
-			klog.V(5).Infof("Blob mounted %#v", layer)
-			if t.From.Digest() != layer.Digest {
-				return distribution.Descriptor{}, "", fmt.Errorf("unable to upload layer %s to destination repository: tried to mount source and got back a different digest %s", layer.Digest, t.From.Digest())
-			}
-			if t.Descriptor.Size > 0 {
-				layer.Size = t.Descriptor.Size
-			}
-			return layer, "", nil
-		default:
-			return distribution.Descriptor{}, "", fmt.Errorf("unable to upload layer %s to destination repository: %v", layer.Digest, err)
-		}
-	}
-	defer bw.Close()
-
-	if layer.Size > 0 {
-		fmt.Fprintf(out, "Uploading %s ...\n", units.HumanSize(float64(layer.Size)))
-	} else {
-		fmt.Fprintf(out, "Uploading ...\n")
-	}
-
-	// copy the blob, calculating the diffID if necessary
-	var layerDigest digest.Digest
-	if needLayerDigest {
-		klog.V(4).Infof("Need tar sum, calculating while streaming %s", layer.Digest)
-		calculatedDigest, _, _, _, err := add.DigestCopy(bw, r)
-		if err != nil {
-			return distribution.Descriptor{}, "", err
-		}
-		layerDigest = calculatedDigest
-		klog.V(4).Infof("Layer %s has tar sum %s", layer.Digest, layerDigest)
-
-	} else {
-		if _, err := bw.ReadFrom(r); err != nil {
-			return distribution.Descriptor{}, "", err
-		}
-	}
-
-	desc, err := bw.Commit(ctx, layer)
-	if err != nil {
-		return distribution.Descriptor{}, "", err
-	}
-	return desc, layerDigest, nil
-}
-
-type optionFunc func(interface{}) error
-
-func (f optionFunc) Apply(v interface{}) error {
-	return f(v)
-}
-
-// WithDescriptor returns a BlobCreateOption which provides the expected blob metadata.
-func WithDescriptor(desc distribution.Descriptor) distribution.BlobCreateOption {
-	return optionFunc(func(v interface{}) error {
-		opts, ok := v.(*distribution.CreateOptions)
-		if !ok {
-			return fmt.Errorf("unexpected options type: %T", v)
-		}
-		if opts.Mount.Stat == nil {
-			opts.Mount.Stat = &desc
-		}
-		return nil
-	})
-}
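-
-// As an illustration (descriptor values hypothetical), WithDescriptor lets a
-// caller of Create supply the expected size and digest up front, e.g. when
-// attempting a cross-repository mount:
-//
-//	desc := distribution.Descriptor{Digest: layer.Digest, Size: layer.Size}
-//	bw, err := toBlobs.Create(ctx, WithDescriptor(desc), client.WithMountFrom(source))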
-
-func appendFileAsLayer(ctx context.Context, name string, layers []distribution.Descriptor, config *dockerv1client.DockerImageConfig, dryRun bool, out io.Writer, blobs distribution.BlobService) ([]distribution.Descriptor, error) {
-	f, err := os.Open(name)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	return appendLayer(ctx, f, layers, config, dryRun, out, blobs)
-}
-
-func appendLayer(ctx context.Context, r io.Reader, layers []distribution.Descriptor, config *dockerv1client.DockerImageConfig, dryRun bool, out io.Writer, blobs distribution.BlobService) ([]distribution.Descriptor, error) {
-	var readerFrom io.ReaderFrom = ioutil.Discard.(io.ReaderFrom)
-	var done = func(distribution.Descriptor) error { return nil }
-	if !dryRun {
-		fmt.Fprint(out, "Uploading ... ")
-		start := time.Now()
-		bw, err := blobs.Create(ctx)
-		if err != nil {
-			fmt.Fprintln(out, "failed")
-			return nil, err
-		}
-		readerFrom = bw
-		defer bw.Close()
-		done = func(desc distribution.Descriptor) error {
-			_, err := bw.Commit(ctx, desc)
-			if err != nil {
-				fmt.Fprintln(out, "failed")
-				return err
-			}
-			fmt.Fprintf(out, "%s/s\n", units.HumanSize(float64(desc.Size)/float64(time.Now().Sub(start))*float64(time.Second)))
-			return nil
-		}
-	}
-	layerDigest, blobDigest, modTime, n, err := add.DigestCopy(readerFrom, r)
-	if err != nil {
-		return nil, err
-	}
-	desc := distribution.Descriptor{
-		Digest:    blobDigest,
-		Size:      n,
-		MediaType: schema2.MediaTypeLayer,
-	}
-	layers = append(layers, desc)
-	add.AddLayerToConfig(config, desc, layerDigest.String())
-	if modTime != nil && !modTime.IsZero() {
-		config.Created = *modTime
-	}
-	return layers, done(desc)
-}
-
-func calculateLayerDigest(blobs distribution.BlobService, dgst digest.Digest, readerFrom io.ReaderFrom, r io.Reader) (digest.Digest, error) {
-	if readerFrom == nil {
-		readerFrom = ioutil.Discard.(io.ReaderFrom)
-	}
-	layerDigest, _, _, _, err := add.DigestCopy(readerFrom, r)
-	return layerDigest, err
-}
-
-// scratchRepo can serve the scratch image blob.
-type scratchRepo struct{}
-
-var _ distribution.Repository = scratchRepo{}
-
-func (_ scratchRepo) Named() reference.Named { panic("not implemented") }
-func (_ scratchRepo) Tags(ctx context.Context) distribution.TagService {
-	panic("not implemented")
-}
-func (_ scratchRepo) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
-	panic("not implemented")
-}
-
-func (r scratchRepo) Blobs(ctx context.Context) distribution.BlobStore { return r }
-
-func (_ scratchRepo) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
-	if dgst != dockerlayer.GzippedEmptyLayerDigest {
-		return distribution.Descriptor{}, distribution.ErrBlobUnknown
-	}
-	return distribution.Descriptor{
-		MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
-		Digest:    digest.Digest(dockerlayer.GzippedEmptyLayerDigest),
-		Size:      int64(len(dockerlayer.GzippedEmptyLayer)),
-	}, nil
-}
-
-func (_ scratchRepo) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
-	if dgst != dockerlayer.GzippedEmptyLayerDigest {
-		return nil, distribution.ErrBlobUnknown
-	}
-	return dockerlayer.GzippedEmptyLayer, nil
-}
-
-type nopCloseBuffer struct {
-	*bytes.Buffer
-}
-
-func (_ nopCloseBuffer) Seek(offset int64, whence int) (int64, error) {
-	return 0, nil
-}
-
-func (_ nopCloseBuffer) Close() error {
-	return nil
-}
-
-func (_ scratchRepo) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
-	if dgst != dockerlayer.GzippedEmptyLayerDigest {
-		return nil, distribution.ErrBlobUnknown
-	}
-	return nopCloseBuffer{bytes.NewBuffer(dockerlayer.GzippedEmptyLayer)}, nil
-}
-
-func (_ scratchRepo) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
-	panic("not implemented")
-}
-
-func (_ scratchRepo) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
-	panic("not implemented")
-}
-
-func (_ scratchRepo) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
-	panic("not implemented")
-}
-
-func (_ scratchRepo) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
-	panic("not implemented")
-}
-
-func (_ scratchRepo) Delete(ctx context.Context, dgst digest.Digest) error {
-	panic("not implemented")
-}
-
-// dryRunManifestService emulates a remote registry for dry run behavior
-type dryRunManifestService struct{}
-
-func (s *dryRunManifestService) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
-	panic("not implemented")
-}
-
-func (s *dryRunManifestService) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
-	panic("not implemented")
-}
-
-func (s *dryRunManifestService) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
-	klog.V(4).Infof("Manifest: %#v", manifest.References())
-	return registryclient.ContentDigestForManifest(manifest, digest.SHA256)
-}
-
-func (s *dryRunManifestService) Delete(ctx context.Context, dgst digest.Digest) error {
-	panic("not implemented")
-}
-
-// dryRunBlobStore emulates a remote registry for dry run behavior
-type dryRunBlobStore struct {
-	layers []distribution.Descriptor
-}
-
-func (s *dryRunBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
-	for _, layer := range s.layers {
-		if layer.Digest == dgst {
-			return layer, nil
-		}
-	}
-	return distribution.Descriptor{}, distribution.ErrBlobUnknown
-}
-
-func (s *dryRunBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
-	panic("not implemented")
-}
-
-func (s *dryRunBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
-	panic("not implemented")
-}
-
-func (s *dryRunBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
-	return distribution.Descriptor{
-		MediaType: mediaType,
-		Size:      int64(len(p)),
-		Digest:    digest.SHA256.FromBytes(p),
-	}, nil
-}
-
-func (s *dryRunBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
-	panic("not implemented")
-}
-
-func (s *dryRunBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
-	panic("not implemented")
-}
-
-func (s *dryRunBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
-	panic("not implemented")
-}
-
-func (s *dryRunBlobStore) Delete(ctx context.Context, dgst digest.Digest) error {
-	panic("not implemented")
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/archive/archive.go b/vendor/github.com/openshift/oc/pkg/cli/image/archive/archive.go
deleted file mode 100644
index 2f7b8b06997e..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/archive/archive.go
+++ /dev/null
@@ -1,438 +0,0 @@
-package archive
-
-import (
-	"archive/tar"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"runtime"
-	"strings"
-	"syscall"
-
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/idtools"
-	"github.com/docker/docker/pkg/pools"
-	"github.com/docker/docker/pkg/system"
-)
-
-type (
-	// Compression represents the compression state (compressed or not).
-	Compression int
-	// WhiteoutFormat is the format of whiteout files to be unpacked
-	WhiteoutFormat int
-
-	// TarOptions wraps the tar options.
-	TarOptions struct {
-		IncludeFiles    []string
-		ExcludePatterns []string
-		Compression     Compression
-		NoLchown        bool
-		// REMOVED: use remap instead
-		//UIDMaps          []idtools.IDMap
-		//GIDMaps          []idtools.IDMap
-		ChownOpts        *idtools.IDPair
-		IncludeSourceDir bool
-		// WhiteoutFormat is the expected on disk format for whiteout files.
-		// This format will be converted to the standard format on pack
-		// and from the standard format on unpack.
-		WhiteoutFormat WhiteoutFormat
-		// When unpacking, specifies whether overwriting a directory with a
-		// non-directory is allowed and vice versa.
-		NoOverwriteDirNonDir bool
-		// For each include when creating an archive, the included name will be
-		// replaced with the matching name from this map.
-		RebaseNames map[string]string
-		InUserNS    bool
-
-		// ADDED: allow bypassing chown
-		// If false, no chown will be performed
-		Chown bool
-
-		AlterHeaders AlterHeader
-	}
-)
-
-// breakoutError is used to differentiate errors related to breaking out of
-// the destination directory (path traversal). When testing archive breakout
-// in the unit tests, this error is expected in order for the test to pass.
-type breakoutError error
-
-type tarWhiteoutConverter interface {
-	ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
-	ConvertRead(*tar.Header, string) (bool, error)
-}
-
-type AlterHeader interface {
-	Alter(*tar.Header) (bool, error)
-}
-
-type RemapIDs struct {
-	mappings *idtools.IDMappings
-}
-
-func (r RemapIDs) Alter(hdr *tar.Header) (bool, error) {
-	ids, err := r.mappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
-	hdr.Uid, hdr.Gid = ids.UID, ids.GID
-	return true, err
-}
-
-// ApplyLayer is copied from github.com/docker/docker/pkg/archive
-func ApplyLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
-	dest = filepath.Clean(dest)
-	var err error
-	layer, err = archive.DecompressStream(layer)
-	if err != nil {
-		return 0, err
-	}
-	return unpackLayer(dest, layer, options)
-}
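-
-// A minimal sketch combining RemapIDs with ApplyLayer (the mapping values and
-// destination path are hypothetical):
-//
-//	maps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
-//	remap := RemapIDs{mappings: idtools.NewIDMappingsFromMaps(maps, maps)}
-//	n, err := ApplyLayer("/var/lib/unpack", layerStream, &TarOptions{
-//		Chown:        true,
-//		AlterHeaders: remap,
-//	})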
-
-// unpackLayer is copied from github.com/docker/docker/pkg/archive
-// unpackLayer unpacks `layer` into `dest`. The stream `layer` can be
-// compressed or uncompressed.
-// Returns the size in bytes of the contents of the layer.
-func unpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
-	tr := tar.NewReader(layer)
-	trBuf := pools.BufioReader32KPool.Get(tr)
-	defer pools.BufioReader32KPool.Put(trBuf)
-
-	var dirs []*tar.Header
-	unpackedPaths := make(map[string]struct{})
-
-	if options == nil {
-		options = &TarOptions{Chown: true}
-	}
-	if options.ExcludePatterns == nil {
-		options.ExcludePatterns = []string{}
-	}
-	// idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
-
-	aufsTempdir := ""
-	aufsHardlinks := make(map[string]*tar.Header)
-
-	// Iterate through the files in the archive.
-	for {
-		hdr, err := tr.Next()
-		if err == io.EOF {
-			// end of tar archive
-			break
-		}
-		if err != nil {
-			return 0, err
-		}
-
-		size += hdr.Size
-
-		// Normalize name, for safety and for a simple is-root check
-		hdr.Name = filepath.Clean(hdr.Name)
-
-		if options.AlterHeaders != nil {
-			ok, err := options.AlterHeaders.Alter(hdr)
-			if err != nil {
-				return 0, err
-			}
-			if !ok {
-				continue
-			}
-		}
-
-		// Windows does not support filenames with colons in them. Ignore
-		// these files. This is not a problem though (although it might
-		// appear that it is). Let's suppose a client is running docker pull.
-		// The daemon it points to is Windows. Would it make sense for the
-		// client to be doing a docker pull Ubuntu for example (which has files
-		// with colons in the name under /usr/share/man/man3)? No, absolutely
-		// not as it would really only make sense that they were pulling a
-		// Windows image. However, for development, it is necessary to be able
-		// to pull Linux images which are in the repository.
-		//
-		// TODO Windows. Once the registry is aware of what images are Windows-
-		// specific or Linux-specific, this warning should be changed to an error
-		// to cater for the situation where someone does manage to upload a Linux
-		// image but have it tagged as Windows inadvertently.
-		if runtime.GOOS == "windows" {
-			if strings.Contains(hdr.Name, ":") {
-				continue
-			}
-		}
-
-		// Note: as these operations are platform specific, the slash must be as well.
-		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
-			// Not the root directory, ensure that the parent directory exists.
-			// This happened in some tests where an image had a tarfile without any
-			// parent directories.
-			parent := filepath.Dir(hdr.Name)
-			parentPath := filepath.Join(dest, parent)
-
-			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
-				err = system.MkdirAll(parentPath, 0600, "")
-				if err != nil {
-					return 0, err
-				}
-			}
-		}
-
-		// Skip AUFS metadata dirs
-		if strings.HasPrefix(hdr.Name, archive.WhiteoutMetaPrefix) {
-			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
-			// We don't want this directory, but we need the files in them so that
-			// such hardlinks can be resolved.
-			if strings.HasPrefix(hdr.Name, archive.WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
-				basename := filepath.Base(hdr.Name)
-				aufsHardlinks[basename] = hdr
-				if aufsTempdir == "" {
-					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
-						return 0, err
-					}
-					defer os.RemoveAll(aufsTempdir)
-				}
-				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, options.Chown, options.ChownOpts, options.InUserNS); err != nil {
-					return 0, err
-				}
-			}
-
-			if hdr.Name != archive.WhiteoutOpaqueDir {
-				continue
-			}
-		}
-
-		path := filepath.Join(dest, hdr.Name)
-		rel, err := filepath.Rel(dest, path)
-		if err != nil {
-			return 0, err
-		}
-
-		// Note: as these operations are platform specific, the slash must be as well.
-		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
-			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
-		}
-		base := filepath.Base(path)
-
-		if strings.HasPrefix(base, archive.WhiteoutPrefix) {
-			dir := filepath.Dir(path)
-			if base == archive.WhiteoutOpaqueDir {
-				_, err := os.Lstat(dir)
-				if err != nil {
-					return 0, err
-				}
-				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
-					if err != nil {
-						if os.IsNotExist(err) {
-							err = nil // parent was deleted
-						}
-						return err
-					}
-					if path == dir {
-						return nil
-					}
-					if _, exists := unpackedPaths[path]; !exists {
-						err := os.RemoveAll(path)
-						return err
-					}
-					return nil
-				})
-				if err != nil {
-					return 0, err
-				}
-			} else {
-				originalBase := base[len(archive.WhiteoutPrefix):]
-				originalPath := filepath.Join(dir, originalBase)
-				if err := os.RemoveAll(originalPath); err != nil {
-					return 0, err
-				}
-			}
-		} else {
-			// If path exists we almost always just want to remove and replace it.
-			// The only exception is when it is a directory *and* the file from
-			// the layer is also a directory. Then we want to merge them (i.e.
-			// just apply the metadata from the layer).
-			if fi, err := os.Lstat(path); err == nil {
-				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
-					if err := os.RemoveAll(path); err != nil {
-						return 0, err
-					}
-				}
-			}
-
-			trBuf.Reset(tr)
-			srcData := io.Reader(trBuf)
-			srcHdr := hdr
-
-			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
-			// we manually retarget these into the temporary files we extracted them into
-			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), archive.WhiteoutLinkDir) {
-				linkBasename := filepath.Base(hdr.Linkname)
-				srcHdr = aufsHardlinks[linkBasename]
-				if srcHdr == nil {
-					return 0, fmt.Errorf("Invalid aufs hardlink")
-				}
-				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
-				if err != nil {
-					return 0, err
-				}
-				defer tmpFile.Close()
-				srcData = tmpFile
-			}
-
-			// if err := remapIDs(idMappings, srcHdr); err != nil {
-			// 	return 0, err
-			// }
-
-			if err := createTarFile(path, dest, srcHdr, srcData, options.Chown, options.ChownOpts, options.InUserNS); err != nil {
-				return 0, err
-			}
-
-			// Directory mtimes must be handled at the end to avoid further
-			// file creation in them to modify the directory mtime
-			if hdr.Typeflag == tar.TypeDir {
-				dirs = append(dirs, hdr)
-			}
-			unpackedPaths[path] = struct{}{}
-		}
-	}
-
-	for _, hdr := range dirs {
-		path := filepath.Join(dest, hdr.Name)
-		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
-			return 0, err
-		}
-	}
-
-	return size, nil
-}
-
-func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error {
-	// hdr.Mode is in Linux format, which we can use for syscalls,
-	// but for os.Foo() calls we need the mode converted to os.FileMode,
-	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
-	hdrInfo := hdr.FileInfo()
-
-	switch hdr.Typeflag {
-	case tar.TypeDir:
-		// Create directory unless it exists as a directory already.
-		// In that case we just want to merge the two
-		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
-			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
-				return err
-			}
-		}
-
-	case tar.TypeReg, tar.TypeRegA:
-		// Source is regular file. We use system.OpenFileSequential to use sequential
-		// file access to avoid depleting the standby list on Windows.
-		// On Linux, this equates to a regular os.OpenFile
-		file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
-		if err != nil {
-			return err
-		}
-		if _, err := io.Copy(file, reader); err != nil {
-			file.Close()
-			return err
-		}
-		file.Close()
-
-	case tar.TypeBlock, tar.TypeChar:
-		if inUserns { // cannot create devices in a userns
-			return nil
-		}
-		// Handle this in an OS-specific way
-		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
-			return err
-		}
-
-	case tar.TypeFifo:
-		// Handle this in an OS-specific way
-		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
-			return err
-		}
-
-	case tar.TypeLink:
-		targetPath := filepath.Join(extractDir, hdr.Linkname)
-		// check for hardlink breakout
-		if !strings.HasPrefix(targetPath, extractDir) {
-			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
-		}
-		if err := os.Link(targetPath, path); err != nil {
-			return err
-		}
-
-	case tar.TypeSymlink:
-		// 	path 				-> hdr.Linkname = targetPath
-		// e.g. /extractDir/path/to/symlink 	-> ../2/file	= /extractDir/path/2/file
-		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
-
-		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
-		// that symlink would first have to be created, which would be caught earlier, at this very check:
-		if !strings.HasPrefix(targetPath, extractDir) {
-			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
-		}
-		if err := os.Symlink(hdr.Linkname, path); err != nil {
-			return err
-		}
-
-	case tar.TypeXGlobalHeader:
-		return nil
-
-	default:
-		return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
-	}
-
-	// Lchown is not supported on Windows.
-	if Lchown && runtime.GOOS != "windows" {
-		if chownOpts == nil {
-			chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
-		}
-		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
-			return err
-		}
-	}
-
-	var errors []string
-	for key, value := range hdr.Xattrs {
-		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
-			if err == syscall.ENOTSUP {
-				// We ignore errors here because not all graphdrivers support
-				// xattrs *cough* old versions of AUFS *cough*. However only
-				// ENOTSUP should be emitted in that case, otherwise we still
-				// bail.
-				errors = append(errors, err.Error())
-				continue
-			}
-			return err
-		}
-
-	}
-
-	// There is no LChmod, so ignore mode for symlink. Also, this
-	// must happen after chown, as that can modify the file mode
-	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
-		return err
-	}
-
-	aTime := hdr.AccessTime
-	if aTime.Before(hdr.ModTime) {
-		// Last access time should never be before last modified time.
-		aTime = hdr.ModTime
-	}
-
-	// system.Chtimes doesn't support a NOFOLLOW flag atm
-	if hdr.Typeflag == tar.TypeLink {
-		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
-			if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
-				return err
-			}
-		}
-	} else if hdr.Typeflag != tar.TypeSymlink {
-		if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
-			return err
-		}
-	} else {
-		ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
-		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/archive/archive_linux.go b/vendor/github.com/openshift/oc/pkg/cli/image/archive/archive_linux.go
deleted file mode 100644
index 52351954d645..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/archive/archive_linux.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package archive
-
-import (
-	"archive/tar"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/system"
-	"golang.org/x/sys/unix"
-)
-
-func getWhiteoutConverter(format archive.WhiteoutFormat) tarWhiteoutConverter {
-	if format == archive.OverlayWhiteoutFormat {
-		return overlayWhiteoutConverter{}
-	}
-	return nil
-}
-
-type overlayWhiteoutConverter struct{}
-
-func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
-	// convert whiteouts to AUFS format
-	if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
-		// we just rename the file and make it normal
-		dir, filename := filepath.Split(hdr.Name)
-		hdr.Name = filepath.Join(dir, archive.WhiteoutPrefix+filename)
-		hdr.Mode = 0600
-		hdr.Typeflag = tar.TypeReg
-		hdr.Size = 0
-	}
-
-	if fi.Mode()&os.ModeDir != 0 {
-		// convert opaque dirs to AUFS format by writing an empty file with the prefix
-		opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
-		if err != nil {
-			return nil, err
-		}
-		if len(opaque) == 1 && opaque[0] == 'y' {
-			if hdr.Xattrs != nil {
-				delete(hdr.Xattrs, "trusted.overlay.opaque")
-			}
-
-			// create a header for the whiteout file
-			// it should inherit some properties from the parent, but be a regular file
-			wo = &tar.Header{
-				Typeflag:   tar.TypeReg,
-				Mode:       hdr.Mode & int64(os.ModePerm),
-				Name:       filepath.Join(hdr.Name, archive.WhiteoutOpaqueDir),
-				Size:       0,
-				Uid:        hdr.Uid,
-				Uname:      hdr.Uname,
-				Gid:        hdr.Gid,
-				Gname:      hdr.Gname,
-				AccessTime: hdr.AccessTime,
-				ChangeTime: hdr.ChangeTime,
-			}
-		}
-	}
-
-	return
-}
-
-func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
-	base := filepath.Base(path)
-	dir := filepath.Dir(path)
-
-	// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
-	if base == archive.WhiteoutOpaqueDir {
-		err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
-		// don't write the file itself
-		return false, err
-	}
-
-	// if a file was deleted and we are using overlay, we need to create a character device
-	if strings.HasPrefix(base, archive.WhiteoutPrefix) {
-		originalBase := base[len(archive.WhiteoutPrefix):]
-		originalPath := filepath.Join(dir, originalBase)
-
-		if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
-			return false, err
-		}
-		if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
-			return false, err
-		}
-
-		// don't write the file itself
-		return false, nil
-	}
-
-	return true, nil
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/archive/archive_other.go b/vendor/github.com/openshift/oc/pkg/cli/image/archive/archive_other.go
deleted file mode 100644
index 9069c03a398a..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/archive/archive_other.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !linux
-
-package archive
-
-import "github.com/docker/docker/pkg/archive"
-
-func getWhiteoutConverter(format archive.WhiteoutFormat) tarWhiteoutConverter {
-	return nil
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/archive/archive_unix.go b/vendor/github.com/openshift/oc/pkg/cli/image/archive/archive_unix.go
deleted file mode 100644
index 9eb92b306515..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/archive/archive_unix.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// +build !windows
-
-package archive
-
-import (
-	"archive/tar"
-	"bufio"
-	"fmt"
-	"os"
-
-	"github.com/docker/docker/pkg/system"
-	"golang.org/x/sys/unix"
-)
-
-// runningInUserNS detects whether we are currently running in a user namespace.
-// Copied from github.com/opencontainers/runc/libcontainer/system
-func runningInUserNS() bool {
-	file, err := os.Open("/proc/self/uid_map")
-	if err != nil {
-		// This kernel-provided file only exists if user namespaces are supported
-		return false
-	}
-	defer file.Close()
-
-	buf := bufio.NewReader(file)
-	l, _, err := buf.ReadLine()
-	if err != nil {
-		return false
-	}
-
-	line := string(l)
-	var a, b, c int64
-	fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
-	/*
-	 * We assume we are in the initial user namespace if we have a full
-	 * range - 4294967295 uids starting at uid 0.
-	 */
-	if a == 0 && b == 0 && c == 4294967295 {
-		return false
-	}
-	return true
-}
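-
-// For reference, the initial user namespace typically reports a full-range
-// mapping such as "0 0 4294967295" in /proc/self/uid_map, while a mapped
-// namespace might report something like "0 100000 65536".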
-
-// handleTarTypeBlockCharFifo is an OS-specific helper function used by
-// createTarFile to handle the following types of header: Block; Char; Fifo
-func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
-	if runningInUserNS() {
-		// cannot create a device if running in user namespace
-		return nil
-	}
-
-	mode := uint32(hdr.Mode & 07777)
-	switch hdr.Typeflag {
-	case tar.TypeBlock:
-		mode |= unix.S_IFBLK
-	case tar.TypeChar:
-		mode |= unix.S_IFCHR
-	case tar.TypeFifo:
-		mode |= unix.S_IFIFO
-	}
-
-	return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
-}
-
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
-	if hdr.Typeflag == tar.TypeLink {
-		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
-			if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
-				return err
-			}
-		}
-	} else if hdr.Typeflag != tar.TypeSymlink {
-		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/archive/archive_windows.go b/vendor/github.com/openshift/oc/pkg/cli/image/archive/archive_windows.go
deleted file mode 100644
index e9d83376ea5e..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/archive/archive_windows.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build windows
-
-package archive
-
-import (
-	"archive/tar"
-	"os"
-)
-
-// handleTarTypeBlockCharFifo is an OS-specific helper function used by
-// createTarFile to handle the following types of header: Block; Char; Fifo
-func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
-	return nil
-}
-
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
-	return nil
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/archive/time_linux.go b/vendor/github.com/openshift/oc/pkg/cli/image/archive/time_linux.go
deleted file mode 100644
index 3448569b1ebb..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/archive/time_linux.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package archive
-
-import (
-	"syscall"
-	"time"
-)
-
-func timeToTimespec(time time.Time) (ts syscall.Timespec) {
-	if time.IsZero() {
-		// Return UTIME_OMIT special value
-		ts.Sec = 0
-		ts.Nsec = ((1 << 30) - 2)
-		return
-	}
-	return syscall.NsecToTimespec(time.UnixNano())
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/archive/time_unsupported.go b/vendor/github.com/openshift/oc/pkg/cli/image/archive/time_unsupported.go
deleted file mode 100644
index e85aac054080..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/archive/time_unsupported.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build !linux
-
-package archive
-
-import (
-	"syscall"
-	"time"
-)
-
-func timeToTimespec(time time.Time) (ts syscall.Timespec) {
-	nsec := int64(0)
-	if !time.IsZero() {
-		nsec = time.UnixNano()
-	}
-	return syscall.NsecToTimespec(nsec)
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/extract/extract.go b/vendor/github.com/openshift/oc/pkg/cli/image/extract/extract.go
deleted file mode 100644
index 51b1ec10af59..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/extract/extract.go
+++ /dev/null
@@ -1,744 +0,0 @@
-package extract
-
-import (
-	"archive/tar"
-	"context"
-	"fmt"
-	"io"
-	"math"
-	"os"
-	"path"
-	"path/filepath"
-	"regexp"
-	"strconv"
-	"strings"
-
-	"github.com/spf13/cobra"
-
-	"github.com/docker/distribution"
-	dockerarchive "github.com/docker/docker/pkg/archive"
-	digest "github.com/opencontainers/go-digest"
-
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/klog"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	"github.com/openshift/library-go/pkg/image/dockerv1client"
-	imagereference "github.com/openshift/library-go/pkg/image/reference"
-	"github.com/openshift/library-go/pkg/image/registryclient"
-	"github.com/openshift/oc/pkg/cli/image/archive"
-	imagemanifest "github.com/openshift/oc/pkg/cli/image/manifest"
-	"github.com/openshift/oc/pkg/cli/image/workqueue"
-)
-
-var (
-	desc = templates.LongDesc(`
-		Extract the contents of an image to disk
-
-		Download an image or parts of an image to the filesystem. Allows users to access the
-		contents of images without requiring a running container runtime engine.
-
-		Pass images to extract as arguments. The --path flag allows you to define multiple
-		source to destination directory mappings. The source section may be either a file, a
-		directory (ends with a '/'), or a file pattern within a directory. The destination
-		section is a directory to extract to. Both source and destination must be specified.
-
-		If the specified image supports multiple operating systems, the image that matches the
-		current operating system will be chosen. Otherwise you must pass --filter-by-os to
-		select the desired image.
-
-		You may further qualify the image by adding a layer selector to the end of the image
-		string to only extract specific layers within an image. The supported selectors are:
-
-		  [<index>] - select the layer at the provided index (zero-indexed)
-		  [<from>:<to>] - select layers by index range, exclusive of the upper bound
-		  [~<prefix>] - select the layer with the matching digest prefix or return an error
-
-		Negative indices are counted from the end of the list, e.g. [-1] selects the last
-		layer.`)
-
-	example = templates.Examples(`
-# Extract the busybox image into the current directory
-%[1]s docker.io/library/busybox:latest
-
-# Extract the busybox image to a temp directory (must exist)
-%[1]s docker.io/library/busybox:latest --path /:/tmp/busybox
-
-# Extract a single file from the image into the current directory
-%[1]s docker.io/library/centos:7 --path /bin/bash:.
-
-# Extract all .repo files from the image's /etc/yum.repos.d/ folder.
-%[1]s docker.io/library/centos:7 --path '/etc/yum.repos.d/*.repo:.'
-
-# Extract the last layer in the image
-%[1]s docker.io/library/centos:7[-1]
-
-# Extract the first three layers of the image
-%[1]s docker.io/library/centos:7[:3]
-
-# Extract the last three layers of the image
-%[1]s docker.io/library/centos:7[-3:]
-`)
-)
-
-type LayerInfo struct {
-	Index      int
-	Descriptor distribution.Descriptor
-	Mapping    *Mapping
-}
-
-// TarEntryFunc is called once per entry in the tar file. It may return
-// an error, or false to stop processing.
-type TarEntryFunc func(*tar.Header, LayerInfo, io.Reader) (cont bool, err error)
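-
-// A minimal sketch of a TarEntryFunc (the marker path is hypothetical): print
-// each entry and stop processing once a particular file has been seen.
-//
-//	var printUntilMarker TarEntryFunc = func(hdr *tar.Header, layer LayerInfo, r io.Reader) (bool, error) {
-//		fmt.Printf("layer %d: %s (%d bytes)\n", layer.Index, hdr.Name, hdr.Size)
-//		return hdr.Name != "etc/marker", nil
-//	}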
-
-type Options struct {
-	Mappings []Mapping
-
-	Files []string
-	Paths []string
-
-	OnlyFiles           bool
-	PreservePermissions bool
-
-	SecurityOptions imagemanifest.SecurityOptions
-	FilterOptions   imagemanifest.FilterOptions
-	ParallelOptions imagemanifest.ParallelOptions
-
-	Confirm bool
-	DryRun  bool
-
-	genericclioptions.IOStreams
-
-	// ImageMetadataCallback is invoked once per image retrieved, and may be called in parallel if
-	// MaxPerRegistry is set higher than 1.
-	ImageMetadataCallback func(m *Mapping, dgst, contentDigest digest.Digest, imageConfig *dockerv1client.DockerImageConfig)
-	// TarEntryCallback, if set, is passed each entry in the viewed layers. Entries will be filtered
-	// by name and only the entry in the highest layer will be passed to the callback. Returning false
-	// will halt processing of the image.
-	TarEntryCallback TarEntryFunc
-	// AllLayers ensures the TarEntryCallback is invoked for all files, and will cause the callback
-	// order to start at the lowest layer and work outwards.
-	AllLayers bool
-}
-
-func NewOptions(streams genericclioptions.IOStreams) *Options {
-	return &Options{
-		Paths: []string{},
-
-		IOStreams:       streams,
-		ParallelOptions: imagemanifest.ParallelOptions{MaxPerRegistry: 1},
-	}
-}
-
-// New creates a new command
-func New(name string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewOptions(streams)
-
-	cmd := &cobra.Command{
-		Use:     "extract",
-		Short:   "Copy files from an image to the filesystem",
-		Long:    desc,
-		Example: fmt.Sprintf(example, name+" extract"),
-		Run: func(c *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(c, args))
-			kcmdutil.CheckErr(o.Validate())
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	flag := cmd.Flags()
-	o.SecurityOptions.Bind(flag)
-	o.FilterOptions.Bind(flag)
-
-	flag.BoolVar(&o.Confirm, "confirm", o.Confirm, "Pass to allow extracting to non-empty directories.")
-	flag.BoolVar(&o.DryRun, "dry-run", o.DryRun, "Print the actions that would be taken and exit without writing any contents.")
-
-	flag.StringSliceVar(&o.Files, "file", o.Files, "Extract the specified files to the current directory.")
-	flag.StringSliceVar(&o.Paths, "path", o.Paths, "Extract only part of an image. Must be SRC:DST where SRC is the path within the image and DST a local directory. If not specified the default is to extract everything to the current directory.")
-	flag.BoolVarP(&o.PreservePermissions, "preserve-ownership", "p", o.PreservePermissions, "Preserve the permissions of extracted files.")
-	flag.BoolVar(&o.OnlyFiles, "only-files", o.OnlyFiles, "Only extract regular files and directories from the image.")
-	flag.BoolVar(&o.AllLayers, "all-layers", o.AllLayers, "For dry-run mode, process from lowest to highest layer and don't omit duplicate files.")
-
-	return cmd
-}
-
-type LayerFilter interface {
-	Filter(layers []distribution.Descriptor) ([]distribution.Descriptor, error)
-}
-
-type Mapping struct {
-	// Name is provided for the caller's convenience, to associate image callback metadata with a mapping
-	Name string
-	// Image is the raw input image to extract
-	Image string
-	// ImageRef is the parsed version of the raw input image
-	ImageRef imagereference.DockerImageReference
-	// LayerFilter can select which layers to load
-	LayerFilter LayerFilter
-	// From is the directory or file in the image to extract
-	From string
-	// To is the directory to extract the contents of the directory or the named file into.
-	To string
-	// ConditionFn is invoked before extracting the content and allows the set of images to be filtered.
-	ConditionFn func(m *Mapping, dgst digest.Digest, imageConfig *dockerv1client.DockerImageConfig) (bool, error)
-}
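-
-// For illustration (image and paths hypothetical), a mapping that extracts a
-// single file into the working directory:
-//
-//	m := Mapping{
-//		Image: "docker.io/library/centos:7",
-//		From:  "etc/os-release",
-//		To:    ".",
-//	}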
-
-func parseMappings(images, paths, files []string, requireEmpty bool) ([]Mapping, error) {
-	layerFilter := regexp.MustCompile(`^(.*)\[([^\]]*)\](.*)$`)
-
-	var mappings []Mapping
-
-	// convert paths and files to mappings for each image
-	for _, image := range images {
-		for _, arg := range files {
-			if strings.HasSuffix(arg, "/") {
-				return nil, fmt.Errorf("invalid file: %s must not end with a slash", arg)
-			}
-			mappings = append(mappings, Mapping{
-				Image: image,
-				From:  strings.TrimPrefix(arg, "/"),
-				To:    ".",
-			})
-		}
-
-		for _, arg := range paths {
-			parts := strings.SplitN(arg, ":", 2)
-			var mapping Mapping
-			switch len(parts) {
-			case 2:
-				mapping = Mapping{Image: image, From: parts[0], To: parts[1]}
-			default:
-				return nil, fmt.Errorf("--paths must be of the form SRC:DST")
-			}
-			if len(mapping.From) > 0 {
-				mapping.From = strings.TrimPrefix(mapping.From, "/")
-			}
-			if len(mapping.To) > 0 {
-				fi, err := os.Stat(mapping.To)
-				if os.IsNotExist(err) {
-					return nil, fmt.Errorf("destination path does not exist: %s", mapping.To)
-				}
-				if err != nil {
-					return nil, fmt.Errorf("invalid argument: %s", err)
-				}
-				if !fi.IsDir() {
-					return nil, fmt.Errorf("invalid argument: %s is not a directory", arg)
-				}
-				if requireEmpty {
-					f, err := os.Open(mapping.To)
-					if err != nil {
-						return nil, fmt.Errorf("unable to check directory: %v", err)
-					}
-					names, err := f.Readdirnames(1)
-					f.Close()
-					if err != nil && err != io.EOF {
-						return nil, fmt.Errorf("could not check for empty directory: %v", err)
-					}
-					if len(names) > 0 {
-						return nil, fmt.Errorf("directory %s must be empty, pass --confirm to overwrite contents of directory", mapping.To)
-					}
-				}
-			}
-			mappings = append(mappings, mapping)
-		}
-	}
-
-	// extract layer filter and set the ref
-	for i := range mappings {
-		mapping := &mappings[i]
-
-		if matches := layerFilter.FindStringSubmatch(mapping.Image); len(matches) > 0 {
-			if len(matches[1]) == 0 || len(matches[2]) == 0 || len(matches[3]) != 0 {
-				return nil, fmt.Errorf("layer selectors must be of the form IMAGE[\\d:\\d]")
-			}
-			mapping.Image = matches[1]
-			var err error
-			mapping.LayerFilter, err = parseLayerFilter(matches[2])
-			if err != nil {
-				return nil, err
-			}
-		}
-
-		src, err := imagereference.Parse(mapping.Image)
-		if err != nil {
-			return nil, err
-		}
-		if len(src.Tag) == 0 && len(src.ID) == 0 {
-			return nil, fmt.Errorf("source image must point to an image ID or image tag")
-		}
-		mapping.ImageRef = src
-	}
-
-	return mappings, nil
-}
-
-func (o *Options) Complete(cmd *cobra.Command, args []string) error {
-	if err := o.FilterOptions.Complete(cmd.Flags()); err != nil {
-		return err
-	}
-
-	if len(args) == 0 {
-		return fmt.Errorf("you must specify at least one image to extract as an argument")
-	}
-
-	if len(o.Paths) == 0 && len(o.Files) == 0 {
-		o.Paths = append(o.Paths, "/:.")
-	}
-
-	var err error
-	o.Mappings, err = parseMappings(args, o.Paths, o.Files, !o.Confirm && !o.DryRun)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-func (o *Options) Validate() error {
-	if len(o.Mappings) == 0 {
-		return fmt.Errorf("you must specify one or more paths or files")
-	}
-	return o.FilterOptions.Validate()
-}
-
-func (o *Options) Run() error {
-	ctx := context.Background()
-	fromContext, err := o.SecurityOptions.Context()
-	if err != nil {
-		return err
-	}
-
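-	// Process each mapping concurrently, bounded by the --max-per-registry worker limit.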
-	stopCh := make(chan struct{})
-	defer close(stopCh)
-	q := workqueue.New(o.ParallelOptions.MaxPerRegistry, stopCh)
-	return q.Try(func(q workqueue.Try) {
-		for i := range o.Mappings {
-			mapping := o.Mappings[i]
-			from := mapping.ImageRef
-			q.Try(func() error {
-				repo, err := fromContext.Repository(ctx, from.DockerClientDefaults().RegistryURL(), from.RepositoryName(), o.SecurityOptions.Insecure)
-				if err != nil {
-					return fmt.Errorf("unable to connect to image repository %s: %v", from.Exact(), err)
-				}
-
-				srcManifest, location, err := imagemanifest.FirstManifest(ctx, from, repo, o.FilterOptions.Include)
-				if err != nil {
-					if imagemanifest.IsImageForbidden(err) {
-						var msg string
-						if len(o.Mappings) == 1 {
-							msg = "image does not exist or you don't have permission to access the repository"
-						} else {
-							msg = fmt.Sprintf("image %q does not exist or you don't have permission to access the repository", from)
-						}
-						return imagemanifest.NewImageForbidden(msg, err)
-					}
-					if imagemanifest.IsImageNotFound(err) {
-						var msg string
-						if len(o.Mappings) == 1 {
-							msg = "image does not exist"
-						} else {
-							msg = fmt.Sprintf("image %q does not exist", from)
-						}
-						return imagemanifest.NewImageNotFound(msg, err)
-					}
-					return fmt.Errorf("unable to read image %s: %v", from, err)
-				}
-
-				contentDigest, err := registryclient.ContentDigestForManifest(srcManifest, location.Manifest.Algorithm())
-				if err != nil {
-					return err
-				}
-
-				imageConfig, layers, err := imagemanifest.ManifestToImageConfig(ctx, srcManifest, repo.Blobs(ctx), location)
-				if err != nil {
-					return fmt.Errorf("unable to parse image %s: %v", from, err)
-				}
-
-				if mapping.ConditionFn != nil {
-					ok, err := mapping.ConditionFn(&mapping, location.Manifest, imageConfig)
-					if err != nil {
-						return fmt.Errorf("unable to check whether to include image %s: %v", from, err)
-					}
-					if !ok {
-						klog.V(2).Infof("Filtered out image %s with digest %s from being extracted", from, location.Manifest)
-						return nil
-					}
-				}
-
-				var alter alterations
-				if o.OnlyFiles {
-					alter = append(alter, filesOnly{})
-				}
-				if len(mapping.From) > 0 {
-					switch {
-					case strings.HasSuffix(mapping.From, "/"):
-						alter = append(alter, newCopyFromDirectory(mapping.From))
-					default:
-						name, parent := path.Base(mapping.From), path.Dir(mapping.From)
-						if name == "." || parent == "." {
-							return fmt.Errorf("unexpected directory from mapping %s", mapping.From)
-						}
-						alter = append(alter, newCopyFromPattern(parent, name))
-					}
-				}
-
-				filteredLayers := layers
-				if mapping.LayerFilter != nil {
-					filteredLayers, err = mapping.LayerFilter.Filter(filteredLayers)
-					if err != nil {
-						return fmt.Errorf("unable to filter layers for %s: %v", from, err)
-					}
-				}
-				if !o.PreservePermissions {
-					alter = append(alter, removePermissions{})
-				}
-
-				var byEntry TarEntryFunc = o.TarEntryCallback
-				if o.DryRun {
-					path := mapping.To
-					out := o.Out
-					byEntry = func(hdr *tar.Header, layerInfo LayerInfo, r io.Reader) (bool, error) {
-						if len(hdr.Name) == 0 {
-							return true, nil
-						}
-						mode := hdr.FileInfo().Mode().String()
-						switch hdr.Typeflag {
-						case tar.TypeDir:
-							fmt.Fprintf(out, "%2d %s %12d %s\n", layerInfo.Index, mode, hdr.Size, filepath.Join(path, hdr.Name))
-						case tar.TypeReg, tar.TypeRegA:
-							fmt.Fprintf(out, "%2d %s %12d %s\n", layerInfo.Index, mode, hdr.Size, filepath.Join(path, hdr.Name))
-						case tar.TypeLink:
-							fmt.Fprintf(out, "%2d %s %12d %s -> %s\n", layerInfo.Index, mode, hdr.Size, hdr.Name, filepath.Join(path, hdr.Linkname))
-						case tar.TypeSymlink:
-							fmt.Fprintf(out, "%2d %s %12d %s -> %s\n", layerInfo.Index, mode, hdr.Size, hdr.Name, filepath.Join(path, hdr.Linkname))
-						default:
-							fmt.Fprintf(out, "%2d %s %12d %s %x\n", layerInfo.Index, mode, hdr.Size, filepath.Join(path, hdr.Name), hdr.Typeflag)
-						}
-						return true, nil
-					}
-				}
-
-				// walk the layers in reverse order, only showing a given path once
-				alreadySeen := make(map[string]struct{})
-				var layerInfos []LayerInfo
-				if byEntry != nil && !o.AllLayers {
-					for i := len(filteredLayers) - 1; i >= 0; i-- {
-						layerInfos = append(layerInfos, LayerInfo{Index: i, Descriptor: filteredLayers[i], Mapping: &mapping})
-					}
-				} else {
-					for i := range filteredLayers {
-						layerInfos = append(layerInfos, LayerInfo{Index: i, Descriptor: filteredLayers[i], Mapping: &mapping})
-					}
-				}
-
-				for _, info := range layerInfos {
-					layer := info.Descriptor
-
-					cont, err := func() (bool, error) {
-						fromBlobs := repo.Blobs(ctx)
-
-						klog.V(5).Infof("Extracting from layer: %#v", layer)
-
-						// source
-						r, err := fromBlobs.Open(ctx, layer.Digest)
-						if err != nil {
-							return false, fmt.Errorf("unable to access the source layer %s: %v", layer.Digest, err)
-						}
-						defer r.Close()
-
-						options := &archive.TarOptions{
-							AlterHeaders: alter,
-							Chown:        o.PreservePermissions,
-						}
-
-						if byEntry != nil {
-							cont, err := layerByEntry(r, options, info, byEntry, o.AllLayers, alreadySeen)
-							if err != nil {
-								err = fmt.Errorf("unable to iterate over layer %s from %s: %v", layer.Digest, from.Exact(), err)
-							}
-							return cont, err
-						}
-
-						klog.V(4).Infof("Extracting layer %s with options %#v", layer.Digest, options)
-						if _, err := archive.ApplyLayer(mapping.To, r, options); err != nil {
-							return false, fmt.Errorf("unable to extract layer %s from %s: %v", layer.Digest, from.Exact(), err)
-						}
-						return true, nil
-					}()
-					if err != nil {
-						return err
-					}
-					if !cont {
-						break
-					}
-				}
-
-				if o.ImageMetadataCallback != nil {
-					o.ImageMetadataCallback(&mapping, location.Manifest, contentDigest, imageConfig)
-				}
-				return nil
-			})
-		}
-	})
-}
-
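-// layerByEntry streams a (possibly compressed) layer and invokes fn for each tar entry that
-// survives the header alterations, skipping names already seen unless allLayers is set.
-// It returns false when fn signals that iteration should stop.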
-func layerByEntry(r io.Reader, options *archive.TarOptions, layerInfo LayerInfo, fn TarEntryFunc, allLayers bool, alreadySeen map[string]struct{}) (bool, error) {
-	rc, err := dockerarchive.DecompressStream(r)
-	if err != nil {
-		return false, err
-	}
-	defer rc.Close()
-	tr := tar.NewReader(rc)
-	for {
-		hdr, err := tr.Next()
-		if err != nil {
-			if err == io.EOF {
-				return true, nil
-			}
-			return false, err
-		}
-		klog.V(6).Infof("Printing layer entry %#v", hdr)
-		if options.AlterHeaders != nil {
-			ok, err := options.AlterHeaders.Alter(hdr)
-			if err != nil {
-				return false, err
-			}
-			if !ok {
-				klog.V(5).Infof("Exclude entry %s %x %d", hdr.Name, hdr.Typeflag, hdr.Size)
-				continue
-			}
-		}
-
-		// prevent duplicates from being sent to the handler
-		if _, ok := alreadySeen[hdr.Name]; ok && !allLayers {
-			continue
-		}
-		alreadySeen[hdr.Name] = struct{}{}
-		// TODO: need to do prefix filtering for whiteouts
-
-		cont, err := fn(hdr, layerInfo, tr)
-		if err != nil {
-			return false, err
-		}
-		if !cont {
-			return false, nil
-		}
-	}
-}
-
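-// alterations chains AlterHeader implementations; an entry is kept only if every alteration accepts it.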
-type alterations []archive.AlterHeader
-
-func (a alterations) Alter(hdr *tar.Header) (bool, error) {
-	for _, item := range a {
-		ok, err := item.Alter(hdr)
-		if err != nil {
-			return false, err
-		}
-		if !ok {
-			return false, nil
-		}
-	}
-	return true, nil
-}
-
-type removePermissions struct{}
-
-func (_ removePermissions) Alter(hdr *tar.Header) (bool, error) {
-	switch hdr.Typeflag {
-	case tar.TypeReg, tar.TypeRegA:
-		hdr.Mode = int64(os.FileMode(0640))
-	default:
-		hdr.Mode = int64(os.FileMode(0755))
-	}
-	return true, nil
-}
-
-type writableDirectories struct{}
-
-func (_ writableDirectories) Alter(hdr *tar.Header) (bool, error) {
-	switch hdr.Typeflag {
-	case tar.TypeDir:
-		hdr.Mode = int64(os.FileMode(0600) | os.FileMode(hdr.Mode))
-	}
-	return true, nil
-}
-
-type copyFromDirectory struct {
-	From string
-}
-
-func newCopyFromDirectory(from string) archive.AlterHeader {
-	if !strings.HasSuffix(from, "/") {
-		from = from + "/"
-	}
-	return &copyFromDirectory{From: from}
-}
-
-func (n *copyFromDirectory) Alter(hdr *tar.Header) (bool, error) {
-	return changeTarEntryParent(hdr, n.From), nil
-}
-
-type copyFromPattern struct {
-	Base string
-	Name string
-}
-
-func newCopyFromPattern(dir, name string) archive.AlterHeader {
-	if !strings.HasSuffix(dir, "/") {
-		dir = dir + "/"
-	}
-	return &copyFromPattern{Base: dir, Name: name}
-}
-
-func (n *copyFromPattern) Alter(hdr *tar.Header) (bool, error) {
-	if !changeTarEntryParent(hdr, n.Base) {
-		return false, nil
-	}
-	matchName := hdr.Name
-	if i := strings.Index(matchName, "/"); i != -1 {
-		matchName = matchName[:i]
-	}
-	if ok, err := path.Match(n.Name, matchName); !ok || err != nil {
-		klog.V(5).Infof("Excluded %s due to filter %s", hdr.Name, n.Name)
-		return false, err
-	}
-	return true, nil
-}
-
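-// changeTarEntryParent keeps only entries under the from directory, stripping that prefix from
-// entry names and from link targets that share it; entries outside from are excluded.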
-func changeTarEntryParent(hdr *tar.Header, from string) bool {
-	if !strings.HasPrefix(hdr.Name, from) {
-		klog.V(5).Infof("Exclude %s due to missing prefix %s", hdr.Name, from)
-		return false
-	}
-	if len(hdr.Linkname) > 0 {
-		if strings.HasPrefix(hdr.Linkname, from) {
-			hdr.Linkname = strings.TrimPrefix(hdr.Linkname, from)
-			klog.V(5).Infof("Updated link to %s", hdr.Linkname)
-		} else {
-			klog.V(4).Infof("Name %s won't correctly point to %s outside of %s", hdr.Name, hdr.Linkname, from)
-		}
-	}
-	hdr.Name = strings.TrimPrefix(hdr.Name, from)
-	klog.V(5).Infof("Updated name %s", hdr.Name)
-	return true
-}
-
-type filesOnly struct {
-}
-
-func (_ filesOnly) Alter(hdr *tar.Header) (bool, error) {
-	switch hdr.Typeflag {
-	case tar.TypeReg, tar.TypeRegA, tar.TypeDir:
-		return true, nil
-	default:
-		klog.V(6).Infof("Excluded %s because type was not a regular file or directory: %x", hdr.Name, hdr.Typeflag)
-		return false, nil
-	}
-}
-
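-// parseLayerFilter parses a layer selector: "~PREFIX" matches a single layer by digest prefix,
-// "FROM:TO" selects a range of layer indices, and a bare number selects one layer by position.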
-func parseLayerFilter(s string) (LayerFilter, error) {
-	if strings.HasPrefix(s, "~") {
-		s = s[1:]
-		return &prefixLayerFilter{Prefix: s}, nil
-	}
-
-	if strings.Contains(s, ":") {
-		l := &indexLayerFilter{From: 0, To: math.MaxInt32}
-		parts := strings.SplitN(s, ":", 2)
-		if len(parts[0]) > 0 {
-			i, err := strconv.Atoi(parts[0])
-			if err != nil {
-				return nil, fmt.Errorf("[from:to] must have valid numbers: %v", err)
-			}
-			l.From = int32(i)
-		}
-		if len(parts[1]) > 0 {
-			i, err := strconv.Atoi(parts[1])
-			if err != nil {
-				return nil, fmt.Errorf("[from:to] must have valid numbers: %v", err)
-			}
-			l.To = int32(i)
-		}
-		if l.To > 0 && l.To < l.From {
-			return nil, fmt.Errorf("[from:to] to must be larger than from")
-		}
-		return l, nil
-	}
-
-	if i, err := strconv.Atoi(s); err == nil {
-		l := NewPositionLayerFilter(int32(i))
-		return l, nil
-	}
-
-	return nil, fmt.Errorf("the layer selector [%s] is not valid, must be [from:to], [index], or [~digest]", s)
-}
-
-type prefixLayerFilter struct {
-	Prefix string
-}
-
-func (s *prefixLayerFilter) Filter(layers []distribution.Descriptor) ([]distribution.Descriptor, error) {
-	var filtered []distribution.Descriptor
-	for _, d := range layers {
-		if strings.HasPrefix(d.Digest.String(), s.Prefix) {
-			filtered = append(filtered, d)
-		}
-	}
-	if len(filtered) == 0 {
-		return nil, fmt.Errorf("no layers start with '%s'", s.Prefix)
-	}
-	if len(filtered) > 1 {
-		return nil, fmt.Errorf("multiple layers start with '%s', you must be more specific", s.Prefix)
-	}
-	return filtered, nil
-}
-
-type indexLayerFilter struct {
-	From int32
-	To   int32
-}
-
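-// Filter selects the half-open index range [From, To); negative indices count back from the end
-// of the layer list.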
-func (s *indexLayerFilter) Filter(layers []distribution.Descriptor) ([]distribution.Descriptor, error) {
-	l := int32(len(layers))
-	from := s.From
-	to := s.To
-	if from < 0 {
-		from = l + from
-	}
-	if to < 0 {
-		to = l + to
-	}
-	if to > l {
-		to = l
-	}
-	if from < 0 || to < 0 || from >= l {
-		if s.To == math.MaxInt32 {
-			return nil, fmt.Errorf("tried to select [%d:], but image only has %d layers", s.From, l)
-		}
-		return nil, fmt.Errorf("tried to select [%d:%d], but image only has %d layers", s.From, s.To, l)
-	}
-	if to < from {
-		to, from = from, to
-	}
-	return layers[from:to], nil
-}
-
-type positionLayerFilter struct {
-	At int32
-}
-
-func NewPositionLayerFilter(at int32) LayerFilter {
-	return &positionLayerFilter{at}
-}
-
-func (s *positionLayerFilter) Filter(layers []distribution.Descriptor) ([]distribution.Descriptor, error) {
-	l := int32(len(layers))
-	at := s.At
-	if at < 0 {
-		at = l + s.At
-	}
-	if at < 0 || at >= l {
-		return nil, fmt.Errorf("tried to select layer %d, but image only has %d layers", s.At, l)
-	}
-	return []distribution.Descriptor{layers[at]}, nil
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/image.go b/vendor/github.com/openshift/oc/pkg/cli/image/image.go
deleted file mode 100644
index 5a53cc75107f..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/image.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package image
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	ktemplates "k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	"github.com/openshift/oc/pkg/cli/image/append"
-	"github.com/openshift/oc/pkg/cli/image/extract"
-	"github.com/openshift/oc/pkg/cli/image/info"
-	"github.com/openshift/oc/pkg/cli/image/mirror"
-	cmdutil "github.com/openshift/oc/pkg/helpers/cmd"
-)
-
-var (
-	imageLong = ktemplates.LongDesc(`
-		Manage images on OpenShift
-
-		These commands help you manage images on OpenShift.`)
-)
-
-// NewCmdImage exposes commands for modifying images.
-func NewCmdImage(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	image := &cobra.Command{
-		Use:   "image COMMAND",
-		Short: "Useful commands for managing images",
-		Long:  imageLong,
-		Run:   kcmdutil.DefaultSubCommandRun(streams.ErrOut),
-	}
-
-	name := fmt.Sprintf("%s image", fullName)
-
-	groups := ktemplates.CommandGroups{
-		{
-			Message: "Advanced commands:",
-			Commands: []*cobra.Command{
-				append.NewCmdAppendImage(name, streams),
-				info.NewInfo(name, streams),
-				extract.New(name, streams),
-				mirror.NewCmdMirrorImage(name, streams),
-			},
-		},
-	}
-	groups.Add(image)
-	cmdutil.ActsAsRootCommand(image, []string{"options"}, groups...)
-	return image
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/info/info.go b/vendor/github.com/openshift/oc/pkg/cli/image/info/info.go
deleted file mode 100644
index 8ecfc868c1d8..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/info/info.go
+++ /dev/null
@@ -1,421 +0,0 @@
-package info
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"fmt"
-	"io"
-	"sort"
-	"strings"
-	"text/tabwriter"
-	"time"
-
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/manifest/manifestlist"
-	units "github.com/docker/go-units"
-	digest "github.com/opencontainers/go-digest"
-	"github.com/spf13/cobra"
-
-	"k8s.io/apimachinery/pkg/util/duration"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/klog"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	"github.com/openshift/library-go/pkg/image/dockerv1client"
-	imagereference "github.com/openshift/library-go/pkg/image/reference"
-	"github.com/openshift/library-go/pkg/image/registryclient"
-	imagemanifest "github.com/openshift/oc/pkg/cli/image/manifest"
-	"github.com/openshift/oc/pkg/cli/image/workqueue"
-)
-
-func NewInfoOptions(streams genericclioptions.IOStreams) *InfoOptions {
-	return &InfoOptions{
-		IOStreams: streams,
-	}
-}
-
-func NewInfo(parentName string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewInfoOptions(streams)
-	cmd := &cobra.Command{
-		Use:   "info IMAGE",
-		Short: "Display information about an image",
-		Long: templates.LongDesc(`
-			Show information about an image in a remote image registry
-
-			Experimental: This command is under active development and may change without notice.
-		`),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(cmd, args))
-			kcmdutil.CheckErr(o.Validate())
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-	flags := cmd.Flags()
-	o.FilterOptions.Bind(flags)
-	o.SecurityOptions.Bind(flags)
-	flags.StringVarP(&o.Output, "output", "o", o.Output, "Print the image in an alternative format: json")
-	return cmd
-}
-
-type InfoOptions struct {
-	genericclioptions.IOStreams
-
-	SecurityOptions imagemanifest.SecurityOptions
-	FilterOptions   imagemanifest.FilterOptions
-
-	Images []string
-
-	Output string
-}
-
-func (o *InfoOptions) Complete(cmd *cobra.Command, args []string) error {
-	if len(args) < 1 {
-		return fmt.Errorf("info expects at least one argument, an image pull spec")
-	}
-	o.Images = args
-	return nil
-}
-
-func (o *InfoOptions) Validate() error {
-	return o.FilterOptions.Validate()
-}
-
-func (o *InfoOptions) Run() error {
-	if len(o.Images) == 0 {
-		return fmt.Errorf("must specify one or more images as arguments")
-	}
-
-	// cache the context
-	_, err := o.SecurityOptions.Context()
-	if err != nil {
-		return err
-	}
-
-	hadError := false
-	for _, location := range o.Images {
-		src, err := imagereference.Parse(location)
-		if err != nil {
-			return err
-		}
-		if len(src.Tag) == 0 && len(src.ID) == 0 {
-			return fmt.Errorf("--from must point to an image ID or image tag")
-		}
-
-		var image *Image
-		retriever := &ImageRetriever{
-			Image: map[string]imagereference.DockerImageReference{
-				location: src,
-			},
-			SecurityOptions: o.SecurityOptions,
-			ManifestListCallback: func(from string, list *manifestlist.DeserializedManifestList, all map[digest.Digest]distribution.Manifest) (map[digest.Digest]distribution.Manifest, error) {
-				filtered := make(map[digest.Digest]distribution.Manifest)
-				for _, manifest := range list.Manifests {
-					if !o.FilterOptions.Include(&manifest, len(list.Manifests) > 1) {
-						klog.V(5).Infof("Skipping image for %#v from %s", manifest.Platform, from)
-						continue
-					}
-					filtered[manifest.Digest] = all[manifest.Digest]
-				}
-				if len(filtered) == 1 {
-					return filtered, nil
-				}
-
-				buf := &bytes.Buffer{}
-				w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
-				fmt.Fprintf(w, "  OS\tDIGEST\n")
-				for _, manifest := range list.Manifests {
-					fmt.Fprintf(w, "  %s\t%s\n", imagemanifest.PlatformSpecString(manifest.Platform), manifest.Digest)
-				}
-				w.Flush()
-				return nil, fmt.Errorf("the image is a manifest list and contains multiple images - use --filter-by-os to select from:\n\n%s\n", buf.String())
-			},
-
-			ImageMetadataCallback: func(from string, i *Image, err error) error {
-				if err != nil {
-					return err
-				}
-				image = i
-				return nil
-			},
-		}
-		if err := retriever.Run(); err != nil {
-			return err
-		}
-
-		switch o.Output {
-		case "":
-		case "json":
-			data, err := json.MarshalIndent(image, "", "  ")
-			if err != nil {
-				return err
-			}
-			fmt.Fprintf(o.Out, "%s", string(data))
-			continue
-		default:
-			return fmt.Errorf("unrecognized --output, only 'json' is supported")
-		}
-
-		if err := describeImage(o.Out, image); err != nil {
-			hadError = true
-			if err != kcmdutil.ErrExit {
-				fmt.Fprintf(o.ErrOut, "error: %v", err)
-			}
-		}
-
-	}
-	if hadError {
-		return kcmdutil.ErrExit
-	}
-	return nil
-}
-
-type Image struct {
-	Name          string                              `json:"name"`
-	Ref           imagereference.DockerImageReference `json:"-"`
-	Digest        digest.Digest                       `json:"digest"`
-	ContentDigest digest.Digest                       `json:"contentDigest"`
-	ListDigest    digest.Digest                       `json:"listDigest"`
-	MediaType     string                              `json:"mediaType"`
-	Layers        []distribution.Descriptor           `json:"layers"`
-	Config        *dockerv1client.DockerImageConfig   `json:"config"`
-
-	Manifest distribution.Manifest `json:"-"`
-}
-
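-// describeImage writes a human-readable summary of the image to out. It returns ErrExit when the
-// content digest does not match the requested digest, which indicates the image was tampered with.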
-func describeImage(out io.Writer, image *Image) error {
-	var err error
-
-	w := tabwriter.NewWriter(out, 0, 4, 1, ' ', 0)
-	defer w.Flush()
-	fmt.Fprintf(w, "Name:\t%s\n", image.Name)
-	if len(image.Ref.ID) == 0 || image.Ref.ID != image.Digest.String() {
-		fmt.Fprintf(w, "Digest:\t%s\n", image.Digest)
-	}
-	if len(image.ListDigest) > 0 {
-		fmt.Fprintf(w, "Manifest List:\t%s\n", image.ListDigest)
-	}
-	if image.ContentDigest != image.Digest {
-		fmt.Fprintf(w, "Content Digest:\t%s\n\tERROR: the image contents do not match the requested digest, this image has been tampered with\n", image.ContentDigest)
-		err = kcmdutil.ErrExit
-	}
-
-	fmt.Fprintf(w, "Media Type:\t%s\n", image.MediaType)
-	if image.Config.Created.IsZero() {
-		fmt.Fprintf(w, "Created:\t%s\n", "")
-	} else {
-		fmt.Fprintf(w, "Created:\t%s ago\n", duration.ShortHumanDuration(time.Now().Sub(image.Config.Created)))
-	}
-	switch l := len(image.Layers); l {
-	case 0:
-		// legacy case, server does not know individual layers
-		fmt.Fprintf(w, "Layer Size:\t%s\n", units.HumanSize(float64(image.Config.Size)))
-	default:
-		imageSize := fmt.Sprintf("%s in %d layers", units.HumanSize(float64(image.Config.Size)), len(image.Layers))
-		if image.Config.Size == 0 {
-			imageSize = fmt.Sprintf("%d layers (size unavailable)", len(image.Layers))
-		}
-
-		fmt.Fprintf(w, "Image Size:\t%s\n", imageSize)
-		for i, layer := range image.Layers {
-			layerSize := units.HumanSize(float64(layer.Size))
-			if layer.Size == 0 {
-				layerSize = "--"
-			}
-
-			if i == 0 {
-				fmt.Fprintf(w, "%s\t%s\t%s\n", "Layers:", layerSize, layer.Digest)
-			} else {
-				fmt.Fprintf(w, "%s\t%s\t%s\n", "", layerSize, layer.Digest)
-			}
-		}
-	}
-	fmt.Fprintf(w, "OS:\t%s\n", image.Config.OS)
-	fmt.Fprintf(w, "Arch:\t%s\n", image.Config.Architecture)
-	if len(image.Config.Author) > 0 {
-		fmt.Fprintf(w, "Author:\t%s\n", image.Config.Author)
-	}
-
-	config := image.Config.Config
-	if config != nil {
-		hasCommand := false
-		if len(config.Entrypoint) > 0 {
-			hasCommand = true
-			fmt.Fprintf(w, "Entrypoint:\t%s\n", strings.Join(config.Entrypoint, " "))
-		}
-		if len(config.Cmd) > 0 {
-			hasCommand = true
-			fmt.Fprintf(w, "Command:\t%s\n", strings.Join(config.Cmd, " "))
-		}
-		if !hasCommand {
-			fmt.Fprintf(w, "Command:\t%s\n", "")
-		}
-		if len(config.WorkingDir) > 0 {
-			fmt.Fprintf(w, "Working Dir:\t%s\n", config.WorkingDir)
-		}
-		if len(config.User) > 0 {
-			fmt.Fprintf(w, "User:\t%s\n", config.User)
-		}
-		ports := sets.NewString()
-		for k := range config.ExposedPorts {
-			ports.Insert(k)
-		}
-		if len(ports) > 0 {
-			fmt.Fprintf(w, "Exposes Ports:\t%s\n", strings.Join(ports.List(), ", "))
-		}
-	}
-
-	if config != nil && len(config.Env) > 0 {
-		for i, env := range config.Env {
-			if i == 0 {
-				fmt.Fprintf(w, "%s\t%s\n", "Environment:", env)
-			} else {
-				fmt.Fprintf(w, "%s\t%s\n", "", env)
-			}
-		}
-	}
-
-	if config != nil && len(config.Labels) > 0 {
-		var keys []string
-		for k := range config.Labels {
-			keys = append(keys, k)
-		}
-		sort.Strings(keys)
-		for i, key := range keys {
-			if i == 0 {
-				fmt.Fprintf(w, "%s\t%s=%s\n", "Labels:", key, config.Labels[key])
-			} else {
-				fmt.Fprintf(w, "%s\t%s=%s\n", "", key, config.Labels[key])
-			}
-		}
-	}
-
-	if config != nil && len(config.Volumes) > 0 {
-		var keys []string
-		for k := range config.Volumes {
-			keys = append(keys, k)
-		}
-		sort.Strings(keys)
-		for i, volume := range keys {
-			if i == 0 {
-				fmt.Fprintf(w, "%s\t%s\n", "Volumes:", volume)
-			} else {
-				fmt.Fprintf(w, "%s\t%s\n", "", volume)
-			}
-		}
-	}
-
-	fmt.Fprintln(w)
-	return err
-}
-
-func writeTabSection(out io.Writer, fn func(w io.Writer)) {
-	w := tabwriter.NewWriter(out, 0, 4, 1, ' ', 0)
-	fn(w)
-	w.Flush()
-}
-
-type ImageRetriever struct {
-	Image           map[string]imagereference.DockerImageReference
-	SecurityOptions imagemanifest.SecurityOptions
-	ParallelOptions imagemanifest.ParallelOptions
-	// ImageMetadataCallback is invoked once per image retrieved, and may be called in parallel if
-	// MaxPerRegistry is set higher than 1. If err is non-nil, image is nil. If the callback
-	// returns an error, execution stops.
-	ImageMetadataCallback func(from string, image *Image, err error) error
-	// ManifestListCallback, if specified, is invoked when the root image is a manifest list. If an
-	// error is returned, processing stops. If zero manifests are returned, the next item is
-	// processed and no ImageMetadataCallback calls occur. If more than one manifest is returned,
-	// ImageMetadataCallback will be invoked once for each item.
-	ManifestListCallback func(from string, list *manifestlist.DeserializedManifestList, all map[digest.Digest]distribution.Manifest) (map[digest.Digest]distribution.Manifest, error)
-}
-
-func (o *ImageRetriever) Run() error {
-	ctx := context.Background()
-	fromContext, err := o.SecurityOptions.Context()
-	if err != nil {
-		return err
-	}
-
-	callbackFn := o.ImageMetadataCallback
-	if callbackFn == nil {
-		callbackFn = func(_ string, _ *Image, err error) error {
-			return err
-		}
-	}
-	stopCh := make(chan struct{})
-	defer close(stopCh)
-	q := workqueue.New(o.ParallelOptions.MaxPerRegistry, stopCh)
-	return q.Try(func(q workqueue.Try) {
-		for key := range o.Image {
-			name := key
-			from := o.Image[key]
-			q.Try(func() error {
-				repo, err := fromContext.Repository(ctx, from.DockerClientDefaults().RegistryURL(), from.RepositoryName(), o.SecurityOptions.Insecure)
-				if err != nil {
-					return callbackFn(name, nil, fmt.Errorf("unable to connect to image repository %s: %v", from.Exact(), err))
-				}
-
-				allManifests, manifestList, listDigest, err := imagemanifest.AllManifests(ctx, from, repo)
-				if err != nil {
-					if imagemanifest.IsImageForbidden(err) {
-						var msg string
-						if len(o.Image) == 1 {
-							msg = "image does not exist or you don't have permission to access the repository"
-						} else {
-							msg = fmt.Sprintf("image %q does not exist or you don't have permission to access the repository", from)
-						}
-						return callbackFn(name, nil, imagemanifest.NewImageForbidden(msg, err))
-					}
-					if imagemanifest.IsImageNotFound(err) {
-						var msg string
-						if len(o.Image) == 1 {
-							msg = "image does not exist"
-						} else {
-							msg = fmt.Sprintf("image %q does not exist", from)
-						}
-						return callbackFn(name, nil, imagemanifest.NewImageNotFound(msg, err))
-					}
-					return callbackFn(name, nil, fmt.Errorf("unable to read image %s: %v", from, err))
-				}
-
-				if o.ManifestListCallback != nil && manifestList != nil {
-					allManifests, err = o.ManifestListCallback(name, manifestList, allManifests)
-					if err != nil {
-						return err
-					}
-				}
-
-				if len(allManifests) == 0 {
-					return imagemanifest.NewImageNotFound(fmt.Sprintf("no manifests could be found for %q", from), nil)
-				}
-
-				for srcDigest, srcManifest := range allManifests {
-					contentDigest, contentErr := registryclient.ContentDigestForManifest(srcManifest, srcDigest.Algorithm())
-					if contentErr != nil {
-						return callbackFn(name, nil, contentErr)
-					}
-
-					imageConfig, layers, manifestErr := imagemanifest.ManifestToImageConfig(ctx, srcManifest, repo.Blobs(ctx), imagemanifest.ManifestLocation{ManifestList: listDigest, Manifest: srcDigest})
-					mediaType, _, _ := srcManifest.Payload()
-					if err := callbackFn(name, &Image{
-						Name:          from.Exact(),
-						Ref:           from,
-						MediaType:     mediaType,
-						Digest:        srcDigest,
-						ContentDigest: contentDigest,
-						ListDigest:    listDigest,
-						Config:        imageConfig,
-						Layers:        layers,
-						Manifest:      srcManifest,
-					}, manifestErr); err != nil {
-						return err
-					}
-				}
-				return nil
-			})
-		}
-	})
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/manifest/dockercredentials/credentials.go b/vendor/github.com/openshift/oc/pkg/cli/image/manifest/dockercredentials/credentials.go
deleted file mode 100644
index 4a4d7a06f84f..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/manifest/dockercredentials/credentials.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package dockercredentials
-
-import (
-	"net/url"
-	"os"
-	"path/filepath"
-	"runtime"
-	"strings"
-
-	"github.com/docker/distribution/registry/client/auth"
-
-	"k8s.io/klog"
-	"k8s.io/kubernetes/pkg/credentialprovider"
-
-	"github.com/openshift/library-go/pkg/image/registryclient"
-)
-
-var (
-	emptyKeyring = &credentialprovider.BasicDockerKeyring{}
-)
-
-// NewLocal creates a new credential store that uses the default
-// local configuration to find a valid authentication for registry
-// targets.
-func NewLocal() auth.CredentialStore {
-	keyring := &credentialprovider.BasicDockerKeyring{}
-	keyring.Add(defaultClientDockerConfig())
-	return &keyringCredentialStore{
-		DockerKeyring:     keyring,
-		RefreshTokenStore: registryclient.NewRefreshTokenStore(),
-	}
-}
-
-// NewFromFile creates a new credential store for the provided Docker config.json
-// authentication file.
-func NewFromFile(path string) (auth.CredentialStore, error) {
-	cfg, err := credentialprovider.ReadSpecificDockerConfigJsonFile(path)
-	if err != nil {
-		return nil, err
-	}
-	keyring := &credentialprovider.BasicDockerKeyring{}
-	keyring.Add(cfg)
-	return &keyringCredentialStore{
-		DockerKeyring:     keyring,
-		RefreshTokenStore: registryclient.NewRefreshTokenStore(),
-	}, nil
-}
-
-type keyringCredentialStore struct {
-	credentialprovider.DockerKeyring
-	registryclient.RefreshTokenStore
-}
-
-func (s *keyringCredentialStore) Basic(url *url.URL) (string, string) {
-	return BasicFromKeyring(s.DockerKeyring, url)
-}
-
-// BasicFromKeyring finds Basic authorization credentials from a Docker keyring for the given URL as username and
-// password. It returns empty strings if no such URL matches.
-func BasicFromKeyring(keyring credentialprovider.DockerKeyring, target *url.URL) (string, string) {
-	// TODO: compare this logic to Docker authConfig in v2 configuration
-	var value string
-	if len(target.Scheme) == 0 || target.Scheme == "https" {
-		value = target.Host + target.Path
-	} else {
-		// always require an explicit port to look up HTTP credentials
-		if !strings.Contains(target.Host, ":") {
-			value = target.Host + ":80" + target.Path
-		} else {
-			value = target.Host + target.Path
-		}
-	}
-
-	// Lookup(...) expects an image (not a URL path).
-	// The keyring strips /v1/ and /v2/ version prefixes,
-	// so we should strip them as well when selecting a valid auth for a URL.
-	pathWithSlash := target.Path + "/"
-	if strings.HasPrefix(pathWithSlash, "/v1/") || strings.HasPrefix(pathWithSlash, "/v2/") {
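-		// target.Path[3:] drops the three-byte "/v1" or "/v2" prefix while keeping the remainder of the path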
-		value = target.Host + target.Path[3:]
-	}
-
-	configs, found := keyring.Lookup(value)
-
-	if !found || len(configs) == 0 {
-		// do a special case check for docker.io to match historical lookups when we respond to a challenge
-		if value == "auth.docker.io/token" {
-			klog.V(5).Infof("Being asked for %s (%s), trying %s for legacy behavior", target, value, "index.docker.io/v1")
-			return BasicFromKeyring(keyring, &url.URL{Host: "index.docker.io", Path: "/v1"})
-		}
-		// docker 1.9 saves 'docker.io' in config in f23, see https://bugzilla.redhat.com/show_bug.cgi?id=1309739
-		if value == "index.docker.io" {
-			klog.V(5).Infof("Being asked for %s (%s), trying %s for legacy behavior", target, value, "docker.io")
-			return BasicFromKeyring(keyring, &url.URL{Host: "docker.io"})
-		}
-
-		// try removing the canonical ports for the given requests
-		if (strings.HasSuffix(target.Host, ":443") && target.Scheme == "https") ||
-			(strings.HasSuffix(target.Host, ":80") && target.Scheme == "http") {
-			host := strings.SplitN(target.Host, ":", 2)[0]
-			klog.V(5).Infof("Being asked for %s (%s), trying %s without port", target, value, host)
-
-			return BasicFromKeyring(keyring, &url.URL{Scheme: target.Scheme, Host: host, Path: target.Path})
-		}
-
-		klog.V(5).Infof("Unable to find a secret to match %s (%s)", target, value)
-		return "", ""
-	}
-	klog.V(5).Infof("Found secret to match %s (%s): %s", target, value, configs[0].ServerAddress)
-	return configs[0].Username, configs[0].Password
-}
-
-// defaultClientDockerConfig returns the credentials that the docker command line client would
-// return.
-func defaultClientDockerConfig() credentialprovider.DockerConfig {
-	// support the modern config file $HOME/.docker/config.json
-	if cfg, err := credentialprovider.ReadDockerConfigJSONFile(defaultPathsForCredentials()); err == nil {
-		return cfg
-	}
-	// support the legacy config file $HOME/.dockercfg
-	if cfg, err := credentialprovider.ReadDockercfgFile(defaultPathsForLegacyCredentials()); err == nil {
-		return cfg
-	}
-	return credentialprovider.DockerConfig{}
-}
-
-// defaultPathsForCredentials returns the correct search directories for a docker config file.
-func defaultPathsForCredentials() []string {
-	if runtime.GOOS == "windows" { // Windows
-		return []string{filepath.Join(os.Getenv("USERPROFILE"), ".docker")}
-	}
-	return []string{filepath.Join(os.Getenv("HOME"), ".docker")}
-}
-
-// defaultPathsForLegacyCredentials returns the correct search directories for a legacy docker config file.
-func defaultPathsForLegacyCredentials() []string {
-	if runtime.GOOS == "windows" { // Windows
-		return []string{os.Getenv("USERPROFILE")}
-	}
-	return []string{os.Getenv("HOME")}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/manifest/dockercredentials/credentials_test.go b/vendor/github.com/openshift/oc/pkg/cli/image/manifest/dockercredentials/credentials_test.go
deleted file mode 100644
index b336462d4391..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/manifest/dockercredentials/credentials_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package dockercredentials
-
-import (
-	"net/url"
-	"reflect"
-	"testing"
-
-	"k8s.io/kubernetes/pkg/credentialprovider"
-)
-
-type mockKeyring struct {
-	calls []string
-}
-
-func (k *mockKeyring) Lookup(image string) ([]credentialprovider.LazyAuthConfiguration, bool) {
-	k.calls = append(k.calls, image)
-	return nil, false
-}
-
-func TestHubFallback(t *testing.T) {
-	k := &mockKeyring{}
-	BasicFromKeyring(k, &url.URL{Host: "auth.docker.io", Path: "/token"})
-	if !reflect.DeepEqual([]string{"auth.docker.io/token", "index.docker.io", "docker.io"}, k.calls) {
-		t.Errorf("unexpected calls: %v", k.calls)
-	}
-}
-
-func Test_BasicFromKeyring(t *testing.T) {
-	fn := func(host string, entry credentialprovider.DockerConfigEntry) credentialprovider.DockerKeyring {
-		k := &credentialprovider.BasicDockerKeyring{}
-		k.Add(map[string]credentialprovider.DockerConfigEntry{host: entry})
-		return k
-	}
-	def := credentialprovider.DockerConfigEntry{
-		Username: "local_user",
-		Password: "local_pass",
-	}
-	type args struct {
-		keyring credentialprovider.DockerKeyring
-		target  *url.URL
-	}
-	tests := []struct {
-		name     string
-		args     args
-		user     string
-		password string
-	}{
-		{name: "exact", args: args{keyring: fn("localhost", def), target: &url.URL{Host: "localhost"}}, user: def.Username, password: def.Password},
-		{name: "https scheme", args: args{keyring: fn("localhost", def), target: &url.URL{Scheme: "https", Host: "localhost"}}, user: def.Username, password: def.Password},
-		{name: "canonical https", args: args{keyring: fn("localhost", def), target: &url.URL{Scheme: "https", Host: "localhost:443"}}, user: def.Username, password: def.Password},
-		{name: "only https", args: args{keyring: fn("https://localhost", def), target: &url.URL{Host: "localhost"}}, user: def.Username, password: def.Password},
-		{name: "only https scheme", args: args{keyring: fn("https://localhost", def), target: &url.URL{Scheme: "https", Host: "localhost"}}, user: def.Username, password: def.Password},
-
-		{name: "mismatched scheme - http", args: args{keyring: fn("http://localhost", def), target: &url.URL{Scheme: "https", Host: "localhost"}}, user: def.Username, password: def.Password},
-		{name: "don't assume port 80 in keyring is https", args: args{keyring: fn("localhost:80", def), target: &url.URL{Scheme: "http", Host: "localhost"}}, user: def.Username, password: def.Password},
-		{name: "exact http", args: args{keyring: fn("localhost:80", def), target: &url.URL{Scheme: "http", Host: "localhost:80"}}, user: def.Username, password: def.Password},
-
-		// this is not allowed by the credential keyring, but should be
-		{name: "exact http", args: args{keyring: fn("http://localhost", def), target: &url.URL{Scheme: "http", Host: "localhost:80"}}, user: "", password: ""},
-		{name: "keyring canonical https", args: args{keyring: fn("localhost:443", def), target: &url.URL{Scheme: "https", Host: "localhost"}}, user: "", password: ""},
-
-		// these should not be allowed
-		{name: "host is for port 80 only", args: args{keyring: fn("localhost:80", def), target: &url.URL{Host: "localhost"}}, user: "", password: ""},
-		{name: "host is for port 443 only", args: args{keyring: fn("localhost:443", def), target: &url.URL{Host: "localhost"}}, user: "", password: ""},
-		{name: "canonical http", args: args{keyring: fn("localhost", def), target: &url.URL{Scheme: "http", Host: "localhost:80"}}, user: "", password: ""},
-		{name: "http scheme", args: args{keyring: fn("localhost", def), target: &url.URL{Scheme: "http", Host: "localhost"}}, user: "", password: ""},
-		{name: "https not canonical", args: args{keyring: fn("localhost", def), target: &url.URL{Scheme: "https", Host: "localhost:80"}}, user: "", password: ""},
-		{name: "http not canonical", args: args{keyring: fn("localhost", def), target: &url.URL{Scheme: "http", Host: "localhost:443"}}, user: "", password: ""},
-		{name: "mismatched scheme", args: args{keyring: fn("https://localhost", def), target: &url.URL{Scheme: "http", Host: "localhost"}}, user: "", password: ""},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			user, password := BasicFromKeyring(tt.args.keyring, tt.args.target)
-			if user != tt.user {
-				t.Errorf("BasicFromKeyring() user = %v, actual = %v", user, tt.user)
-			}
-			if password != tt.password {
-				t.Errorf("BasicFromKeyring() password = %v, actual = %v", password, tt.password)
-			}
-		})
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/manifest/errors.go b/vendor/github.com/openshift/oc/pkg/cli/image/manifest/errors.go
deleted file mode 100644
index 6f48706d4f60..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/manifest/errors.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package manifest
-
-import (
-	"github.com/docker/distribution/registry/api/errcode"
-	registryapiv2 "github.com/docker/distribution/registry/api/v2"
-)
-
-type imageNotFound struct {
-	msg string
-	err error
-}
-
-func NewImageNotFound(msg string, err error) error {
-	return &imageNotFound{msg: msg, err: err}
-}
-
-func (e *imageNotFound) Error() string {
-	return e.msg
-}
-
-type imageForbidden struct {
-	msg string
-	err error
-}
-
-func NewImageForbidden(msg string, err error) error {
-	return &imageForbidden{msg: msg, err: err}
-}
-
-func (e *imageForbidden) Error() string {
-	return e.msg
-}
-
-func IsImageForbidden(err error) bool {
-	switch t := err.(type) {
-	case errcode.Errors:
-		for _, err := range t {
-			if IsImageForbidden(err) {
-				return true
-			}
-		}
-		return false
-	case errcode.Error:
-		return t.Code == errcode.ErrorCodeDenied
-	case *imageForbidden:
-		return true
-	default:
-		return false
-	}
-}
-func IsImageNotFound(err error) bool {
-	switch t := err.(type) {
-	case errcode.Errors:
-		for _, err := range t {
-			if IsImageNotFound(err) {
-				return true
-			}
-		}
-		return false
-	case errcode.Error:
-		return t.Code == registryapiv2.ErrorCodeManifestUnknown
-	case *imageNotFound:
-		return true
-	case *imageForbidden:
-		return true
-	default:
-		return false
-	}
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/manifest/manifest.go b/vendor/github.com/openshift/oc/pkg/cli/image/manifest/manifest.go
deleted file mode 100644
index fcd8ec46e27f..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/manifest/manifest.go
+++ /dev/null
@@ -1,522 +0,0 @@
-package manifest
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"regexp"
-	"runtime"
-	"sync"
-
-	"github.com/spf13/pflag"
-
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/manifest/manifestlist"
-	"github.com/docker/distribution/manifest/schema1"
-	"github.com/docker/distribution/manifest/schema2"
-	"github.com/docker/distribution/reference"
-	"github.com/docker/distribution/registry/api/errcode"
-	v2 "github.com/docker/distribution/registry/api/v2"
-
-	"github.com/docker/libtrust"
-	"github.com/opencontainers/go-digest"
-	"k8s.io/client-go/rest"
-	"k8s.io/klog"
-
-	"github.com/openshift/library-go/pkg/image/dockerv1client"
-	imagereference "github.com/openshift/library-go/pkg/image/reference"
-	"github.com/openshift/library-go/pkg/image/registryclient"
-	"github.com/openshift/oc/pkg/cli/image/manifest/dockercredentials"
-	"github.com/openshift/oc/pkg/helpers/image/dockerlayer/add"
-)
-
-type ParallelOptions struct {
-	MaxPerRegistry int
-}
-
-func (o *ParallelOptions) Bind(flags *pflag.FlagSet) {
-	flags.IntVar(&o.MaxPerRegistry, "max-per-registry", o.MaxPerRegistry, "Number of concurrent requests allowed per registry.")
-}
-
-type SecurityOptions struct {
-	RegistryConfig   string
-	Insecure         bool
-	SkipVerification bool
-
-	CachedContext *registryclient.Context
-}
-
-func (o *SecurityOptions) Bind(flags *pflag.FlagSet) {
-	flags.StringVarP(&o.RegistryConfig, "registry-config", "a", o.RegistryConfig, "Path to your registry credentials (defaults to ~/.docker/config.json)")
-	flags.BoolVar(&o.Insecure, "insecure", o.Insecure, "Allow push and pull operations to registries to be made over HTTP")
-	flags.BoolVar(&o.SkipVerification, "skip-verification", o.SkipVerification, "Skip verifying the integrity of the retrieved content. This is not recommended, but may be necessary when importing images from older image registries. Only bypass verification if the registry is known to be trustworthy.")
-}
-
-type Verifier interface {
-	Verify(dgst, contentDgst digest.Digest)
-	Verified() bool
-}
-
-func NewVerifier() Verifier {
-	return &verifier{}
-}
-
-type verifier struct {
-	lock     sync.Mutex
-	hadError bool
-}
-
-func (v *verifier) Verify(dgst, contentDgst digest.Digest) {
-	if contentDgst == dgst {
-		return
-	}
-	v.lock.Lock()
-	defer v.lock.Unlock()
-	v.hadError = true
-}
-
-func (v *verifier) Verified() bool {
-	v.lock.Lock()
-	defer v.lock.Unlock()
-	return !v.hadError
-}
-
-func (o *SecurityOptions) Context() (*registryclient.Context, error) {
-	if o.CachedContext != nil {
-		return o.CachedContext, nil
-	}
-	context, err := o.NewContext()
-	o.CachedContext = context
-	return context, err
-}
-
-func (o *SecurityOptions) NewContext() (*registryclient.Context, error) {
-	rt, err := rest.TransportFor(&rest.Config{})
-	if err != nil {
-		return nil, err
-	}
-	insecureRT, err := rest.TransportFor(&rest.Config{TLSClientConfig: rest.TLSClientConfig{Insecure: true}})
-	if err != nil {
-		return nil, err
-	}
-	creds := dockercredentials.NewLocal()
-	if len(o.RegistryConfig) > 0 {
-		creds, err = dockercredentials.NewFromFile(o.RegistryConfig)
-		if err != nil {
-			return nil, fmt.Errorf("unable to load --registry-config: %v", err)
-		}
-	}
-	context := registryclient.NewContext(rt, insecureRT).WithCredentials(creds)
-	context.DisableDigestVerification = o.SkipVerification
-	return context, nil
-}
-
-// FilterOptions assist in filtering out unneeded manifests from ManifestList objects.
-type FilterOptions struct {
-	FilterByOS      string
-	DefaultOSFilter bool
-	OSFilter        *regexp.Regexp
-}
-
-// Bind adds the options to the flag set.
-func (o *FilterOptions) Bind(flags *pflag.FlagSet) {
-	flags.StringVar(&o.FilterByOS, "filter-by-os", o.FilterByOS, "A regular expression to control which images are considered when multiple variants are available. Images will be passed as '/[/]'.")
-}
-
-// Validate checks whether the flags are ready for use.
-func (o *FilterOptions) Validate() error {
-	pattern := o.FilterByOS
-	if len(pattern) > 0 {
-		re, err := regexp.Compile(pattern)
-		if err != nil {
-			return fmt.Errorf("--filter-by-os was not a valid regular expression: %v", err)
-		}
-		o.OSFilter = re
-	}
-	return nil
-}
-
-// Complete performs defaulting by OS.
-func (o *FilterOptions) Complete(flags *pflag.FlagSet) error {
-	pattern := o.FilterByOS
-	if len(pattern) == 0 && !flags.Changed("filter-by-os") {
-		o.DefaultOSFilter = true
-		o.FilterByOS = regexp.QuoteMeta(fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH))
-	}
-	return nil
-}
-
-// Include returns true if the provided manifest should be included. When the user did not alter the default
-// selection and there is only one image, that image is always included.
-func (o *FilterOptions) Include(d *manifestlist.ManifestDescriptor, hasMultiple bool) bool {
-	if o.OSFilter == nil {
-		return true
-	}
-	if o.DefaultOSFilter && !hasMultiple {
-		return true
-	}
-	s := PlatformSpecString(d.Platform)
-	return o.OSFilter.MatchString(s)
-}
-
-func PlatformSpecString(platform manifestlist.PlatformSpec) string {
-	if len(platform.Variant) > 0 {
-		return fmt.Sprintf("%s/%s/%s", platform.OS, platform.Architecture, platform.Variant)
-	}
-	return fmt.Sprintf("%s/%s", platform.OS, platform.Architecture)
-}
-
-// IncludeAll reports whether the provided manifest matches the filter; with no filter set, every manifest matches.
-func (o *FilterOptions) IncludeAll(d *manifestlist.ManifestDescriptor, hasMultiple bool) bool {
-	if o.OSFilter == nil {
-		return true
-	}
-	s := PlatformSpecString(d.Platform)
-	return o.OSFilter.MatchString(s)
-}
-
-type FilterFunc func(*manifestlist.ManifestDescriptor, bool) bool
-
-// PreferManifestList specifically requests a manifest list first
-var PreferManifestList = distribution.WithManifestMediaTypes([]string{
-	manifestlist.MediaTypeManifestList,
-	schema2.MediaTypeManifest,
-})
-
-// AllManifests returns all non-list manifests, the list manifest (if any), the digest that from refers to, or an error.
-func AllManifests(ctx context.Context, from imagereference.DockerImageReference, repo distribution.Repository) (map[digest.Digest]distribution.Manifest, *manifestlist.DeserializedManifestList, digest.Digest, error) {
-	var srcDigest digest.Digest
-	if len(from.Tag) > 0 {
-		desc, err := repo.Tags(ctx).Get(ctx, from.Tag)
-		if err != nil {
-			return nil, nil, "", err
-		}
-		srcDigest = desc.Digest
-	} else if len(from.ID) > 0 {
-		srcDigest = digest.Digest(from.ID)
-	} else {
-		return nil, nil, "", fmt.Errorf("no tag or digest specified")
-	}
-	manifests, err := repo.Manifests(ctx)
-	if err != nil {
-		return nil, nil, "", err
-	}
-	srcManifest, err := manifests.Get(ctx, srcDigest, PreferManifestList)
-	if err != nil {
-		return nil, nil, "", err
-	}
-
-	return ManifestsFromList(ctx, srcDigest, srcManifest, manifests, from)
-}
-
-type ManifestLocation struct {
-	Manifest     digest.Digest
-	ManifestList digest.Digest
-}
-
-func (m ManifestLocation) IsList() bool {
-	return len(m.ManifestList) > 0
-}
-
-func (m ManifestLocation) String() string {
-	if m.IsList() {
-		return fmt.Sprintf("manifest %s in manifest list %s", m.Manifest, m.ManifestList)
-	}
-	return fmt.Sprintf("manifest %s", m.Manifest)
-}
-
-// FirstManifest returns the first manifest at the request location that matches the filter function.
-func FirstManifest(ctx context.Context, from imagereference.DockerImageReference, repo distribution.Repository, filterFn FilterFunc) (distribution.Manifest, ManifestLocation, error) {
-	var srcDigest digest.Digest
-	if len(from.Tag) > 0 {
-		desc, err := repo.Tags(ctx).Get(ctx, from.Tag)
-		if err != nil {
-			return nil, ManifestLocation{}, err
-		}
-		srcDigest = desc.Digest
-	} else if len(from.ID) > 0 {
-		srcDigest = digest.Digest(from.ID)
-	} else {
-		return nil, ManifestLocation{}, fmt.Errorf("no tag or digest specified")
-	}
-	manifests, err := repo.Manifests(ctx)
-	if err != nil {
-		return nil, ManifestLocation{}, err
-	}
-	srcManifest, err := manifests.Get(ctx, srcDigest, PreferManifestList)
-	if err != nil {
-		return nil, ManifestLocation{}, err
-	}
-
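-	// Remember the top-level digest so the result can record when the manifest came from a list.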
-	originalSrcDigest := srcDigest
-	srcManifests, srcManifest, srcDigest, err := ProcessManifestList(ctx, srcDigest, srcManifest, manifests, from, filterFn)
-	if err != nil {
-		return nil, ManifestLocation{}, err
-	}
-	if len(srcManifests) == 0 {
-		return nil, ManifestLocation{}, fmt.Errorf("filtered all images from manifest list")
-	}
-
-	if srcDigest != originalSrcDigest {
-		return srcManifest, ManifestLocation{Manifest: srcDigest, ManifestList: originalSrcDigest}, nil
-	}
-	return srcManifest, ManifestLocation{Manifest: srcDigest}, nil
-}
-
-// ManifestToImageConfig takes an image manifest and converts it into a structured object.
-func ManifestToImageConfig(ctx context.Context, srcManifest distribution.Manifest, blobs distribution.BlobService, location ManifestLocation) (*dockerv1client.DockerImageConfig, []distribution.Descriptor, error) {
-	switch t := srcManifest.(type) {
-	case *schema2.DeserializedManifest:
-		if t.Config.MediaType != schema2.MediaTypeImageConfig {
-			return nil, nil, fmt.Errorf("%s does not have the expected image configuration media type: %s", location, t.Config.MediaType)
-		}
-		configJSON, err := blobs.Get(ctx, t.Config.Digest)
-		if err != nil {
-			return nil, nil, fmt.Errorf("cannot retrieve image configuration for %s: %v", location, err)
-		}
-		klog.V(4).Infof("Raw image config json:\n%s", string(configJSON))
-		config := &dockerv1client.DockerImageConfig{}
-		if err := json.Unmarshal(configJSON, &config); err != nil {
-			return nil, nil, fmt.Errorf("unable to parse image configuration: %v", err)
-		}
-
-		base := config
-		layers := t.Layers
-		base.Size = 0
-		for _, layer := range t.Layers {
-			base.Size += layer.Size
-		}
-
-		return base, layers, nil
-
-	case *schema1.SignedManifest:
-		if klog.V(4) {
-			_, configJSON, _ := srcManifest.Payload()
-			klog.Infof("Raw image config json:\n%s", string(configJSON))
-		}
-		if len(t.History) == 0 {
-			return nil, nil, fmt.Errorf("input image is in an unknown format: no v1Compatibility history")
-		}
-		config := &dockerv1client.DockerV1CompatibilityImage{}
-		if err := json.Unmarshal([]byte(t.History[0].V1Compatibility), &config); err != nil {
-			return nil, nil, err
-		}
-
-		base := &dockerv1client.DockerImageConfig{}
-		if err := dockerv1client.Convert_DockerV1CompatibilityImage_to_DockerImageConfig(config, base); err != nil {
-			return nil, nil, err
-		}
-
-		// schema1 layers are in reverse order
-		layers := make([]distribution.Descriptor, 0, len(t.FSLayers))
-		for i := len(t.FSLayers) - 1; i >= 0; i-- {
-			layer := distribution.Descriptor{
-				MediaType: schema2.MediaTypeLayer,
-				Digest:    t.FSLayers[i].BlobSum,
-				// size must be reconstructed from the blobs
-			}
-			// we must reconstruct the tar sum from the blobs
-			add.AddLayerToConfig(base, layer, "")
-			layers = append(layers, layer)
-		}
-
-		return base, layers, nil
-
-	default:
-		return nil, nil, fmt.Errorf("unknown image manifest of type %T from %s", srcManifest, location)
-	}
-}
-
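-// ProcessManifestList expands a manifest list into its child manifests, applying filterFn. It
-// returns the retained manifests, the manifest callers should treat as the source (the single
-// child if exactly one remains, otherwise the filtered list), and that manifest's digest.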
-func ProcessManifestList(ctx context.Context, srcDigest digest.Digest, srcManifest distribution.Manifest, manifests distribution.ManifestService, ref imagereference.DockerImageReference, filterFn FilterFunc) ([]distribution.Manifest, distribution.Manifest, digest.Digest, error) {
-	var srcManifests []distribution.Manifest
-	switch t := srcManifest.(type) {
-	case *manifestlist.DeserializedManifestList:
-		manifestDigest := srcDigest
-		manifestList := t
-
-		filtered := make([]manifestlist.ManifestDescriptor, 0, len(t.Manifests))
-		for _, manifest := range t.Manifests {
-			if !filterFn(&manifest, len(t.Manifests) > 1) {
-				klog.V(5).Infof("Skipping image for %#v from %s", manifest.Platform, ref)
-				continue
-			}
-			klog.V(5).Infof("Including image for %#v from %s", manifest.Platform, ref)
-			filtered = append(filtered, manifest)
-		}
-
-		if len(filtered) == 0 {
-			return nil, nil, "", nil
-		}
-
-		// if we're filtering the manifest list, update the source manifest and digest
-		if len(filtered) != len(t.Manifests) {
-			var err error
-			t, err = manifestlist.FromDescriptors(filtered)
-			if err != nil {
-				return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list: %v", ref, err)
-			}
-			_, body, err := t.Payload()
-			if err != nil {
-				return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list (bad payload): %v", ref, err)
-			}
-			manifestList = t
-			manifestDigest, err = registryclient.ContentDigestForManifest(t, srcDigest.Algorithm())
-			if err != nil {
-				return nil, nil, "", err
-			}
-			klog.V(5).Infof("Filtered manifest list to new digest %s:\n%s", manifestDigest, body)
-		}
-
-		for i, manifest := range t.Manifests {
-			childManifest, err := manifests.Get(ctx, manifest.Digest, distribution.WithManifestMediaTypes([]string{manifestlist.MediaTypeManifestList, schema2.MediaTypeManifest}))
-			if err != nil {
-				return nil, nil, "", fmt.Errorf("unable to retrieve source image %s manifest #%d from manifest list: %v", ref, i+1, err)
-			}
-			srcManifests = append(srcManifests, childManifest)
-		}
-
-		switch {
-		case len(srcManifests) == 1:
-			manifestDigest, err := registryclient.ContentDigestForManifest(srcManifests[0], srcDigest.Algorithm())
-			if err != nil {
-				return nil, nil, "", err
-			}
-			klog.V(5).Infof("Used only one manifest from the list %s", srcDigest)
-			return srcManifests, srcManifests[0], manifestDigest, nil
-		default:
-			return append(srcManifests, manifestList), manifestList, manifestDigest, nil
-		}
-
-	default:
-		return []distribution.Manifest{srcManifest}, srcManifest, srcDigest, nil
-	}
-}
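The filterFn consulted above decides, per manifest-list entry, whether a platform image is kept; it receives the descriptor and a flag indicating whether the list holds more than one image. A minimal sketch of such a filter — the name keepLinuxAMD64 is hypothetical, the signature is inferred from the call site above:

```go
package filtersketch

import "github.com/docker/distribution/manifest/manifestlist"

// keepLinuxAMD64 keeps only linux/amd64 entries, but passes single-image
// lists through untouched, mirroring how the flag is used above.
func keepLinuxAMD64(d *manifestlist.ManifestDescriptor, hasMultiple bool) bool {
	if !hasMultiple {
		return true
	}
	return d.Platform.OS == "linux" && d.Platform.Architecture == "amd64"
}
```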
-
-// ManifestsFromList returns a map of all image manifests for a given manifest. If srcManifest is a
-// manifest list, it also returns the list and its digest; otherwise those results are empty.
-func ManifestsFromList(ctx context.Context, srcDigest digest.Digest, srcManifest distribution.Manifest, manifests distribution.ManifestService, ref imagereference.DockerImageReference) (map[digest.Digest]distribution.Manifest, *manifestlist.DeserializedManifestList, digest.Digest, error) {
-	switch t := srcManifest.(type) {
-	case *manifestlist.DeserializedManifestList:
-		allManifests := make(map[digest.Digest]distribution.Manifest)
-		manifestDigest := srcDigest
-		manifestList := t
-
-		for i, manifest := range t.Manifests {
-			childManifest, err := manifests.Get(ctx, manifest.Digest, distribution.WithManifestMediaTypes([]string{manifestlist.MediaTypeManifestList, schema2.MediaTypeManifest}))
-			if err != nil {
-				return nil, nil, "", fmt.Errorf("unable to retrieve source image %s manifest #%d from manifest list: %v", ref, i+1, err)
-			}
-			allManifests[manifest.Digest] = childManifest
-		}
-
-		return allManifests, manifestList, manifestDigest, nil
-
-	default:
-		return map[digest.Digest]distribution.Manifest{srcDigest: srcManifest}, nil, "", nil
-	}
-}
-
-// TODO: remove when quay.io switches to v2 schema
-func PutManifestInCompatibleSchema(
-	ctx context.Context,
-	srcManifest distribution.Manifest,
-	tag string,
-	toManifests distribution.ManifestService,
-	ref reference.Named,
-	blobs distribution.BlobService, // support schema2 -> schema1 downconversion
-	configJSON []byte, // optional, if not passed blobs will be used
-) (digest.Digest, error) {
-	var options []distribution.ManifestServiceOption
-	if len(tag) > 0 {
-		klog.V(5).Infof("Put manifest %s:%s", ref, tag)
-		options = []distribution.ManifestServiceOption{distribution.WithTag(tag)}
-	} else {
-		klog.V(5).Infof("Put manifest %s", ref)
-	}
-	toDigest, err := toManifests.Put(ctx, srcManifest, options...)
-	if err == nil {
-		return toDigest, nil
-	}
-	errs, ok := err.(errcode.Errors)
-	if !ok || len(errs) == 0 {
-		return toDigest, err
-	}
-	errcode, ok := errs[0].(errcode.Error)
-	if !ok || errcode.ErrorCode() != v2.ErrorCodeManifestInvalid {
-		return toDigest, err
-	}
-	// try downconverting to v2-schema1
-	schema2Manifest, ok := srcManifest.(*schema2.DeserializedManifest)
-	if !ok {
-		return toDigest, err
-	}
-	tagRef, tagErr := reference.WithTag(ref, tag)
-	if tagErr != nil {
-		return toDigest, err
-	}
-	klog.V(5).Infof("Registry reported invalid manifest error, attempting to convert to v2schema1 as ref %s", tagRef)
-	schema1Manifest, convertErr := convertToSchema1(ctx, blobs, configJSON, schema2Manifest, tagRef)
-	if convertErr != nil {
-		if klog.V(6) {
-			_, data, _ := schema2Manifest.Payload()
-			klog.Infof("Input schema\n%s", string(data))
-		}
-		klog.V(2).Infof("Unable to convert manifest to schema1: %v", convertErr)
-		return toDigest, err
-	}
-	if klog.V(6) {
-		_, data, _ := schema1Manifest.Payload()
-		klog.Infof("Converted to v2schema1\n%s", string(data))
-	}
-	return toManifests.Put(ctx, schema1Manifest, distribution.WithTag(tag))
-}
-
-// TODO: remove when quay.io switches to v2 schema
-func convertToSchema1(ctx context.Context, blobs distribution.BlobService, configJSON []byte, schema2Manifest *schema2.DeserializedManifest, ref reference.Named) (distribution.Manifest, error) {
-	if configJSON == nil {
-		targetDescriptor := schema2Manifest.Target()
-		config, err := blobs.Get(ctx, targetDescriptor.Digest)
-		if err != nil {
-			return nil, err
-		}
-		configJSON = config
-	}
-	trustKey, err := loadPrivateKey()
-	if err != nil {
-		return nil, err
-	}
-	if klog.V(6) {
-		klog.Infof("Down converting v2 schema image:\n%#v\n%s", schema2Manifest.Layers, configJSON)
-	}
-	builder := schema1.NewConfigManifestBuilder(blobs, trustKey, ref, configJSON)
-	for _, d := range schema2Manifest.Layers {
-		if err := builder.AppendReference(d); err != nil {
-			return nil, err
-		}
-	}
-	manifest, err := builder.Build(ctx)
-	if err != nil {
-		return nil, err
-	}
-	return manifest, nil
-}
-
-var (
-	privateKeyLock sync.Mutex
-	privateKey     libtrust.PrivateKey
-)
-
-// TODO: remove when quay.io switches to v2 schema
-func loadPrivateKey() (libtrust.PrivateKey, error) {
-	privateKeyLock.Lock()
-	defer privateKeyLock.Unlock()
-	if privateKey != nil {
-		return privateKey, nil
-	}
-	trustKey, err := libtrust.GenerateECP256PrivateKey()
-	if err != nil {
-		return nil, err
-	}
-	privateKey = trustKey
-	return privateKey, nil
-}
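The three functions above implement a try-modern-then-downgrade pattern: put the schema2 manifest, and only when the registry answers with ErrorCodeManifestInvalid rebuild and push a signed schema1 manifest. A condensed sketch of that control flow, with the conversion abstracted behind a hypothetical convert callback standing in for convertToSchema1:

```go
package manifestput

import (
	"context"

	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/api/errcode"
	v2 "github.com/docker/distribution/registry/api/v2"
	digest "github.com/opencontainers/go-digest"
)

// putWithFallback tries the schema2 put first and downgrades only when the
// registry explicitly rejects the manifest as invalid.
func putWithFallback(ctx context.Context, ms distribution.ManifestService, m distribution.Manifest, convert func() (distribution.Manifest, error)) (digest.Digest, error) {
	dgst, err := ms.Put(ctx, m)
	if err == nil {
		return dgst, nil
	}
	errs, ok := err.(errcode.Errors)
	if !ok || len(errs) == 0 {
		return dgst, err
	}
	first, ok := errs[0].(errcode.Error)
	if !ok || first.ErrorCode() != v2.ErrorCodeManifestInvalid {
		return dgst, err
	}
	downgraded, convErr := convert()
	if convErr != nil {
		return dgst, err // surface the original registry error, as above
	}
	return ms.Put(ctx, downgraded)
}
```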
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/mirror/mappings.go b/vendor/github.com/openshift/oc/pkg/cli/image/mirror/mappings.go
deleted file mode 100644
index f26abef6cc93..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/mirror/mappings.go
+++ /dev/null
@@ -1,343 +0,0 @@
-package mirror
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"os"
-	"path"
-	"strings"
-	"sync"
-
-	"github.com/docker/distribution/registry/client/auth"
-	digest "github.com/opencontainers/go-digest"
-
-	"github.com/openshift/library-go/pkg/image/reference"
-)
-
-// ErrAlreadyExists may be returned by the blob Create function to indicate that the blob already exists.
-var ErrAlreadyExists = fmt.Errorf("blob already exists in the target location")
-
-type Mapping struct {
-	Source      reference.DockerImageReference
-	Destination reference.DockerImageReference
-	Type        DestinationType
-	// Name is an optional field for identifying uniqueness within the mappings
-	// Name is an optional field used to uniquely identify this mapping
-}
-
-func parseSource(ref string) (reference.DockerImageReference, error) {
-	src, err := reference.Parse(ref)
-	if err != nil {
-		return src, fmt.Errorf("%q is not a valid image reference: %v", ref, err)
-	}
-	if len(src.Tag) == 0 && len(src.ID) == 0 {
-		return src, fmt.Errorf("you must specify a tag or digest for SRC")
-	}
-	return src, nil
-}
-
-type AWSReference struct {
-	Bucket string
-	Region string
-}
-
-type MirrorReference struct {
-	reference.DockerImageReference
-	AWS *AWSReference
-}
-
-func (r MirrorReference) Type() DestinationType {
-	if r.AWS != nil {
-		return DestinationS3
-	}
-	return DestinationRegistry
-}
-
-func (r MirrorReference) Combined() reference.DockerImageReference {
-	if r.AWS == nil {
-		return r.DockerImageReference
-	}
-	copied := r.DockerImageReference
-	copied.Registry = "s3.amazonaws.com"
-	copied.Namespace = path.Join(r.AWS.Region, r.AWS.Bucket, r.Namespace)
-	return copied
-}
-
-func ParseMirrorReference(ref string) (MirrorReference, error) {
-	var dst MirrorReference
-	switch {
-	case strings.HasPrefix(ref, "s3://"):
-		ref = strings.TrimPrefix(ref, "s3://")
-		dst.AWS = &AWSReference{}
-	}
-	image, err := reference.Parse(ref)
-	if err != nil {
-		return dst, fmt.Errorf("%q is not a valid image reference: %v", ref, err)
-	}
-	if len(image.ID) != 0 {
-		return dst, fmt.Errorf("you must specify a tag for DST or leave it blank to only push by digest")
-	}
-	dst.DockerImageReference = image
-	if dst.AWS != nil {
-		parts := strings.SplitN(image.RepositoryName(), "/", 3)
-		if len(parts) < 3 {
-			return dst, fmt.Errorf("s3 target URLs must have at least 3 path segments: REGION/BUCKET/REPO[/...]")
-		}
-		dst.AWS.Region = parts[0]
-		dst.AWS.Bucket = parts[1]
-		dst.Registry = fmt.Sprintf("%s.s3.amazonaws.com", parts[0])
-		dst.Name = path.Base(parts[2])
-		dst.Namespace = path.Dir(parts[2])
-		if dst.Namespace == "." {
-			dst.Namespace = ""
-		}
-	}
-	return dst, nil
-}
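To make the segment mapping concrete, a hypothetical reference parsed by this function decomposes as follows (all values invented for illustration):

```
s3://s3.amazonaws.com/us-east-1/mybucket/myorg/app:v1
  AWS.Region: us-east-1
  AWS.Bucket: mybucket
  Registry:   us-east-1.s3.amazonaws.com
  Namespace:  myorg
  Name:       app
  Tag:        v1
```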
-
-func parseDestination(ref string) (reference.DockerImageReference, DestinationType, error) {
-	dstType := DestinationRegistry
-	switch {
-	case strings.HasPrefix(ref, "s3://"):
-		dstType = DestinationS3
-		ref = strings.TrimPrefix(ref, "s3://")
-	}
-	dst, err := reference.Parse(ref)
-	if err != nil {
-		return dst, dstType, fmt.Errorf("%q is not a valid image reference: %v", ref, err)
-	}
-	if len(dst.ID) != 0 {
-		return dst, dstType, fmt.Errorf("you must specify a tag for DST or leave it blank to only push by digest")
-	}
-	return dst, dstType, nil
-}
-
-func parseArgs(args []string, overlap map[string]string) ([]Mapping, error) {
-	var remainingArgs []string
-	var mappings []Mapping
-	for _, s := range args {
-		parts := strings.SplitN(s, "=", 2)
-		if len(parts) != 2 {
-			remainingArgs = append(remainingArgs, s)
-			continue
-		}
-		if len(parts[0]) == 0 || len(parts[1]) == 0 {
-			return nil, fmt.Errorf("all arguments must be valid SRC=DST mappings")
-		}
-		src, err := parseSource(parts[0])
-		if err != nil {
-			return nil, err
-		}
-		dst, dstType, err := parseDestination(parts[1])
-		if err != nil {
-			return nil, err
-		}
-		if _, ok := overlap[dst.String()]; ok {
-			return nil, fmt.Errorf("each destination tag may only be specified once: %s", dst.String())
-		}
-		overlap[dst.String()] = src.String()
-
-		mappings = append(mappings, Mapping{Source: src, Destination: dst, Type: dstType})
-	}
-
-	switch {
-	case len(remainingArgs) > 1 && len(mappings) == 0:
-		src, err := parseSource(remainingArgs[0])
-		if err != nil {
-			return nil, err
-		}
-		for i := 1; i < len(remainingArgs); i++ {
-			if len(remainingArgs[i]) == 0 {
-				continue
-			}
-			dst, dstType, err := parseDestination(remainingArgs[i])
-			if err != nil {
-				return nil, err
-			}
-			if _, ok := overlap[dst.String()]; ok {
-				return nil, fmt.Errorf("each destination tag may only be specified once: %s", dst.String())
-			}
-			overlap[dst.String()] = src.String()
-			mappings = append(mappings, Mapping{Source: src, Destination: dst, Type: dstType})
-		}
-	case len(remainingArgs) == 1 && len(mappings) == 0:
-		return nil, fmt.Errorf("all arguments must be valid SRC=DST mappings, or you must specify one SRC argument and one or more DST arguments")
-	}
-	return mappings, nil
-}
-
-func parseFile(filename string, overlap map[string]string, in io.Reader) ([]Mapping, error) {
-	var fileMappings []Mapping
-	if filename != "-" {
-		f, err := os.Open(filename)
-		if err != nil {
-			return nil, err
-		}
-		defer f.Close()
-		in = f
-	}
-	s := bufio.NewScanner(in)
-	lineNumber := 0
-	for s.Scan() {
-		line := s.Text()
-		lineNumber++
-
-		// remove comments and whitespace
-		if i := strings.Index(line, "#"); i != -1 {
-			line = line[0:i]
-		}
-		line = strings.TrimSpace(line)
-		if len(line) == 0 {
-			continue
-		}
-
-		args := strings.Split(line, " ")
-		mappings, err := parseArgs(args, overlap)
-		if err != nil {
-			return nil, fmt.Errorf("file %s, line %d: %v", filename, lineNumber, err)
-		}
-		fileMappings = append(fileMappings, mappings...)
-	}
-	if err := s.Err(); err != nil {
-		return nil, err
-	}
-	return fileMappings, nil
-}
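parseFile accepts the same grammar as the positional arguments, one mapping group per line, with `#` comments and surrounding whitespace stripped. A hypothetical input file (names invented) might read:

```
# explicit SRC=DST pairs
myregistry.com/myimage:latest=docker.io/myorg/myimage:stable

# or one SRC fanned out to several DSTs
myregistry.com/myimage:latest docker.io/myorg/myimage:stable quay.io/myorg/myimage:dev
```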
-
-type key struct {
-	registry   string
-	repository string
-}
-
-type DestinationType string
-
-var (
-	DestinationRegistry DestinationType = "docker"
-	DestinationS3       DestinationType = "s3"
-)
-
-type destination struct {
-	t    DestinationType
-	ref  reference.DockerImageReference
-	tags []string
-}
-
-type pushTargets map[key]destination
-
-type destinations struct {
-	ref reference.DockerImageReference
-
-	lock    sync.Mutex
-	tags    map[string]pushTargets
-	digests map[string]pushTargets
-}
-
-func (d *destinations) mergeIntoDigests(srcDigest digest.Digest, target pushTargets) {
-	d.lock.Lock()
-	defer d.lock.Unlock()
-	srcKey := srcDigest.String()
-	current, ok := d.digests[srcKey]
-	if !ok {
-		d.digests[srcKey] = target
-		return
-	}
-	for repo, dst := range target {
-		existing, ok := current[repo]
-		if !ok {
-			current[repo] = dst
-			continue
-		}
-		existing.tags = append(existing.tags, dst.tags...)
-	}
-}
-
-type targetTree map[key]*destinations
-
-func buildTargetTree(mappings []Mapping) targetTree {
-	tree := make(targetTree)
-	for _, m := range mappings {
-		srcKey := key{registry: m.Source.Registry, repository: m.Source.RepositoryName()}
-		dstKey := key{registry: m.Destination.Registry, repository: m.Destination.RepositoryName()}
-
-		src, ok := tree[srcKey]
-		if !ok {
-			src = &destinations{}
-			src.ref = m.Source.AsRepository()
-			src.digests = make(map[string]pushTargets)
-			src.tags = make(map[string]pushTargets)
-			tree[srcKey] = src
-		}
-
-		var current pushTargets
-		if tag := m.Source.Tag; len(tag) != 0 {
-			current = src.tags[tag]
-			if current == nil {
-				current = make(pushTargets)
-				src.tags[tag] = current
-			}
-		} else {
-			current = src.digests[m.Source.ID]
-			if current == nil {
-				current = make(pushTargets)
-				src.digests[m.Source.ID] = current
-			}
-		}
-
-		dst, ok := current[dstKey]
-		if !ok {
-			dst.ref = m.Destination.AsRepository()
-			dst.t = m.Type
-		}
-		if len(m.Destination.Tag) > 0 {
-			dst.tags = append(dst.tags, m.Destination.Tag)
-		}
-		current[dstKey] = dst
-	}
-	return tree
-}
-
-func addDockerRegistryScopes(scopes map[string]map[string]bool, targets map[string]pushTargets, srcKey key) {
-	for _, target := range targets {
-		for dstKey, t := range target {
-			m := scopes[dstKey.registry]
-			if m == nil {
-				m = make(map[string]bool)
-				scopes[dstKey.registry] = m
-			}
-			m[dstKey.repository] = true
-			if t.t != DestinationRegistry || dstKey.registry != srcKey.registry || dstKey.repository == srcKey.repository {
-				continue
-			}
-			m = scopes[srcKey.registry]
-			if m == nil {
-				m = make(map[string]bool)
-				scopes[srcKey.registry] = m
-			}
-			if _, ok := m[srcKey.repository]; !ok {
-				m[srcKey.repository] = false
-			}
-		}
-	}
-}
-
-func calculateDockerRegistryScopes(tree targetTree) map[string][]auth.Scope {
-	scopes := make(map[string]map[string]bool)
-	for srcKey, dst := range tree {
-		addDockerRegistryScopes(scopes, dst.tags, srcKey)
-		addDockerRegistryScopes(scopes, dst.digests, srcKey)
-	}
-	uniqueScopes := make(map[string][]auth.Scope)
-	for registry, repos := range scopes {
-		var repoScopes []auth.Scope
-		for name, push := range repos {
-			if push {
-				repoScopes = append(repoScopes, auth.RepositoryScope{Repository: name, Actions: []string{"pull", "push"}})
-			} else {
-				repoScopes = append(repoScopes, auth.RepositoryScope{Repository: name, Actions: []string{"pull"}})
-			}
-		}
-		uniqueScopes[registry] = repoScopes
-	}
-	return uniqueScopes
-}
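The effect of the pull/push split above: for a single same-registry mapping such as myreg.com/team/src:1 to myreg.com/team/dst:1 (names hypothetical), the computed scopes would look roughly like:

```go
package scopesketch

import "github.com/docker/distribution/registry/client/auth"

// Push rights are requested only for destination repositories; a same-registry
// source repository is added with pull-only access so cross-repo mounts can work.
var scopes = map[string][]auth.Scope{
	"myreg.com": {
		auth.RepositoryScope{Repository: "team/dst", Actions: []string{"pull", "push"}},
		auth.RepositoryScope{Repository: "team/src", Actions: []string{"pull"}},
	},
}
```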
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/mirror/mirror.go b/vendor/github.com/openshift/oc/pkg/cli/image/mirror/mirror.go
deleted file mode 100644
index a5a98963d50e..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/mirror/mirror.go
+++ /dev/null
@@ -1,693 +0,0 @@
-package mirror
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"time"
-
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/manifest/manifestlist"
-	"github.com/docker/distribution/manifest/schema1"
-	"github.com/docker/distribution/manifest/schema2"
-	"github.com/docker/distribution/reference"
-	"github.com/docker/distribution/registry/client"
-
-	units "github.com/docker/go-units"
-	godigest "github.com/opencontainers/go-digest"
-	"github.com/spf13/cobra"
-	"k8s.io/klog"
-
-	apirequest "k8s.io/apiserver/pkg/endpoints/request"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	imagereference "github.com/openshift/library-go/pkg/image/reference"
-	"github.com/openshift/library-go/pkg/image/registryclient"
-	imagemanifest "github.com/openshift/oc/pkg/cli/image/manifest"
-	"github.com/openshift/oc/pkg/cli/image/workqueue"
-)
-
-var (
-	mirrorDesc = templates.LongDesc(`
-		Mirror images from one image repository to another.
-
-		Accepts a list of arguments defining source images that should be pushed to the provided
-		destination image tag. The images are streamed from registry to registry without being stored
-		locally. The default docker credentials are used for authenticating to the registries.
-
-		When using S3 mirroring, the region and bucket must be the first two segments after the host.
-		Mirroring will create the necessary metadata so that images can be pulled via tag or digest,
-		but listing manifests and tags will not be possible. You may also specify one or more
-		--s3-source-bucket parameters (as <bucket>/<path>) to designate buckets to look in to find
-		blobs (instead of uploading). The source bucket also supports the suffix "/[store]", which
-		will transform blob identifiers into the form the container image registry uses on disk, allowing
-		you to mirror directly from an existing S3-backed container image registry. Credentials for S3
-		may be stored in your docker credential file and looked up by host, or loaded via the normal
-		AWS client locations for ENV or file.
-
-		Images in manifest list format will be copied as-is unless you use --filter-by-os to restrict
-		the allowed images to copy in a manifest list. This flag has no effect on regular images.
-		`)
-
-	mirrorExample = templates.Examples(`
-# Copy image to another tag
-%[1]s myregistry.com/myimage:latest myregistry.com/myimage:stable
-
-# Copy image to another registry
-%[1]s myregistry.com/myimage:latest docker.io/myrepository/myimage:stable
-
-# Copy image to S3 (pull from <bucket>.s3.amazonaws.com/image:latest)
-%[1]s myregistry.com/myimage:latest s3://s3.amazonaws.com/<region>/<bucket>/image:latest
-
-# Copy image to S3 without setting a tag (pull via @<digest>)
-%[1]s myregistry.com/myimage:latest s3://s3.amazonaws.com/<region>/<bucket>/image
-
-# Copy image to multiple locations
-%[1]s myregistry.com/myimage:latest docker.io/myrepository/myimage:stable \
-    docker.io/myrepository/myimage:dev
-
-# Copy multiple images
-%[1]s myregistry.com/myimage:latest=myregistry.com/other:test \
-    myregistry.com/myimage:new=myregistry.com/other:target
-`)
-)
-
-type MirrorImageOptions struct {
-	Mappings []Mapping
-
-	SecurityOptions imagemanifest.SecurityOptions
-	FilterOptions   imagemanifest.FilterOptions
-
-	DryRun             bool
-	SkipMount          bool
-	SkipMultipleScopes bool
-	SkipMissing        bool
-	Force              bool
-
-	MaxRegistry     int
-	ParallelOptions imagemanifest.ParallelOptions
-
-	AttemptS3BucketCopy []string
-
-	Filenames []string
-
-	ManifestUpdateCallback func(registry string, manifests map[godigest.Digest]godigest.Digest) error
-
-	genericclioptions.IOStreams
-}
-
-func NewMirrorImageOptions(streams genericclioptions.IOStreams) *MirrorImageOptions {
-	return &MirrorImageOptions{
-		IOStreams:       streams,
-		ParallelOptions: imagemanifest.ParallelOptions{MaxPerRegistry: 6},
-		MaxRegistry:     4,
-	}
-}
-
-// NewCommandMirrorImage copies images from one location to another.
-func NewCmdMirrorImage(name string, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewMirrorImageOptions(streams)
-
-	cmd := &cobra.Command{
-		Use:     "mirror SRC DST [DST ...]",
-		Short:   "Mirror images from one repository to another",
-		Long:    mirrorDesc,
-		Example: fmt.Sprintf(mirrorExample, name+" mirror"),
-		Run: func(c *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(c, args))
-			kcmdutil.CheckErr(o.Validate())
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	flag := cmd.Flags()
-	o.SecurityOptions.Bind(flag)
-	o.FilterOptions.Bind(flag)
-	o.ParallelOptions.Bind(flag)
-
-	flag.BoolVar(&o.DryRun, "dry-run", o.DryRun, "Print the actions that would be taken and exit without writing to the destinations.")
-	flag.BoolVar(&o.SkipMissing, "skip-missing", o.SkipMissing, "If an input image is not found, skip it.")
-	flag.BoolVar(&o.SkipMount, "skip-mount", o.SkipMount, "Always push layers instead of cross-mounting them")
-	flag.BoolVar(&o.SkipMultipleScopes, "skip-multiple-scopes", o.SkipMultipleScopes, "Some registries do not support multiple scopes passed to the registry login.")
-	flag.BoolVar(&o.Force, "force", o.Force, "Attempt to write all layers and manifests even if they exist in the remote repository.")
-	flag.IntVar(&o.MaxRegistry, "max-registry", o.MaxRegistry, "Number of concurrent registries to connect to at any one time.")
-	flag.StringSliceVar(&o.AttemptS3BucketCopy, "s3-source-bucket", o.AttemptS3BucketCopy, "A list of bucket/path locations on S3 that may contain already uploaded blobs. Add [store] to the end to use the container image registry path convention.")
-	flag.StringSliceVarP(&o.Filenames, "filename", "f", o.Filenames, "One or more files to read SRC=DST or SRC DST [DST ...] mappings from.")
-
-	return cmd
-}
-
-func (o *MirrorImageOptions) Complete(cmd *cobra.Command, args []string) error {
-	if err := o.FilterOptions.Complete(cmd.Flags()); err != nil {
-		return err
-	}
-
-	overlap := make(map[string]string)
-
-	var err error
-	o.Mappings, err = parseArgs(args, overlap)
-	if err != nil {
-		return err
-	}
-	for _, filename := range o.Filenames {
-		mappings, err := parseFile(filename, overlap, o.In)
-		if err != nil {
-			return err
-		}
-		o.Mappings = append(o.Mappings, mappings...)
-	}
-
-	if len(o.Mappings) == 0 {
-		return fmt.Errorf("you must specify at least one source image to pull and the destination to push to as SRC=DST or SRC DST [DST2 DST3 ...]")
-	}
-
-	for _, mapping := range o.Mappings {
-		if mapping.Source.Equal(mapping.Destination) {
-			return fmt.Errorf("SRC and DST may not be the same")
-		}
-	}
-
-	return nil
-}
-
-func (o *MirrorImageOptions) Repository(ctx context.Context, context *registryclient.Context, t DestinationType, ref imagereference.DockerImageReference) (distribution.Repository, error) {
-	switch t {
-	case DestinationRegistry:
-		return context.Repository(ctx, ref.DockerClientDefaults().RegistryURL(), ref.RepositoryName(), o.SecurityOptions.Insecure)
-	case DestinationS3:
-		driver := &s3Driver{
-			Creds:    context.Credentials,
-			CopyFrom: o.AttemptS3BucketCopy,
-		}
-		url := ref.DockerClientDefaults().RegistryURL()
-		return driver.Repository(ctx, url, ref.RepositoryName(), o.SecurityOptions.Insecure)
-	default:
-		return nil, fmt.Errorf("unrecognized destination type %s", t)
-	}
-}
-
-func (o *MirrorImageOptions) Validate() error {
-	return o.FilterOptions.Validate()
-}
-
-func (o *MirrorImageOptions) Run() error {
-	start := time.Now()
-	p, err := o.plan()
-	if err != nil {
-		return err
-	}
-	p.Print(o.ErrOut)
-	fmt.Fprintln(o.ErrOut)
-
-	if errs := p.Errors(); len(errs) > 0 {
-		for _, err := range errs {
-			fmt.Fprintf(o.ErrOut, "error: %v\n", err)
-		}
-		return fmt.Errorf("an error occurred during planning")
-	}
-
-	work := Greedy(p)
-	work.Print(o.ErrOut)
-	fmt.Fprintln(o.ErrOut)
-
-	fmt.Fprintf(o.ErrOut, "info: Planning completed in %s\n", time.Since(start).Round(10*time.Millisecond))
-
-	if o.DryRun {
-		fmt.Fprintf(o.ErrOut, "info: Dry run complete\n")
-		return nil
-	}
-
-	stopCh := make(chan struct{})
-	defer close(stopCh)
-	q := workqueue.New(o.MaxRegistry, stopCh)
-	registryWorkers := make(map[string]workqueue.Interface)
-	for name := range p.RegistryNames() {
-		registryWorkers[name] = workqueue.New(o.ParallelOptions.MaxPerRegistry, stopCh)
-	}
-
-	next := time.Now()
-	defer func() {
-		d := time.Since(next)
-		fmt.Fprintf(o.ErrOut, "info: Mirroring completed in %s (%s/s)\n", d.Truncate(10*time.Millisecond), units.HumanSize(float64(work.stats.bytes)/d.Seconds()))
-	}()
-
-	ctx := apirequest.NewContext()
-	for j := range work.phases {
-		phase := &work.phases[j]
-		q.Batch(func(w workqueue.Work) {
-			for i := range phase.independent {
-				unit := phase.independent[i]
-				w.Parallel(func() {
-					// upload blobs
-					registryWorkers[unit.registry.name].Batch(func(w workqueue.Work) {
-						for i := range unit.repository.blobs {
-							op := unit.repository.blobs[i]
-							for digestString := range op.blobs {
-								digest := godigest.Digest(digestString)
-								blob := op.parent.parent.parent.GetBlob(digest)
-								w.Parallel(func() {
-									if err := copyBlob(ctx, work, op, blob, o.Force, o.SkipMount, o.ErrOut); err != nil {
-										phase.ExecutionFailure(err)
-										return
-									}
-									op.parent.parent.AssociateBlob(digest, unit.repository.name)
-								})
-							}
-						}
-					})
-					if phase.IsFailed() {
-						return
-					}
-					// upload manifests
-					op := unit.repository.manifests
-					registryWorkers[unit.registry.name].Batch(func(w workqueue.Work) {
-						ref, err := reference.WithName(op.toRef.RepositoryName())
-						if err != nil {
-							phase.ExecutionFailure(fmt.Errorf("unable to create reference to repository %s: %v", op.toRef, err))
-							return
-						}
-						// upload and tag the manifest
-						for digest := range op.digestsToTags {
-							srcDigest := digest
-							tags := op.digestsToTags[srcDigest].List()
-							w.Parallel(func() {
-								if errs := copyManifestToTags(ctx, ref, srcDigest, tags, op, o.Out); len(errs) > 0 {
-									phase.ExecutionFailure(errs...)
-								}
-							})
-						}
-						// this is a pure manifest move, put the manifest by its id
-						for digest := range op.digestCopies {
-							srcDigest := godigest.Digest(digest)
-							w.Parallel(func() {
-								if err := copyManifest(ctx, ref, srcDigest, op, o.Out); err != nil {
-									phase.ExecutionFailure(err)
-								}
-							})
-						}
-					})
-				})
-			}
-		})
-		if phase.IsFailed() {
-			for _, err := range phase.ExecutionFailures() {
-				fmt.Fprintf(o.ErrOut, "error: %v\n", err)
-			}
-			return fmt.Errorf("one or more errors occurred while uploading images")
-		}
-	}
-
-	if o.ManifestUpdateCallback != nil {
-		for _, reg := range p.registries {
-			klog.V(4).Infof("Manifests mapped %#v", reg.manifestConversions)
-			if err := o.ManifestUpdateCallback(reg.name, reg.manifestConversions); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (o *MirrorImageOptions) plan() (*plan, error) {
-	ctx := apirequest.NewContext()
-	context, err := o.SecurityOptions.Context()
-	if err != nil {
-		return nil, err
-	}
-	fromContext := context.Copy()
-	toContext := context.Copy().WithActions("pull", "push")
-	toContexts := make(map[string]*registryclient.Context)
-
-	tree := buildTargetTree(o.Mappings)
-	for registry, scopes := range calculateDockerRegistryScopes(tree) {
-		klog.V(5).Infof("Using scopes for registry %s: %v", registry, scopes)
-		if o.SkipMultipleScopes {
-			toContexts[registry] = toContext.Copy()
-		} else {
-			toContexts[registry] = toContext.Copy().WithScopes(scopes...)
-		}
-	}
-
-	stopCh := make(chan struct{})
-	defer close(stopCh)
-	q := workqueue.New(o.MaxRegistry, stopCh)
-	registryWorkers := make(map[string]workqueue.Interface)
-	for name := range tree {
-		if _, ok := registryWorkers[name.registry]; !ok {
-			registryWorkers[name.registry] = workqueue.New(o.ParallelOptions.MaxPerRegistry, stopCh)
-		}
-	}
-
-	plan := newPlan()
-
-	for name := range tree {
-		src := tree[name]
-		q.Queue(func(_ workqueue.Work) {
-			srcRepo, err := fromContext.Repository(ctx, src.ref.DockerClientDefaults().RegistryURL(), src.ref.RepositoryName(), o.SecurityOptions.Insecure)
-			if err != nil {
-				plan.AddError(retrieverError{err: fmt.Errorf("unable to connect to %s: %v", src.ref, err), src: src.ref})
-				return
-			}
-			manifests, err := srcRepo.Manifests(ctx)
-			if err != nil {
-				plan.AddError(retrieverError{src: src.ref, err: fmt.Errorf("unable to access source image %s manifests: %v", src.ref, err)})
-				return
-			}
-			rq := registryWorkers[name.registry]
-			rq.Batch(func(w workqueue.Work) {
-				// convert source tags to digests
-				for tag := range src.tags {
-					srcTag, pushTargets := tag, src.tags[tag]
-					w.Parallel(func() {
-						desc, err := srcRepo.Tags(ctx).Get(ctx, srcTag)
-						if err != nil {
-							if o.SkipMissing && imagemanifest.IsImageNotFound(err) {
-								ref := src.ref
-								ref.Tag = srcTag
-								fmt.Fprintf(o.ErrOut, "warning: Image %s does not exist and will not be mirrored\n", ref)
-								return
-							}
-							plan.AddError(retrieverError{src: src.ref, err: fmt.Errorf("unable to retrieve source image %s by tag %s: %v", src.ref, srcTag, err)})
-							return
-						}
-						srcDigest := desc.Digest
-						klog.V(3).Infof("Resolved source image %s:%s to %s\n", src.ref, srcTag, srcDigest)
-						src.mergeIntoDigests(srcDigest, pushTargets)
-					})
-				}
-			})
-
-			canonicalFrom := srcRepo.Named()
-
-			rq.Queue(func(w workqueue.Work) {
-				for key := range src.digests {
-					srcDigestString, pushTargets := key, src.digests[key]
-					w.Parallel(func() {
-						// load the manifest
-						srcDigest := godigest.Digest(srcDigestString)
-						srcManifest, err := manifests.Get(ctx, srcDigest, imagemanifest.PreferManifestList)
-						if err != nil {
-							plan.AddError(retrieverError{src: src.ref, err: fmt.Errorf("unable to retrieve source image %s manifest %s: %v", src.ref, srcDigest, err)})
-							return
-						}
-						klog.V(5).Infof("Found manifest %s with type %T", srcDigest, srcManifest)
-
-						// filter or load manifest list as appropriate
-						originalSrcDigest := srcDigest
-						srcManifests, srcManifest, srcDigest, err := imagemanifest.ProcessManifestList(ctx, srcDigest, srcManifest, manifests, src.ref, o.FilterOptions.IncludeAll)
-						if err != nil {
-							plan.AddError(retrieverError{src: src.ref, err: err})
-							return
-						}
-						if len(srcManifests) == 0 {
-							fmt.Fprintf(o.ErrOut, "info: Filtered all images from %s, skipping\n", src.ref)
-							return
-						}
-
-						var location string
-						if srcDigest == originalSrcDigest {
-							location = fmt.Sprintf("manifest %s", srcDigest)
-						} else {
-							location = fmt.Sprintf("manifest %s in manifest list %s", srcDigest, originalSrcDigest)
-						}
-
-						for _, dst := range pushTargets {
-							toRepo, err := o.Repository(ctx, toContexts[dst.ref.Registry], dst.t, dst.ref)
-							if err != nil {
-								plan.AddError(retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unable to connect to %s: %v", dst.ref, err)})
-								continue
-							}
-
-							canonicalTo := toRepo.Named()
-
-							registryPlan := plan.RegistryPlan(dst.ref.Registry)
-							repoPlan := registryPlan.RepositoryPlan(canonicalTo.String())
-							blobPlan := repoPlan.Blobs(src.ref, dst.t, location)
-
-							toManifests, err := toRepo.Manifests(ctx)
-							if err != nil {
-								repoPlan.AddError(retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unable to access destination image %s manifests: %v", dst.ref, err)})
-								continue
-							}
-
-							var mustCopyLayers bool
-							switch {
-							case o.Force:
-								mustCopyLayers = true
-							case src.ref.Registry == dst.ref.Registry && canonicalFrom.String() == canonicalTo.String():
-								// if the source and destination repos are the same, we don't need to copy layers unless forced
-							default:
-								if _, err := toManifests.Get(ctx, srcDigest); err != nil {
-									mustCopyLayers = true
-									blobPlan.AlreadyExists(distribution.Descriptor{Digest: srcDigest})
-								} else {
-									klog.V(4).Infof("Manifest exists in %s, no need to copy layers without --force", dst.ref)
-								}
-							}
-
-							toBlobs := toRepo.Blobs(ctx)
-
-							if mustCopyLayers {
-								// upload all the blobs
-								srcBlobs := srcRepo.Blobs(ctx)
-
-								// upload each manifest
-								for _, srcManifest := range srcManifests {
-									switch srcManifest.(type) {
-									case *schema2.DeserializedManifest:
-									case *schema1.SignedManifest:
-									case *manifestlist.DeserializedManifestList:
-										// we do not need to upload layers in a manifestlist
-										continue
-									default:
-										repoPlan.AddError(retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("the manifest type %T is not supported", srcManifest)})
-										continue
-									}
-									for _, blob := range srcManifest.References() {
-										if src.ref.Registry == dst.ref.Registry {
-											registryPlan.AssociateBlob(blob.Digest, canonicalFrom.String())
-										}
-										blobPlan.Copy(blob, srcBlobs, toBlobs)
-									}
-								}
-							}
-
-							repoPlan.Manifests(dst.t).Copy(srcDigest, srcManifest, dst.tags, toManifests, toBlobs)
-						}
-					})
-				}
-			})
-		})
-	}
-	for _, q := range registryWorkers {
-		q.Done()
-	}
-	q.Done()
-
-	plan.trim()
-	plan.calculateStats()
-
-	return plan, nil
-}
-
-func copyBlob(ctx context.Context, plan *workPlan, c *repositoryBlobCopy, blob distribution.Descriptor, force, skipMount bool, errOut io.Writer) error {
-	// if we aren't forcing upload, check to see if the blob already exists
-	if !force {
-		_, err := c.to.Stat(ctx, blob.Digest)
-		if err == nil {
-			// blob exists, skip
-			klog.V(5).Infof("Server reports blob exists %#v", blob)
-			c.parent.parent.AssociateBlob(blob.Digest, c.parent.name)
-			c.parent.ExpectBlob(blob.Digest)
-			return nil
-		}
-		if err != distribution.ErrBlobUnknown {
-			klog.V(5).Infof("Server was unable to check whether blob exists %s: %v", blob.Digest, err)
-		}
-	}
-
-	var expectMount string
-	var options []distribution.BlobCreateOption
-	if !skipMount {
-		if repo, ok := c.parent.parent.MountFrom(blob.Digest); ok {
-			expectMount = repo
-			canonicalFrom, err := reference.WithName(repo)
-			if err != nil {
-				return fmt.Errorf("unexpected error building named reference for %s: %v", repo, err)
-			}
-			blobSource, err := reference.WithDigest(canonicalFrom, blob.Digest)
-			if err != nil {
-				return fmt.Errorf("unexpected error building named digest: %v", err)
-			}
-			options = append(options, client.WithMountFrom(blobSource), WithDescriptor(blob))
-		}
-	}
-
-	// if the object is small enough, put directly
-	if blob.Size > 0 && blob.Size < 16384 {
-		data, err := c.from.Get(ctx, blob.Digest)
-		if err != nil {
-			return fmt.Errorf("unable to push %s: failed to retrieve blob %s: %s", c.fromRef, blob.Digest, err)
-		}
-		desc, err := c.to.Put(ctx, blob.MediaType, data)
-		if err != nil {
-			return fmt.Errorf("unable to push %s: failed to upload blob %s: %s", c.fromRef, blob.Digest, err)
-		}
-		if desc.Digest != blob.Digest {
-			return fmt.Errorf("unable to push %s: tried to copy blob %s and got back a different digest %s", c.fromRef, blob.Digest, desc.Digest)
-		}
-		plan.BytesCopied(blob.Size)
-		return nil
-	}
-
-	if c.destinationType == DestinationS3 {
-		options = append(options, WithDescriptor(blob))
-	}
-
-	w, err := c.to.Create(ctx, options...)
-	// no-op
-	if err == ErrAlreadyExists {
-		klog.V(5).Infof("Blob already exists %#v", blob)
-		return nil
-	}
-
-	// mount successful
-	if ebm, ok := err.(distribution.ErrBlobMounted); ok {
-		klog.V(5).Infof("Blob mounted %#v", blob)
-		if ebm.From.Digest() != blob.Digest {
-			return fmt.Errorf("unable to push %s: tried to mount blob %s source and got back a different digest %s", c.fromRef, blob.Digest, ebm.From.Digest())
-		}
-		switch c.destinationType {
-		case DestinationS3:
-			fmt.Fprintf(errOut, "mounted: s3://%s %s %s\n", c.toRef, blob.Digest, units.BytesSize(float64(blob.Size)))
-		default:
-			fmt.Fprintf(errOut, "mounted: %s %s %s\n", c.toRef, blob.Digest, units.BytesSize(float64(blob.Size)))
-		}
-		return nil
-	}
-	if err != nil {
-		return fmt.Errorf("unable to upload blob %s to %s: %v", blob.Digest, c.toRef, err)
-	}
-
-	if len(expectMount) > 0 {
-		fmt.Fprintf(errOut, "warning: Expected to mount %s from %s/%s but mount was ignored\n", blob.Digest, c.parent.parent.name, expectMount)
-	}
-
-	err = func() error {
-		klog.V(5).Infof("Uploading blob %s", blob.Digest)
-		defer w.Cancel(ctx)
-		r, err := c.from.Open(ctx, blob.Digest)
-		if err != nil {
-			return fmt.Errorf("unable to open source layer %s to copy to %s: %v", blob.Digest, c.toRef, err)
-		}
-		defer r.Close()
-
-		switch c.destinationType {
-		case DestinationS3:
-			fmt.Fprintf(errOut, "uploading: s3://%s %s %s\n", c.toRef, blob.Digest, units.BytesSize(float64(blob.Size)))
-		default:
-			fmt.Fprintf(errOut, "uploading: %s %s %s\n", c.toRef, blob.Digest, units.BytesSize(float64(blob.Size)))
-		}
-
-		n, err := w.ReadFrom(r)
-		if err != nil {
-			return fmt.Errorf("unable to copy layer %s to %s: %v", blob.Digest, c.toRef, err)
-		}
-		if n != blob.Size {
-			fmt.Fprintf(errOut, "warning: Layer size mismatch for %s: had %d, wrote %d\n", blob.Digest, blob.Size, n)
-		}
-		if _, err := w.Commit(ctx, blob); err != nil {
-			return fmt.Errorf("failed to commit blob %s from %s to %s: %v", blob.Digest, c.location, c.toRef, err)
-		}
-		plan.BytesCopied(n)
-		return nil
-	}()
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-func copyManifestToTags(
-	ctx context.Context,
-	ref reference.Named,
-	srcDigest godigest.Digest,
-	tags []string,
-	plan *repositoryManifestPlan,
-	out io.Writer,
-) []error {
-	var errs []error
-	srcManifest, ok := plan.parent.parent.parent.GetManifest(srcDigest)
-	if !ok {
-		panic(fmt.Sprintf("empty source manifest for %s", srcDigest))
-	}
-	for _, tag := range tags {
-		toDigest, err := imagemanifest.PutManifestInCompatibleSchema(ctx, srcManifest, tag, plan.to, ref, plan.toBlobs, nil)
-		if err != nil {
-			errs = append(errs, fmt.Errorf("unable to push manifest to %s:%s: %v", plan.toRef, tag, err))
-			continue
-		}
-		for _, desc := range srcManifest.References() {
-			plan.parent.parent.AssociateBlob(desc.Digest, plan.parent.name)
-		}
-		plan.parent.parent.SavedManifest(srcDigest, toDigest)
-		switch plan.destinationType {
-		case DestinationS3:
-			fmt.Fprintf(out, "%s s3://%s:%s\n", toDigest, plan.toRef, tag)
-		default:
-			fmt.Fprintf(out, "%s %s:%s\n", toDigest, plan.toRef, tag)
-		}
-	}
-	return errs
-}
-
-func copyManifest(
-	ctx context.Context,
-	ref reference.Named,
-	srcDigest godigest.Digest,
-	plan *repositoryManifestPlan,
-	out io.Writer,
-) error {
-	srcManifest, ok := plan.parent.parent.parent.GetManifest(srcDigest)
-	if !ok {
-		panic(fmt.Sprintf("empty source manifest for %s", srcDigest))
-	}
-	toDigest, err := imagemanifest.PutManifestInCompatibleSchema(ctx, srcManifest, "", plan.to, ref, plan.toBlobs, nil)
-	if err != nil {
-		return fmt.Errorf("unable to push manifest to %s: %v", plan.toRef, err)
-	}
-	for _, desc := range srcManifest.References() {
-		plan.parent.parent.AssociateBlob(desc.Digest, plan.parent.name)
-	}
-	plan.parent.parent.SavedManifest(srcDigest, toDigest)
-	switch plan.destinationType {
-	case DestinationS3:
-		fmt.Fprintf(out, "%s s3://%s\n", toDigest, plan.toRef)
-	default:
-		fmt.Fprintf(out, "%s %s\n", toDigest, plan.toRef)
-	}
-	return nil
-}
-
-type optionFunc func(interface{}) error
-
-func (f optionFunc) Apply(v interface{}) error {
-	return f(v)
-}
-
-// WithDescriptor returns a BlobCreateOption which provides the expected blob metadata.
-func WithDescriptor(desc distribution.Descriptor) distribution.BlobCreateOption {
-	return optionFunc(func(v interface{}) error {
-		opts, ok := v.(*distribution.CreateOptions)
-		if !ok {
-			return fmt.Errorf("unexpected options type: %T", v)
-		}
-		if opts.Mount.Stat == nil {
-			opts.Mount.Stat = &desc
-		}
-		return nil
-	})
-}
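copyBlob above chooses between two upload paths: blobs under 16 KiB are fetched whole and pushed with Put, while larger blobs are streamed through a BlobWriter and verified by Commit against the expected descriptor. A stripped-down sketch of that decision, with the mount and progress handling omitted:

```go
package blobcopy

import (
	"context"
	"fmt"

	"github.com/docker/distribution"
)

func copyOne(ctx context.Context, from, to distribution.BlobService, blob distribution.Descriptor) error {
	// small blobs: a single Get/Put round trip, with an explicit digest check
	if blob.Size > 0 && blob.Size < 16384 {
		data, err := from.Get(ctx, blob.Digest)
		if err != nil {
			return err
		}
		desc, err := to.Put(ctx, blob.MediaType, data)
		if err != nil {
			return err
		}
		if desc.Digest != blob.Digest {
			return fmt.Errorf("copied blob digest mismatch: got %s, want %s", desc.Digest, blob.Digest)
		}
		return nil
	}
	// large blobs: stream into an upload session and let Commit verify the digest
	w, err := to.Create(ctx)
	if err != nil {
		return err
	}
	defer w.Cancel(ctx)
	r, err := from.Open(ctx, blob.Digest)
	if err != nil {
		return err
	}
	defer r.Close()
	if _, err := w.ReadFrom(r); err != nil {
		return err
	}
	_, err = w.Commit(ctx, blob)
	return err
}
```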
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/mirror/plan.go b/vendor/github.com/openshift/oc/pkg/cli/image/mirror/plan.go
deleted file mode 100644
index a995900a89a6..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/mirror/plan.go
+++ /dev/null
@@ -1,781 +0,0 @@
-package mirror
-
-import (
-	"fmt"
-	"io"
-	"sort"
-	"sync"
-	"text/tabwriter"
-
-	"github.com/docker/distribution"
-	"k8s.io/klog"
-
-	units "github.com/docker/go-units"
-	godigest "github.com/opencontainers/go-digest"
-	"k8s.io/apimachinery/pkg/util/sets"
-
-	"github.com/openshift/library-go/pkg/image/reference"
-)
-
-type retrieverError struct {
-	src, dst reference.DockerImageReference
-	err      error
-}
-
-func (e retrieverError) Error() string {
-	return e.err.Error()
-}
-
-type repositoryWork struct {
-	registry   *registryPlan
-	repository *repositoryPlan
-	stats      struct {
-		mountOpportunities int
-	}
-}
-
-func (w *repositoryWork) calculateStats(existing sets.String) sets.String {
-	blobs := sets.NewString()
-	for i := range w.repository.blobs {
-		blobs.Insert(w.repository.blobs[i].blobs.UnsortedList()...)
-	}
-	w.stats.mountOpportunities = blobs.Intersection(existing).Len()
-	return blobs
-}
-
-type phase struct {
-	independent []repositoryWork
-
-	lock   sync.Mutex
-	failed bool
-	errs   []error
-}
-
-func (p *phase) Failed() {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-	p.failed = true
-}
-
-func (p *phase) ExecutionFailure(err ...error) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-	p.failed = true
-	p.errs = append(p.errs, err...)
-}
-
-func (p *phase) IsFailed() bool {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-	return p.failed
-}
-
-func (p *phase) ExecutionFailures() []error {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-	return p.errs
-}
-
-func (p *phase) calculateStats(existingBlobs map[string]sets.String) {
-	blobs := make(map[string]sets.String)
-	for i, work := range p.independent {
-		blobs[work.registry.name] = p.independent[i].calculateStats(existingBlobs[work.registry.name]).Union(blobs[work.registry.name])
-	}
-	for name, registryBlobs := range blobs {
-		existingBlobs[name] = existingBlobs[name].Union(registryBlobs)
-	}
-}
-
-type workPlan struct {
-	phases []phase
-
-	lock  sync.Mutex
-	stats struct {
-		bytes int64
-	}
-}
-
-func (w *workPlan) calculateStats() {
-	blobs := make(map[string]sets.String)
-	for i := range w.phases {
-		w.phases[i].calculateStats(blobs)
-	}
-}
-
-func (w *workPlan) BytesCopied(bytes int64) {
-	w.lock.Lock()
-	defer w.lock.Unlock()
-	w.stats.bytes += bytes
-}
-
-func (w *workPlan) Print(out io.Writer) {
-	tabw := tabwriter.NewWriter(out, 0, 0, 1, ' ', 0)
-	for i := range w.phases {
-		phase := &w.phases[i]
-		fmt.Fprintf(out, "phase %d:\n", i)
-		for _, unit := range phase.independent {
-			fmt.Fprintf(tabw, "  %s\t%s\tblobs=%d\tmounts=%d\tmanifests=%d\tshared=%d\n", unit.registry.name, unit.repository.name, unit.repository.stats.sharedCount+unit.repository.stats.uniqueCount, unit.stats.mountOpportunities, unit.repository.manifests.stats.count, unit.repository.stats.sharedCount)
-		}
-		tabw.Flush()
-	}
-}
-
-type plan struct {
-	lock       sync.Mutex
-	registries map[string]*registryPlan
-	errs       []error
-	blobs      map[godigest.Digest]distribution.Descriptor
-	manifests  map[godigest.Digest]distribution.Manifest
-
-	work *workPlan
-
-	stats struct {
-	}
-}
-
-func newPlan() *plan {
-	return &plan{
-		registries: make(map[string]*registryPlan),
-		manifests:  make(map[godigest.Digest]distribution.Manifest),
-		blobs:      make(map[godigest.Digest]distribution.Descriptor),
-	}
-}
-
-func (p *plan) AddError(errs ...error) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	p.errs = append(p.errs, errs...)
-}
-
-func (p *plan) RegistryPlan(name string) *registryPlan {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	plan, ok := p.registries[name]
-	if ok {
-		return plan
-	}
-	plan = &registryPlan{
-		parent:      p,
-		name:        name,
-		blobsByRepo: make(map[godigest.Digest]string),
-
-		manifestConversions: make(map[godigest.Digest]godigest.Digest),
-	}
-	p.registries[name] = plan
-	return plan
-}
-
-func (p *plan) CacheManifest(digest godigest.Digest, manifest distribution.Manifest) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	if _, ok := p.manifests[digest]; ok {
-		return
-	}
-	p.manifests[digest] = manifest
-}
-
-func (p *plan) GetManifest(digest godigest.Digest) (distribution.Manifest, bool) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	existing, ok := p.manifests[digest]
-	return existing, ok
-}
-
-func (p *plan) CacheBlob(blob distribution.Descriptor) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	if existing, ok := p.blobs[blob.Digest]; ok && existing.Size > 0 {
-		return
-	}
-	p.blobs[blob.Digest] = blob
-}
-
-func (p *plan) GetBlob(digest godigest.Digest) distribution.Descriptor {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	return p.blobs[digest]
-}
-
-func (p *plan) RegistryNames() sets.String {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	names := sets.NewString()
-	for name := range p.registries {
-		names.Insert(name)
-	}
-	return names
-}
-
-func (p *plan) Errors() []error {
-	var errs []error
-	for _, r := range p.registries {
-		for _, repo := range r.repositories {
-			errs = append(errs, repo.errs...)
-		}
-	}
-	errs = append(errs, p.errs...)
-	return errs
-}
-
-func (p *plan) BlobDescriptors(blobs sets.String) []distribution.Descriptor {
-	descriptors := make([]distribution.Descriptor, 0, len(blobs))
-	for s := range blobs {
-		if desc, ok := p.blobs[godigest.Digest(s)]; ok {
-			descriptors = append(descriptors, desc)
-		} else {
-			descriptors = append(descriptors, distribution.Descriptor{
-				Digest: godigest.Digest(s),
-			})
-		}
-	}
-	return descriptors
-}
-
-func (p *plan) Print(w io.Writer) {
-	for _, name := range p.RegistryNames().List() {
-		r := p.registries[name]
-		fmt.Fprintf(w, "%s/\n", name)
-		for _, repoName := range r.RepositoryNames().List() {
-			repo := r.repositories[repoName]
-			fmt.Fprintf(w, "  %s\n", repoName)
-			for _, err := range repo.errs {
-				fmt.Fprintf(w, "    error: %s\n", err)
-			}
-			for _, blob := range repo.blobs {
-				fmt.Fprintf(w, "    blobs:\n")
-				blobs := p.BlobDescriptors(blob.blobs)
-				sort.Slice(blobs, func(i, j int) bool {
-					if blobs[i].Size == blobs[j].Size {
-						return blobs[i].Digest.String() < blobs[j].Digest.String()
-					}
-					return blobs[i].Size < blobs[j].Size
-				})
-				for _, b := range blobs {
-					if size := b.Size; size > 0 {
-						fmt.Fprintf(w, "      %s %s %s\n", blob.fromRef, b.Digest, units.BytesSize(float64(size)))
-					} else {
-						fmt.Fprintf(w, "      %s %s\n", blob.fromRef, b.Digest)
-					}
-				}
-			}
-			fmt.Fprintf(w, "    manifests:\n")
-			for _, s := range repo.manifests.digestCopies {
-				fmt.Fprintf(w, "      %s\n", s)
-			}
-			for _, digest := range repo.manifests.inputDigests().List() {
-				tags := repo.manifests.digestsToTags[godigest.Digest(digest)]
-				for _, s := range tags.List() {
-					fmt.Fprintf(w, "      %s -> %s\n", digest, s)
-				}
-			}
-		}
-		totalSize := r.stats.uniqueSize + r.stats.sharedSize
-		if totalSize > 0 {
-			fmt.Fprintf(w, "  stats: shared=%d unique=%d size=%s ratio=%.2f\n", r.stats.sharedCount, r.stats.uniqueCount, units.BytesSize(float64(totalSize)), float32(r.stats.uniqueSize)/float32(totalSize))
-		} else {
-			fmt.Fprintf(w, "  stats: shared=%d unique=%d size=%s\n", r.stats.sharedCount, r.stats.uniqueCount, units.BytesSize(float64(totalSize)))
-		}
-	}
-}
-
-func (p *plan) trim() {
-	for name, registry := range p.registries {
-		if registry.trim() {
-			delete(p.registries, name)
-		}
-	}
-}
-
-func (p *plan) calculateStats() {
-	for _, registry := range p.registries {
-		registry.calculateStats()
-	}
-}
-
-type registryPlan struct {
-	parent *plan
-	name   string
-
-	lock         sync.Mutex
-	repositories map[string]*repositoryPlan
-	blobsByRepo  map[godigest.Digest]string
-
-	manifestConversions map[godigest.Digest]godigest.Digest
-
-	stats struct {
-		uniqueSize  int64
-		sharedSize  int64
-		uniqueCount int32
-		sharedCount int32
-	}
-}
-
-func (p *registryPlan) AssociateBlob(digest godigest.Digest, repo string) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	p.blobsByRepo[digest] = repo
-}
-
-func (p *registryPlan) SavedManifest(srcDigest, dstDigest godigest.Digest) {
-	if srcDigest == dstDigest {
-		return
-	}
-	p.lock.Lock()
-	defer p.lock.Unlock()
-	klog.V(4).Infof("Associated digest %s with converted digest %s", srcDigest, dstDigest)
-	p.manifestConversions[srcDigest] = dstDigest
-}
-
-func (p *registryPlan) MountFrom(digest godigest.Digest) (string, bool) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	repo, ok := p.blobsByRepo[digest]
-	return repo, ok
-}
-
-func (p *registryPlan) RepositoryNames() sets.String {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	names := sets.NewString()
-	for name := range p.repositories {
-		names.Insert(name)
-	}
-	return names
-}
-
-func (p *registryPlan) RepositoryPlan(name string) *repositoryPlan {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	if p.repositories == nil {
-		p.repositories = make(map[string]*repositoryPlan)
-	}
-	plan, ok := p.repositories[name]
-	if ok {
-		return plan
-	}
-	plan = &repositoryPlan{
-		parent:        p,
-		name:          name,
-		existingBlobs: sets.NewString(),
-		absentBlobs:   sets.NewString(),
-	}
-	p.repositories[name] = plan
-	return plan
-}
-
-func (p *registryPlan) trim() bool {
-	for name, plan := range p.repositories {
-		if plan.trim() {
-			delete(p.repositories, name)
-		}
-	}
-	return len(p.repositories) == 0
-}
-
-func (p *registryPlan) calculateStats() {
-	counts := make(map[string]int)
-	for _, plan := range p.repositories {
-		plan.blobCounts(counts)
-	}
-	for _, plan := range p.repositories {
-		plan.calculateStats(counts)
-	}
-	for digest, count := range counts {
-		if count > 1 {
-			p.stats.sharedSize += p.parent.GetBlob(godigest.Digest(digest)).Size
-			p.stats.sharedCount++
-		} else {
-			p.stats.uniqueSize += p.parent.GetBlob(godigest.Digest(digest)).Size
-			p.stats.uniqueCount++
-		}
-	}
-}
-
-type repositoryPlan struct {
-	parent *registryPlan
-	name   string
-
-	lock          sync.Mutex
-	existingBlobs sets.String
-	absentBlobs   sets.String
-	blobs         []*repositoryBlobCopy
-	manifests     *repositoryManifestPlan
-	errs          []error
-
-	stats struct {
-		size        int64
-		sharedSize  int64
-		uniqueSize  int64
-		sharedCount int32
-		uniqueCount int32
-	}
-}
-
-func (p *repositoryPlan) AddError(errs ...error) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	p.errs = append(p.errs, errs...)
-}
-
-func (p *repositoryPlan) Blobs(from reference.DockerImageReference, t DestinationType, location string) *repositoryBlobCopy {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	for _, blob := range p.blobs {
-		if blob.fromRef == from {
-			return blob
-		}
-	}
-	p.blobs = append(p.blobs, &repositoryBlobCopy{
-		parent: p,
-
-		fromRef:         from,
-		toRef:           reference.DockerImageReference{Registry: p.parent.name, Name: p.name},
-		destinationType: t,
-		location:        location,
-
-		blobs: sets.NewString(),
-	})
-	return p.blobs[len(p.blobs)-1]
-}
-
-func (p *repositoryPlan) ExpectBlob(digest godigest.Digest) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	p.absentBlobs.Delete(digest.String())
-	p.existingBlobs.Insert(digest.String())
-}
-
-func (p *repositoryPlan) Manifests(destinationType DestinationType) *repositoryManifestPlan {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	if p.manifests == nil {
-		p.manifests = &repositoryManifestPlan{
-			parent:          p,
-			toRef:           reference.DockerImageReference{Registry: p.parent.name, Name: p.name},
-			destinationType: destinationType,
-			digestsToTags:   make(map[godigest.Digest]sets.String),
-			digestCopies:    sets.NewString(),
-		}
-	}
-	return p.manifests
-}
-
-func (p *repositoryPlan) blobCounts(registryCounts map[string]int) {
-	for i := range p.blobs {
-		for digest := range p.blobs[i].blobs {
-			registryCounts[digest]++
-		}
-	}
-}
-
-func (p *repositoryPlan) trim() bool {
-	var blobs []*repositoryBlobCopy
-	for _, blob := range p.blobs {
-		if blob.trim() {
-			continue
-		}
-		blobs = append(blobs, blob)
-	}
-	p.blobs = blobs
-	if p.manifests != nil {
-		if p.manifests.trim() {
-			p.manifests = nil
-		}
-	}
-	return len(p.blobs) == 0 && p.manifests == nil
-}
-
-func (p *repositoryPlan) calculateStats(registryCounts map[string]int) {
-	p.manifests.calculateStats()
-	blobs := sets.NewString()
-	for i := range p.blobs {
-		for digest := range p.blobs[i].blobs {
-			blobs.Insert(digest)
-		}
-		p.blobs[i].calculateStats()
-		p.stats.size += p.blobs[i].stats.size
-	}
-	for digest := range blobs {
-		count := registryCounts[digest]
-		if count > 1 {
-			p.stats.sharedSize += p.parent.parent.GetBlob(godigest.Digest(digest)).Size
-			p.stats.sharedCount++
-		} else {
-			p.stats.uniqueSize += p.parent.parent.GetBlob(godigest.Digest(digest)).Size
-			p.stats.uniqueCount++
-		}
-	}
-}
-
-type repositoryBlobCopy struct {
-	parent          *repositoryPlan
-	fromRef         reference.DockerImageReference
-	toRef           reference.DockerImageReference
-	destinationType DestinationType
-	location        string
-
-	lock  sync.Mutex
-	from  distribution.BlobService
-	to    distribution.BlobService
-	blobs sets.String
-
-	stats struct {
-		size        int64
-		averageSize int64
-	}
-}
-
-func (p *repositoryBlobCopy) AlreadyExists(blob distribution.Descriptor) {
-	p.parent.parent.parent.CacheBlob(blob)
-	p.parent.parent.AssociateBlob(blob.Digest, p.parent.name)
-	p.parent.ExpectBlob(blob.Digest)
-
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	p.blobs.Delete(blob.Digest.String())
-}
-
-func (p *repositoryBlobCopy) Copy(blob distribution.Descriptor, from, to distribution.BlobService) {
-	p.parent.parent.parent.CacheBlob(blob)
-
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	if p.from == nil {
-		p.from = from
-	}
-	if p.to == nil {
-		p.to = to
-	}
-	p.blobs.Insert(blob.Digest.String())
-}
-
-func (p *repositoryBlobCopy) trim() bool {
-	return len(p.blobs) == 0
-}
-
-func (p *repositoryBlobCopy) calculateStats() {
-	for digest := range p.blobs {
-		p.stats.size += p.parent.parent.parent.GetBlob(godigest.Digest(digest)).Size
-	}
-	if len(p.blobs) > 0 {
-		p.stats.averageSize = p.stats.size / int64(len(p.blobs))
-	}
-}
-
-type repositoryManifestPlan struct {
-	parent          *repositoryPlan
-	toRef           reference.DockerImageReference
-	destinationType DestinationType
-
-	lock    sync.Mutex
-	to      distribution.ManifestService
-	toBlobs distribution.BlobService
-
-	digestsToTags map[godigest.Digest]sets.String
-	digestCopies  sets.String
-
-	stats struct {
-		count int
-	}
-}
-
-func (p *repositoryManifestPlan) Copy(srcDigest godigest.Digest, srcManifest distribution.Manifest, tags []string, to distribution.ManifestService, toBlobs distribution.BlobService) {
-	p.parent.parent.parent.CacheManifest(srcDigest, srcManifest)
-
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	if p.to == nil {
-		p.to = to
-	}
-	if p.toBlobs == nil {
-		p.toBlobs = toBlobs
-	}
-
-	if len(tags) == 0 {
-		p.digestCopies.Insert(srcDigest.String())
-		return
-	}
-	allTags := p.digestsToTags[srcDigest]
-	if allTags == nil {
-		allTags = sets.NewString()
-		p.digestsToTags[srcDigest] = allTags
-	}
-	allTags.Insert(tags...)
-}
-
-func (p *repositoryManifestPlan) inputDigests() sets.String {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	names := sets.NewString()
-	for digest := range p.digestsToTags {
-		names.Insert(digest.String())
-	}
-	return names
-}
-
-func (p *repositoryManifestPlan) trim() bool {
-	for digest, tags := range p.digestsToTags {
-		if len(tags) == 0 {
-			delete(p.digestsToTags, digest)
-		}
-	}
-	return len(p.digestCopies) == 0 && len(p.digestsToTags) == 0
-}
-
-func (p *repositoryManifestPlan) calculateStats() {
-	p.stats.count += len(p.digestCopies)
-	for _, tags := range p.digestsToTags {
-		p.stats.count += len(tags)
-	}
-}
-
-// Greedy turns a plan into parallelizable work by taking one repo at a time. It guarantees
-// that no two units of work within a phase attempt to upload the same blob concurrently. In the
-// worst case each phase has one unit of work.
-func Greedy(plan *plan) *workPlan {
-	remaining := make(map[string]map[string]repositoryWork)
-	for name, registry := range plan.registries {
-		work := make(map[string]repositoryWork)
-		remaining[name] = work
-		for repoName, repository := range registry.repositories {
-			work[repoName] = repositoryWork{
-				registry:   registry,
-				repository: repository,
-			}
-		}
-	}
-
-	alreadyUploaded := make(map[string]sets.String)
-
-	var phases []phase
-	for len(remaining) > 0 {
-		var independent []repositoryWork
-		for name, registry := range remaining {
-			// we can always take any repository that has no shared layers
-			if found := takeIndependent(registry); len(found) > 0 {
-				independent = append(independent, found...)
-			}
-			exists := alreadyUploaded[name]
-			if exists == nil {
-				exists = sets.NewString()
-				alreadyUploaded[name] = exists
-			}
-
-			// take the most shared repositories and any that don't overlap with it
-			independent = append(independent, takeMostSharedWithoutOverlap(registry, exists)...)
-			if len(registry) == 0 {
-				delete(remaining, name)
-			}
-		}
-		for _, work := range independent {
-			repositoryPlanAddAllExcept(work.repository, alreadyUploaded[work.registry.name], nil)
-		}
-		phases = append(phases, phase{independent: independent})
-	}
-	work := &workPlan{
-		phases: phases,
-	}
-	work.calculateStats()
-	return work
-}
-
-func takeIndependent(all map[string]repositoryWork) []repositoryWork {
-	var work []repositoryWork
-	for k, v := range all {
-		if v.repository.stats.sharedCount == 0 {
-			delete(all, k)
-			work = append(work, v)
-		}
-	}
-	return work
-}
-
-type keysWithCount struct {
-	name  string
-	count int
-}
-
-// takeMostSharedWithoutOverlap is a greedy algorithm that finds the repositories with the
-// most shared layers that do not overlap. It will always return at least one unit of work.
-func takeMostSharedWithoutOverlap(all map[string]repositoryWork, alreadyUploaded sets.String) []repositoryWork {
-	keys := make([]keysWithCount, 0, len(all))
-	for k, v := range all {
-		keys = append(keys, keysWithCount{name: k, count: int(v.repository.stats.sharedCount)})
-	}
-	sort.Slice(keys, func(i, j int) bool { return keys[i].count > keys[j].count })
-
-	// from the set of possible work, ordered from most shared to least shared, take:
-	// 1. the first available unit of work
-	// 2. any other unit of work that does not have overlapping shared blobs
-	uploadingBlobs := sets.NewString()
-	var work []repositoryWork
-	for _, key := range keys {
-		name := key.name
-		next, ok := all[name]
-		if !ok {
-			continue
-		}
-		if repositoryPlanHasAnyBlobs(next.repository, uploadingBlobs) {
-			continue
-		}
-		repositoryPlanAddAllExcept(next.repository, uploadingBlobs, alreadyUploaded)
-		delete(all, name)
-		work = append(work, next)
-	}
-	return work
-}
-
-func repositoryPlanAddAllExcept(plan *repositoryPlan, blobs sets.String, ignore sets.String) {
-	for i := range plan.blobs {
-		for key := range plan.blobs[i].blobs {
-			if !ignore.Has(key) {
-				blobs.Insert(key)
-			}
-		}
-	}
-}
-
-func repositoryPlanHasAnyBlobs(plan *repositoryPlan, blobs sets.String) bool {
-	for i := range plan.blobs {
-		if stringsIntersects(blobs, plan.blobs[i].blobs) {
-			return true
-		}
-	}
-	return false
-}
-
-func stringsIntersects(a, b sets.String) bool {
-	for key := range a {
-		if _, ok := b[key]; ok {
-			return true
-		}
-	}
-	return false
-}
-
-func takeOne(all map[string]repositoryWork) []repositoryWork {
-	for k, v := range all {
-		delete(all, k)
-		return []repositoryWork{v}
-	}
-	return nil
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/mirror/s3.go b/vendor/github.com/openshift/oc/pkg/cli/image/mirror/s3.go
deleted file mode 100644
index 96c53dfb65cd..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/mirror/s3.go
+++ /dev/null
@@ -1,465 +0,0 @@
-package mirror
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-	"path"
-	"strings"
-	"sync"
-	"time"
-
-	"k8s.io/klog"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/aws/session"
-	"github.com/aws/aws-sdk-go/service/s3"
-	"github.com/aws/aws-sdk-go/service/s3/s3manager"
-
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/reference"
-	"github.com/docker/distribution/registry/client/auth"
-	"github.com/docker/distribution/registry/client/transport"
-	godigest "github.com/opencontainers/go-digest"
-)
-
-type s3Driver struct {
-	UserAgent string
-	Region    string
-	Creds     auth.CredentialStore
-	CopyFrom  []string
-
-	repositories map[string]*s3.S3
-}
-
-type s3CredentialStore struct {
-	store     auth.CredentialStore
-	url       *url.URL
-	retrieved bool
-}
-
-func (s *s3CredentialStore) IsExpired() bool { return !s.retrieved }
-
-func (s *s3CredentialStore) Retrieve() (credentials.Value, error) {
-	s.retrieved = false
-	accessKeyID, secretAccessKey := s.store.Basic(s.url)
-	if len(accessKeyID) == 0 || len(secretAccessKey) == 0 {
-		return credentials.Value{}, fmt.Errorf("no AWS credentials located for %s", s.url)
-	}
-	s.retrieved = true
-	klog.V(4).Infof("found credentials for %s", s.url)
-	return credentials.Value{
-		AccessKeyID:     accessKeyID,
-		SecretAccessKey: secretAccessKey,
-		ProviderName:    "DockerCfg",
-	}, nil
-}
-
-func (d *s3Driver) newObject(server *url.URL, region string, insecure bool, securityDomain *url.URL) (*s3.S3, error) {
-	key := fmt.Sprintf("%s:%s:%t:%s", server, region, insecure, securityDomain)
-	s3obj, ok := d.repositories[key]
-	if ok {
-		return s3obj, nil
-	}
-
-	awsConfig := aws.NewConfig()
-
-	creds := credentials.NewChainCredentials([]credentials.Provider{
-		&s3CredentialStore{store: d.Creds, url: securityDomain},
-		&credentials.EnvProvider{},
-		&credentials.SharedCredentialsProvider{},
-	})
-
-	awsConfig.WithCredentials(creds)
-	awsConfig.WithRegion(region)
-	awsConfig.WithDisableSSL(insecure)
-
-	switch {
-	case bool(klog.V(10)):
-		awsConfig.WithLogLevel(aws.LogDebugWithHTTPBody | aws.LogDebugWithRequestErrors | aws.LogDebugWithSigning)
-	case bool(klog.V(8)):
-		awsConfig.WithLogLevel(aws.LogDebugWithRequestErrors)
-	case bool(klog.V(6)):
-		awsConfig.WithLogLevel(aws.LogDebug)
-	}
-
-	if d.UserAgent != "" {
-		awsConfig.WithHTTPClient(&http.Client{
-			Transport: transport.NewTransport(http.DefaultTransport, transport.NewHeaderRequestModifier(http.Header{http.CanonicalHeaderKey("User-Agent"): []string{d.UserAgent}})),
-		})
-	}
-	s, err := session.NewSession(awsConfig)
-	if err != nil {
-		return nil, err
-	}
-	s3obj = s3.New(s)
-	if d.repositories == nil {
-		d.repositories = make(map[string]*s3.S3)
-	}
-	d.repositories[key] = s3obj
-	return s3obj, nil
-}
-
-func (d *s3Driver) Repository(ctx context.Context, server *url.URL, repoName string, insecure bool) (distribution.Repository, error) {
-	parts := strings.SplitN(repoName, "/", 3)
-	if len(parts) < 3 {
-		return nil, fmt.Errorf("you must pass a three segment repository name for s3 uploads, where the first segment is the region and the second segment is the bucket")
-	}
-	s3obj, err := d.newObject(server, parts[0], insecure, &url.URL{Scheme: server.Scheme, Host: server.Host, Path: "/" + repoName})
-	if err != nil {
-		return nil, err
-	}
-
-	ref, err := reference.Parse(parts[2])
-	if err != nil {
-		return nil, err
-	}
-	named, ok := ref.(reference.Named)
-	if !ok {
-		return nil, fmt.Errorf("%s is not a valid repository name", parts[2])
-	}
-
-	repo := &s3Repository{
-		ctx:      ctx,
-		s3:       s3obj,
-		bucket:   parts[1],
-		repoName: named,
-		copyFrom: d.CopyFrom,
-	}
-	return repo, nil
-}
-
-type s3Repository struct {
-	ctx      context.Context
-	s3       *s3.S3
-	bucket   string
-	once     sync.Once
-	initErr  error
-	copyFrom []string
-
-	repoName reference.Named
-}
-
-// Named returns the name of the repository.
-func (r *s3Repository) Named() reference.Named {
-	return r.repoName
-}
-
-// Manifests returns a reference to this repository's manifest service
-// with the supplied options applied.
-func (r *s3Repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
-	return &s3ManifestService{r: r}, nil
-}
-
-// Blobs returns a reference to this repository's blob service.
-func (r *s3Repository) Blobs(ctx context.Context) distribution.BlobStore {
-	return &s3BlobStore{r: r}
-}
-
-// Tags returns a reference to this repository's tag service.
-func (r *s3Repository) Tags(ctx context.Context) distribution.TagService {
-	return nil
-}
-
-func (r *s3Repository) attemptCopy(id string, bucket, key string) bool {
-	if _, err := r.s3.HeadObject(&s3.HeadObjectInput{
-		Bucket: aws.String(bucket),
-		Key:    aws.String(key),
-	}); err == nil {
-		return true
-	}
-	if len(id) == 0 {
-		return false
-	}
-	for _, copyFrom := range r.copyFrom {
-		var sourceKey string
-		if strings.HasSuffix(copyFrom, "[store]") {
-			sourceKey = strings.TrimSuffix(copyFrom, "[store]")
-			d, err := godigest.Parse(id)
-			if err != nil {
-				klog.V(4).Infof("Object %q is not a valid digest, cannot perform [store] copy: %v", id, err)
-				continue
-			}
-			sourceKey = fmt.Sprintf("%s%s/%s/%s/data", sourceKey, d.Algorithm().String(), d.Hex()[:2], d.Hex())
-		} else {
-			sourceKey = path.Join(copyFrom, id)
-		}
-		_, err := r.s3.CopyObject(&s3.CopyObjectInput{
-			CopySource: aws.String(sourceKey),
-			Bucket:     aws.String(bucket),
-			Key:        aws.String(key),
-		})
-		if err == nil {
-			klog.V(4).Infof("Copied existing object from %s to %s", sourceKey, key)
-			return true
-		}
-		if a, ok := err.(awserr.Error); ok && a.Code() == "NoSuchKey" {
-			klog.V(4).Infof("No existing object matches source %s", sourceKey)
-			continue
-		}
-		klog.V(4).Infof("Unable to copy from %s to %s: %v", sourceKey, key, err)
-	}
-	return false
-}
-
-func (r *s3Repository) conditionalUpload(input *s3manager.UploadInput, id string) error {
-	if r.attemptCopy(id, *input.Bucket, *input.Key) {
-		return nil
-	}
-	_, err := s3manager.NewUploaderWithClient(r.s3).Upload(input)
-	return err
-}
-
-func (r *s3Repository) init() error {
-	r.once.Do(func() {
-		r.initErr = r.conditionalUpload(&s3manager.UploadInput{
-			Bucket:   aws.String(r.bucket),
-			Metadata: map[string]*string{"X-Docker-Distribution-API-Version": aws.String("registry/2.0")},
-			Body:     bytes.NewBufferString(""),
-			Key:      aws.String("/v2/"),
-		}, "")
-	})
-	return r.initErr
-}
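
Reviewer note: attemptCopy and conditionalUpload together form a probe → server-side copy → upload ladder, which is the core idea of this backend. A condensed sketch of the same ladder against the aws-sdk-go v1 API follows; bucket, key, and copy-source names are illustrative (CopySource takes the "source-bucket/source-key" form).

package main

import (
	"bytes"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

// ensureObject makes key exist in bucket as cheaply as possible: skip if it is
// already there, try a server-side copy, and only then upload the payload.
func ensureObject(svc *s3.S3, bucket, key, copySource string, payload []byte) error {
	// 1. Already present? Done.
	if _, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	}); err == nil {
		return nil
	}
	// 2. Server-side copy from a previously mirrored location (no data moves
	// through the client).
	if _, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String(bucket),
		Key:        aws.String(key),
		CopySource: aws.String(copySource),
	}); err == nil {
		return nil
	}
	// 3. Fall back to uploading the bytes themselves.
	_, err := s3manager.NewUploaderWithClient(svc).Upload(&s3manager.UploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		Body:   bytes.NewReader(payload),
	})
	return err
}

func main() {
	svc := s3.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))
	_ = ensureObject(svc, "my-bucket", "/v2/myrepo/blobs/sha256:0123", "other-bucket/some-blob", []byte("payload"))
}
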
-
-type noSeekReader struct {
-	io.Reader
-}
-
-var _ io.ReadSeeker = noSeekReader{}
-
-func (noSeekReader) Seek(offset int64, whence int) (int64, error) {
-	return 0, fmt.Errorf("unable to seek to %d via %d", offset, whence)
-}
-
-type s3ManifestService struct {
-	r *s3Repository
-}
-
-// Exists returns true if the manifest exists.
-func (s *s3ManifestService) Exists(ctx context.Context, dgst godigest.Digest) (bool, error) {
-	return false, fmt.Errorf("unimplemented")
-}
-
-// Get retrieves the manifest specified by the given digest
-func (s *s3ManifestService) Get(ctx context.Context, dgst godigest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
-	return nil, fmt.Errorf("unimplemented")
-}
-
-// Put creates or updates the given manifest returning the manifest digest
-func (s *s3ManifestService) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (godigest.Digest, error) {
-	if err := s.r.init(); err != nil {
-		return "", err
-	}
-	mediaType, payload, err := manifest.Payload()
-	if err != nil {
-		return "", err
-	}
-	dgst := godigest.FromBytes(payload)
-	blob := fmt.Sprintf("/v2/%s/blobs/%s", s.r.repoName, dgst)
-
-	if err := s.r.conditionalUpload(&s3manager.UploadInput{
-		Bucket:      aws.String(s.r.bucket),
-		ContentType: aws.String(mediaType),
-		Body:        bytes.NewBuffer(payload),
-		Key:         aws.String(blob),
-	}, dgst.String()); err != nil {
-		return "", err
-	}
-
-	// set manifests
-	tags := []string{dgst.String()}
-	for _, option := range options {
-		if opt, ok := option.(distribution.WithTagOption); ok {
-			tags = append(tags, opt.Tag)
-		}
-	}
-	for _, tag := range tags {
-		if _, err := s.r.s3.CopyObject(&s3.CopyObjectInput{
-			Bucket:      aws.String(s.r.bucket),
-			ContentType: aws.String(mediaType),
-			CopySource:  aws.String(path.Join(s.r.bucket, blob)),
-			Key:         aws.String(fmt.Sprintf("/v2/%s/manifests/%s", s.r.repoName, tag)),
-		}); err != nil {
-			return "", err
-		}
-	}
-	return dgst, nil
-}
-
-// Delete removes the manifest specified by the given digest. Deleting
-// a manifest that doesn't exist will return ErrManifestNotFound
-func (s *s3ManifestService) Delete(ctx context.Context, dgst godigest.Digest) error {
-	return fmt.Errorf("unimplemented")
-}
-
-type s3BlobStore struct {
-	r *s3Repository
-}
-
-func (s *s3BlobStore) Stat(ctx context.Context, dgst godigest.Digest) (distribution.Descriptor, error) {
-	return distribution.Descriptor{}, fmt.Errorf("unimplemented")
-}
-
-func (s *s3BlobStore) Delete(ctx context.Context, dgst godigest.Digest) error {
-	return fmt.Errorf("unimplemented")
-}
-
-func (s *s3BlobStore) Get(ctx context.Context, dgst godigest.Digest) ([]byte, error) {
-	return nil, fmt.Errorf("unimplemented")
-}
-
-func (s *s3BlobStore) Open(ctx context.Context, dgst godigest.Digest) (distribution.ReadSeekCloser, error) {
-	return nil, fmt.Errorf("unimplemented")
-}
-
-func (s *s3BlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst godigest.Digest) error {
-	return fmt.Errorf("unimplemented")
-}
-
-func (s *s3BlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
-	if err := s.r.init(); err != nil {
-		return distribution.Descriptor{}, err
-	}
-	d := godigest.FromBytes(p)
-	if err := s.r.conditionalUpload(&s3manager.UploadInput{
-		Bucket:      aws.String(s.r.bucket),
-		ContentType: aws.String(mediaType),
-		Body:        bytes.NewBuffer(p),
-		Key:         aws.String(fmt.Sprintf("/v2/%s/blobs/%s", s.r.repoName, d)),
-	}, d.String()); err != nil {
-		return distribution.Descriptor{}, err
-	}
-	return distribution.Descriptor{MediaType: mediaType, Size: int64(len(p)), Digest: d}, nil
-}
-
-func (s *s3BlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
-	var opts distribution.CreateOptions
-	for _, option := range options {
-		err := option.Apply(&opts)
-		if err != nil {
-			return nil, err
-		}
-	}
-	if opts.Mount.Stat == nil || len(opts.Mount.Stat.Digest) == 0 {
-		return nil, fmt.Errorf("S3 target blob store requires blobs to have mount stats that include a digest")
-	}
-	d := opts.Mount.Stat.Digest
-
-	// attempt to copy before returning a writer
-	key := fmt.Sprintf("/v2/%s/blobs/%s", s.r.repoName, d)
-	if s.r.attemptCopy(d.String(), s.r.bucket, key) {
-		return nil, ErrAlreadyExists
-	}
-
-	return s.r.newWriter(key, d.String(), opts.Mount.Stat.Size), nil
-}
-
-func (s *s3BlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
-	return nil, fmt.Errorf("unimplemented")
-}
-
-// writer attempts to upload parts to S3 in a buffered fashion where the last
-// part is at least as large as the chunksize, so the multipart upload could be
-// cleanly resumed in the future. This is violated if Close is called after less
-// than a full chunk is written.
-type writer struct {
-	driver    *s3Repository
-	key       string
-	uploadID  string
-	closed    bool
-	committed bool
-	cancelled bool
-	size      int64
-	startedAt time.Time
-}
-
-func (d *s3Repository) newWriter(key, uploadID string, size int64) distribution.BlobWriter {
-	return &writer{
-		driver:   d,
-		key:      key,
-		uploadID: uploadID,
-		size:     size,
-	}
-}
-
-func (w *writer) ID() string {
-	return w.uploadID
-}
-
-func (w *writer) StartedAt() time.Time {
-	return w.startedAt
-}
-
-func (w *writer) ReadFrom(r io.Reader) (int64, error) {
-	switch {
-	case w.closed:
-		return 0, fmt.Errorf("already closed")
-	case w.committed:
-		return 0, fmt.Errorf("already committed")
-	case w.cancelled:
-		return 0, fmt.Errorf("already cancelled")
-	}
-	if w.startedAt.IsZero() {
-		w.startedAt = time.Now()
-	}
-	_, err := s3manager.NewUploaderWithClient(w.driver.s3).Upload(&s3manager.UploadInput{
-		Bucket:      aws.String(w.driver.bucket),
-		ContentType: aws.String("application/octet-stream"),
-		Key:         aws.String(w.key),
-		Body:        r,
-	})
-	if err != nil {
-		return 0, err
-	}
-	return w.size, nil
-}
-
-func (w *writer) Write(p []byte) (int, error) {
-	return 0, fmt.Errorf("writer only supports ReadFrom")
-}
-
-func (w *writer) Size() int64 {
-	return w.size
-}
-
-func (w *writer) Close() error {
-	switch {
-	case w.closed:
-		return fmt.Errorf("already closed")
-	}
-	w.closed = true
-	return nil
-}
-
-func (w *writer) Cancel(ctx context.Context) error {
-	switch {
-	case w.closed:
-		return fmt.Errorf("already closed")
-	case w.committed:
-		return fmt.Errorf("already committed")
-	}
-	w.cancelled = true
-	return nil
-}
-
-// TODO: verify uploaded descriptor matches
-func (w *writer) Commit(ctx context.Context, descriptor distribution.Descriptor) (distribution.Descriptor, error) {
-	desc := descriptor
-	switch {
-	case w.closed:
-		return desc, fmt.Errorf("already closed")
-	case w.committed:
-		return desc, fmt.Errorf("already committed")
-	case w.cancelled:
-		return desc, fmt.Errorf("already cancelled")
-	}
-	w.committed = true
-	return desc, nil
-}
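
Reviewer note: every key this file writes follows the registry v2 URL shape — /v2/<name>/blobs/<digest> for content and /v2/<name>/manifests/<ref> for tags and digests — so a bucket populated by this backend mirrors the layout a registry client expects. A tiny sketch of the key scheme, with an illustrative repository and digest:

package main

import "fmt"

func blobKey(repo, digest string) string {
	return fmt.Sprintf("/v2/%s/blobs/%s", repo, digest)
}

func manifestKey(repo, ref string) string {
	return fmt.Sprintf("/v2/%s/manifests/%s", repo, ref)
}

func main() {
	fmt.Println(blobKey("myteam/app", "sha256:0123abcd")) // /v2/myteam/app/blobs/sha256:0123abcd
	fmt.Println(manifestKey("myteam/app", "v1.0"))        // /v2/myteam/app/manifests/v1.0
}
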
diff --git a/vendor/github.com/openshift/oc/pkg/cli/image/workqueue/workqueue.go b/vendor/github.com/openshift/oc/pkg/cli/image/workqueue/workqueue.go
deleted file mode 100644
index d9cc7629991e..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/image/workqueue/workqueue.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package workqueue
-
-import (
-	"sync"
-
-	"k8s.io/klog"
-)
-
-type Work interface {
-	Parallel(fn func())
-}
-
-type Try interface {
-	Try(fn func() error)
-}
-
-type Interface interface {
-	Batch(func(Work))
-	Try(func(Try)) error
-	Queue(func(Work))
-	Done()
-}
-
-type workQueue struct {
-	ch chan workUnit
-	wg *sync.WaitGroup
-}
-
-func New(workers int, stopCh <-chan struct{}) Interface {
-	q := &workQueue{
-		ch: make(chan workUnit, 100),
-		wg: &sync.WaitGroup{},
-	}
-	go q.run(workers, stopCh)
-	return q
-}
-
-func (q *workQueue) run(workers int, stopCh <-chan struct{}) {
-	if workers <= 0 {
-		workers = 1
-	}
-	for i := 0; i < workers; i++ {
-		go func(i int) {
-			defer klog.V(4).Infof("worker %d stopping", i)
-			for {
-				select {
-				case work, ok := <-q.ch:
-					if !ok {
-						return
-					}
-					work.fn()
-					work.wg.Done()
-				case <-stopCh:
-					return
-				}
-			}
-		}(i)
-	}
-	<-stopCh
-	klog.V(4).Infof("work queue exiting")
-}
-
-func (q *workQueue) Batch(fn func(Work)) {
-	w := &worker{
-		wg: &sync.WaitGroup{},
-		ch: q.ch,
-	}
-	fn(w)
-	w.wg.Wait()
-}
-
-func (q *workQueue) Try(fn func(Try)) error {
-	w := &worker{
-		wg:  &sync.WaitGroup{},
-		ch:  q.ch,
-		err: make(chan error, 1),
-	}
-	w.wg.Add(1)
-	go func() {
-		fn(w)
-		w.wg.Done()
-	}()
-	return w.FirstError()
-}
-
-func (q *workQueue) Queue(fn func(Work)) {
-	w := &worker{
-		wg: q.wg,
-		ch: q.ch,
-	}
-	fn(w)
-}
-
-func (q *workQueue) Done() {
-	q.wg.Wait()
-}
-
-type workUnit struct {
-	fn func()
-	wg *sync.WaitGroup
-}
-
-type worker struct {
-	wg  *sync.WaitGroup
-	ch  chan workUnit
-	err chan error
-}
-
-func (w *worker) FirstError() error {
-	done := make(chan struct{})
-	go func() {
-		w.wg.Wait()
-		close(done)
-	}()
-	for {
-		select {
-		case err := <-w.err:
-			if err != nil {
-				return err
-			}
-		case <-done:
-			return nil
-		}
-	}
-}
-
-func (w *worker) Parallel(fn func()) {
-	w.wg.Add(1)
-	w.ch <- workUnit{wg: w.wg, fn: fn}
-}
-
-func (w *worker) Try(fn func() error) {
-	w.wg.Add(1)
-	w.ch <- workUnit{
-		wg: w.wg,
-		fn: func() {
-			err := fn()
-			if w.err == nil {
-				// TODO: have the work queue accumulate errors and release them with Done()
-				if err != nil {
-					klog.Errorf("Worker error: %v", err)
-				}
-				return
-			}
-			klog.V(4).Infof("about to send work queue error: %v", err)
-			w.err <- err
-		},
-	}
-}
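
Reviewer note: Batch and Try have different completion semantics that are easy to miss — Batch blocks until every Parallel unit finishes, while Try additionally returns the first error any unit reports. A short usage sketch against this package as it existed at the vendored import path (worker count and units are arbitrary):

package main

import (
	"errors"
	"fmt"

	"github.com/openshift/oc/pkg/cli/image/workqueue"
)

func main() {
	stopCh := make(chan struct{})
	defer close(stopCh)
	q := workqueue.New(4, stopCh)

	// Batch: run units in parallel, return only when all have finished.
	q.Batch(func(w workqueue.Work) {
		for i := 0; i < 3; i++ {
			i := i
			w.Parallel(func() { fmt.Println("unit", i) })
		}
	})

	// Try: like Batch, but surfaces the first non-nil error.
	err := q.Try(func(t workqueue.Try) {
		t.Try(func() error { return nil })
		t.Try(func() error { return errors.New("boom") })
	})
	fmt.Println("first error:", err)
}
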
diff --git a/vendor/github.com/openshift/oc/pkg/cli/importer/appjson/appjson.go b/vendor/github.com/openshift/oc/pkg/cli/importer/appjson/appjson.go
deleted file mode 100644
index d62893a3ec6b..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/importer/appjson/appjson.go
+++ /dev/null
@@ -1,309 +0,0 @@
-package appjson
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"os"
-	"path"
-	"path/filepath"
-	"strings"
-
-	"github.com/spf13/cobra"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/meta"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	kerrors "k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/printers"
-	"k8s.io/cli-runtime/pkg/resource"
-	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/rest"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	"github.com/openshift/oc/pkg/helpers/newapp/appjson"
-	appcmd "github.com/openshift/oc/pkg/helpers/newapp/cmd"
-	"github.com/openshift/oc/pkg/helpers/template/templateprocessorclient"
-)
-
-const AppJSONV1GeneratorName = "app-json/v1"
-
-var (
-	appJSONLong = templates.LongDesc(`
-		Import app.json files as OpenShift objects
-
-		app.json defines the pattern of a simple, stateless web application that can be horizontally scaled.
-		This command will transform a provided app.json object into its OpenShift equivalent.
-		During transformation, fields in the app.json syntax that are not relevant when running on top of
-		a containerized platform will be ignored and a warning printed.
-
-		The command will create objects unless you pass the -o yaml or --as-template flags to generate a
-		configuration file for later use.
-
-		Experimental: This command is under active development and may change without notice.`)
-
-	appJSONExample = templates.Examples(`
-		# Import a directory containing an app.json file
-		$ %[1]s app.json -f .
-
-		# Turn an app.json file into a template
-		$ %[1]s app.json -f ./app.json -o yaml --as-template`)
-)
-
-type AppJSONOptions struct {
-	PrintFlags *genericclioptions.PrintFlags
-
-	Printer printers.ResourcePrinter
-
-	BaseImage        string
-	Generator        string
-	AsTemplate       string
-	OutputVersionStr string
-
-	OutputVersions []schema.GroupVersion
-
-	Namespace     string
-	RESTMapper    meta.RESTMapper
-	DynamicClient dynamic.Interface
-	Client        rest.Interface
-
-	genericclioptions.IOStreams
-	resource.FilenameOptions
-}
-
-func NewAppJSONOptions(streams genericclioptions.IOStreams) *AppJSONOptions {
-	return &AppJSONOptions{
-		IOStreams:  streams,
-		PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme),
-		Generator:  AppJSONV1GeneratorName,
-	}
-}
-
-// NewCmdAppJSON imports an app.json file (schema described here: https://devcenter.heroku.com/articles/app-json-schema)
-// as a template.
-func NewCmdAppJSON(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewAppJSONOptions(streams)
-	cmd := &cobra.Command{
-		Use:     "app.json -f APPJSON",
-		Short:   "Import an app.json definition into OpenShift (experimental)",
-		Long:    appJSONLong,
-		Example: fmt.Sprintf(appJSONExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Validate())
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-	usage := "Filename, directory, or URL to app.json file to use"
-	kcmdutil.AddJsonFilenameFlag(cmd.Flags(), &o.Filenames, usage)
-	cmd.MarkFlagRequired("filename")
-	cmd.Flags().StringVar(&o.BaseImage, "image", o.BaseImage, "An optional image to use as your base Docker build (must have ONBUILD directives)")
-	cmd.Flags().StringVar(&o.Generator, "generator", o.Generator, "The name of the generator strategy to use - specify this value for backwards compatibility.")
-	cmd.Flags().StringVar(&o.AsTemplate, "as-template", o.AsTemplate, "If set, generate a template with the provided name")
-	cmd.Flags().StringVar(&o.OutputVersionStr, "output-version", o.OutputVersionStr, "The preferred API versions of the output objects")
-	cmd.Flags().MarkDeprecated("output-version", "this flag is deprecated and will be removed in the future")
-
-	o.PrintFlags.AddFlags(cmd)
-	return cmd
-}
-
-func (o *AppJSONOptions) createResources(list *corev1.List) (*corev1.List, []error) {
-	errors := []error{}
-	created := &corev1.List{}
-
-	for i, item := range list.Items {
-		var err error
-		unstructuredObj := &unstructured.Unstructured{}
-		unstructuredObj.Object, err = runtime.DefaultUnstructuredConverter.ToUnstructured(item)
-		if err != nil {
-			errors = append(errors, err)
-			continue
-		}
-
-		mapping, err := o.RESTMapper.RESTMapping(unstructuredObj.GroupVersionKind().GroupKind(), unstructuredObj.GroupVersionKind().Version)
-		if err != nil {
-			errors = append(errors, err)
-			continue
-		}
-
-		_, err = o.DynamicClient.Resource(mapping.Resource).Namespace(o.Namespace).Create(unstructuredObj, metav1.CreateOptions{})
-		if err != nil {
-			errors = append(errors, err)
-			continue
-		}
-
-		created.Items = append(created.Items, list.Items[i])
-	}
-
-	return created, errors
-}
-
-func (o *AppJSONOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	for _, v := range strings.Split(o.OutputVersionStr, ",") {
-		gv, err := schema.ParseGroupVersion(v)
-		if err != nil {
-			return fmt.Errorf("provided output-version %q is not valid: %v", v, err)
-		}
-		o.OutputVersions = append(o.OutputVersions, gv)
-	}
-	o.OutputVersions = append(o.OutputVersions, scheme.Scheme.PrioritizedVersionsAllGroups()...)
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-
-	o.RESTMapper, err = f.ToRESTMapper()
-	if err != nil {
-		return err
-	}
-
-	o.DynamicClient, err = dynamic.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	o.Printer, err = o.PrintFlags.ToPrinter()
-	if err != nil {
-		return err
-	}
-
-	o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	clientset, err := kubernetes.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	o.Client = clientset.CoreV1().RESTClient()
-
-	return nil
-}
-
-func (o *AppJSONOptions) Validate() error {
-	if len(o.Filenames) != 1 {
-		return fmt.Errorf("you must provide the path to an app.json file or directory containing app.json")
-	}
-	switch o.Generator {
-	case AppJSONV1GeneratorName:
-	default:
-		return fmt.Errorf("the generator %q is not supported, use: %s", o.Generator, AppJSONV1GeneratorName)
-	}
-	return nil
-}
-
-func (o *AppJSONOptions) Run() error {
-	localPath, contents, err := contentsForPathOrURL(o.Filenames[0], o.In, "app.json")
-	if err != nil {
-		return err
-	}
-
-	g := &appjson.Generator{
-		LocalPath: localPath,
-		BaseImage: o.BaseImage,
-	}
-	switch {
-	case len(o.AsTemplate) > 0:
-		g.Name = o.AsTemplate
-	case len(localPath) > 0:
-		g.Name = filepath.Base(localPath)
-	default:
-		g.Name = path.Base(path.Dir(o.Filenames[0]))
-	}
-	if len(g.Name) == 0 {
-		g.Name = "app"
-	}
-
-	template, err := g.Generate(contents)
-	if err != nil {
-		return err
-	}
-
-	template.ObjectLabels = map[string]string{"app.json": template.Name}
-
-	// TODO: stop implying --dry-run behavior when an --output value is provided
-	if o.PrintFlags.OutputFormat != nil && len(*o.PrintFlags.OutputFormat) > 0 || len(o.AsTemplate) > 0 {
-		var obj runtime.Object
-		if len(o.AsTemplate) > 0 {
-			template.Name = o.AsTemplate
-			obj = template
-		} else {
-			obj = &corev1.List{Items: template.Objects}
-		}
-		return o.Printer.PrintObj(obj, o.Out)
-	}
-
-	templateProcessor := templateprocessorclient.NewTemplateProcessorClient(o.Client, o.Namespace)
-	result, err := appcmd.TransformTemplate(template, templateProcessor, o.Namespace, nil, false)
-	if err != nil {
-		return err
-	}
-
-	appcmd.DescribeGeneratedTemplate(o.Out, "", result, o.Namespace)
-
-	objs := &corev1.List{Items: result.Objects}
-
-	// actually create the objects
-	created, errs := o.createResources(objs)
-
-	// print what we have created first, then return a potential set of errors
-	if err := o.Printer.PrintObj(created, o.Out); err != nil {
-		errs = append(errs, err)
-	}
-
-	return kerrors.NewAggregate(errs)
-}
-
-func contentsForPathOrURL(s string, in io.Reader, subpaths ...string) (string, []byte, error) {
-	switch {
-	case s == "-":
-		contents, err := ioutil.ReadAll(in)
-		return "", contents, err
-	case strings.HasPrefix(s, "http://") || strings.HasPrefix(s, "https://"):
-		_, err := url.Parse(s)
-		if err != nil {
-			return "", nil, fmt.Errorf("the URL passed to filename %q is not valid: %v", s, err)
-		}
-		res, err := http.Get(s)
-		if err != nil {
-			return "", nil, err
-		}
-		defer res.Body.Close()
-		contents, err := ioutil.ReadAll(res.Body)
-		return "", contents, err
-	default:
-		stat, err := os.Stat(s)
-		if err != nil {
-			return s, nil, err
-		}
-		if !stat.IsDir() {
-			contents, err := ioutil.ReadFile(s)
-			return s, contents, err
-		}
-		for _, sub := range subpaths {
-			path := filepath.Join(s, sub)
-			stat, err := os.Stat(path)
-			if err != nil {
-				continue
-			}
-			if stat.IsDir() {
-				continue
-			}
-			contents, err := ioutil.ReadFile(path)
-			return path, contents, err
-		}
-		return s, nil, os.ErrNotExist
-	}
-}
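
Reviewer note: contentsForPathOrURL accepts three input forms — "-" for stdin, an http(s) URL, or a local path (with a subpath search when the path is a directory). A trimmed, self-contained re-implementation of just the dispatch, for illustration (it omits the directory walk above):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"strings"
)

// read dispatches on the three supported input forms.
func read(s string, stdin io.Reader) ([]byte, error) {
	switch {
	case s == "-":
		return ioutil.ReadAll(stdin) // piped content
	case strings.HasPrefix(s, "http://") || strings.HasPrefix(s, "https://"):
		res, err := http.Get(s)
		if err != nil {
			return nil, err
		}
		defer res.Body.Close()
		return ioutil.ReadAll(res.Body)
	default:
		return ioutil.ReadFile(s) // local file; directories need the subpath walk
	}
}

func main() {
	b, err := read("-", strings.NewReader(`{"name":"app"}`))
	fmt.Println(string(b), err)
}
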
diff --git a/vendor/github.com/openshift/oc/pkg/cli/importer/import.go b/vendor/github.com/openshift/oc/pkg/cli/importer/import.go
deleted file mode 100644
index a9c07070b0c5..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/importer/import.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package importer
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-
-	"github.com/openshift/oc/pkg/cli/importer/appjson"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-)
-
-var (
-	importLong = templates.LongDesc(`
-		Import outside applications into OpenShift
-
-		These commands assist in bringing existing applications into OpenShift.`)
-)
-
-// NewCmdImport exposes commands for modifying objects.
-func NewCmdImport(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	cmd := &cobra.Command{
-		Use:   "import COMMAND",
-		Short: "Commands that import applications",
-		Long:  importLong,
-		Run:   kcmdutil.DefaultSubCommandRun(streams.ErrOut),
-	}
-
-	name := fmt.Sprintf("%s import", fullName)
-
-	cmd.AddCommand(appjson.NewCmdAppJSON(name, f, streams))
-	return cmd
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/importimage/importimage.go b/vendor/github.com/openshift/oc/pkg/cli/importimage/importimage.go
deleted file mode 100644
index efa76952738f..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/importimage/importimage.go
+++ /dev/null
@@ -1,551 +0,0 @@
-package importimage
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/spf13/cobra"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/printers"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	imagev1 "github.com/openshift/api/image/v1"
-	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
-	"github.com/openshift/library-go/pkg/image/imageutil"
-	"github.com/openshift/library-go/pkg/image/reference"
-	"github.com/openshift/oc/pkg/cli/tag"
-	"github.com/openshift/oc/pkg/helpers/describe"
-	imagehelpers "github.com/openshift/oc/pkg/helpers/image"
-)
-
-var (
-	importImageLong = templates.LongDesc(`
-		Import the latest image information from a tag in a container image registry
-
-		Image streams allow you to control which images are rolled out to your builds
-		and applications. This command fetches the latest version of an image from a
-		remote repository and updates the image stream tag if it does not match the
-		previous value. Running the command multiple times will not create duplicate
-		entries. When importing an image, only the image metadata is copied, not the
-		image contents.
-
-		If you wish to change the image stream tag or provide more advanced options,
-		see the 'tag' command.`)
-
-	importImageExample = templates.Examples(`
-		%[1]s import-image mystream`)
-)
-
-// ImportImageOptions contains all the necessary information to perform an import.
-type ImportImageOptions struct {
-	PrintFlags *genericclioptions.PrintFlags
-
-	ToPrinter func(string) (printers.ResourcePrinter, error)
-
-	// user set values
-	From                 string
-	Confirm              bool
-	All                  bool
-	Scheduled            bool
-	Insecure             bool
-	InsecureFlagProvided bool
-
-	DryRun bool
-
-	// internal values
-	Namespace       string
-	Name            string
-	Tag             string
-	Target          string
-	ReferencePolicy string
-
-	// helpers
-	imageClient imagev1client.ImageV1Interface
-	isClient    imagev1client.ImageStreamInterface
-
-	genericclioptions.IOStreams
-}
-
-func NewImportImageOptions(name string, streams genericclioptions.IOStreams) *ImportImageOptions {
-	return &ImportImageOptions{
-		PrintFlags:      genericclioptions.NewPrintFlags("imported"),
-		IOStreams:       streams,
-		ReferencePolicy: tag.SourceReferencePolicy,
-	}
-}
-
-// NewCmdImportImage implements the OpenShift cli import-image command.
-func NewCmdImportImage(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewImportImageOptions(fullName, streams)
-
-	cmd := &cobra.Command{
-		Use:     "import-image IMAGESTREAM[:TAG]",
-		Short:   "Imports images from a container image registry",
-		Long:    importImageLong,
-		Example: fmt.Sprintf(importImageExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Validate())
-			kcmdutil.CheckErr(o.Run())
-		},
-	}
-
-	o.PrintFlags.AddFlags(cmd)
-
-	cmd.Flags().StringVar(&o.From, "from", o.From, "A Docker image repository to import images from")
-	cmd.Flags().BoolVar(&o.Confirm, "confirm", o.Confirm, "If true, allow the image stream import location to be set or changed")
-	cmd.Flags().BoolVar(&o.All, "all", o.All, "If true, import all tags from the provided source on creation or if --from is specified")
-	cmd.Flags().StringVar(&o.ReferencePolicy, "reference-policy", o.ReferencePolicy, "Allow pullthrough to be requested for an external image when set to 'local'. Defaults to 'source'.")
-	cmd.Flags().BoolVar(&o.DryRun, "dry-run", o.DryRun, "Fetch information about images without creating or updating an image stream.")
-	cmd.Flags().BoolVar(&o.Scheduled, "scheduled", o.Scheduled, "Set each imported Docker image to be periodically imported from a remote repository. Defaults to false.")
-	cmd.Flags().BoolVar(&o.Insecure, "insecure", o.Insecure, "If true, allow importing from registries that have invalid HTTPS certificates or are hosted via HTTP. This flag will take precedence over the insecure annotation.")
-
-	return cmd
-}
-
-// Complete turns a partially defined ImportImageOptions into a solvent structure
-// which can be validated and used for an import.
-func (o *ImportImageOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	if len(args) > 0 {
-		o.Target = args[0]
-	}
-
-	o.InsecureFlagProvided = cmd.Flags().Lookup("insecure").Changed
-	if !cmd.Flags().Lookup("reference-policy").Changed {
-		o.ReferencePolicy = ""
-	}
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-
-	o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	o.imageClient, err = imagev1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	o.isClient = o.imageClient.ImageStreams(o.Namespace)
-
-	o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) {
-		o.PrintFlags.NamePrintFlags.Operation = operation
-
-		// We assume that the (dry run) message has already been added (if need be)
-		// by the caller of this method.
-		return o.PrintFlags.ToPrinter()
-	}
-
-	return o.parseImageReference()
-}
-
-func (o *ImportImageOptions) parseImageReference() error {
-	targetRef, err := reference.Parse(o.Target)
-	switch {
-	case err != nil:
-		return fmt.Errorf("the image name must be a valid Docker image pull spec or reference to an image stream (e.g. myregistry/myteam/image:tag)")
-	case len(targetRef.ID) > 0:
-		return fmt.Errorf("to import images by ID, use the 'tag' command")
-	case len(targetRef.Tag) != 0 && o.All:
-		// error out
-		return fmt.Errorf("cannot specify a tag %q as well as --all", o.Target)
-	case len(targetRef.Tag) == 0 && !o.All:
-		// apply the default tag
-		targetRef.Tag = imagev1.DefaultImageTag
-	}
-	o.Name = targetRef.Name
-	o.Tag = targetRef.Tag
-
-	return nil
-}
-
-// Validate ensures that a ImportImageOptions is valid and can be used to execute
-// an import.
-func (o *ImportImageOptions) Validate() error {
-	if len(o.Target) == 0 {
-		return fmt.Errorf("you must specify the name of an image stream")
-	}
-
-	return nil
-}
-
-// Run contains all the necessary functionality for the OpenShift cli import-image command.
-func (o *ImportImageOptions) Run() error {
-	stream, isi, err := o.createImageImport()
-	if err != nil {
-		return err
-	}
-
-	result, err := o.imageClient.ImageStreamImports(isi.Namespace).Create(isi)
-	if err != nil {
-		return err
-	}
-
-	message := "imported"
-	if wasError(result) {
-		message = "imported with errors"
-	}
-
-	if o.DryRun {
-		message = fmt.Sprintf("%s (dry run)", message)
-	}
-	message = fmt.Sprintf("%s\n\n", message)
-
-	if result.Status.Import != nil {
-		// TODO: dry-run doesn't return an image stream, so we have to display partial results
-		info, err := describe.DescribeImageStream(result.Status.Import)
-		if err != nil {
-			return err
-		}
-
-		message += fmt.Sprintln(info)
-	}
-
-	if repo := result.Status.Repository; repo != nil {
-		for _, image := range repo.Images {
-			if image.Image != nil {
-				info, err := describe.DescribeImage(image.Image, imageutil.JoinImageStreamTag(stream.Name, image.Tag))
-				if err != nil {
-					fmt.Fprintf(o.ErrOut, "error: tag %s failed: %v\n", image.Tag, err)
-				} else {
-					message += fmt.Sprintln(info)
-				}
-			} else {
-				fmt.Fprintf(o.ErrOut, "error: repository tag %s failed: %v\n", image.Tag, image.Status.Message)
-			}
-		}
-	}
-
-	for _, image := range result.Status.Images {
-		if image.Image != nil {
-			info, err := describe.DescribeImage(image.Image, imageutil.JoinImageStreamTag(stream.Name, image.Tag))
-			if err != nil {
-				fmt.Fprintf(o.ErrOut, "error: tag %s failed: %v\n", image.Tag, err)
-			} else {
-				message += fmt.Sprintln(info)
-			}
-		} else {
-			fmt.Fprintf(o.ErrOut, "error: tag %s failed: %v\n", image.Tag, image.Status.Message)
-		}
-	}
-
-	if r := result.Status.Repository; r != nil && len(r.AdditionalTags) > 0 {
-		message += fmt.Sprintf("\ninfo: The remote repository contained %d additional tags which were not imported: %s\n", len(r.AdditionalTags), strings.Join(r.AdditionalTags, ", "))
-	}
-
-	printer, err := o.ToPrinter(message)
-	if err != nil {
-		return err
-	}
-
-	return printer.PrintObj(stream, o.Out)
-}
-
-func wasError(isi *imagev1.ImageStreamImport) bool {
-	for _, image := range isi.Status.Images {
-		if image.Status.Status == metav1.StatusFailure {
-			return true
-		}
-	}
-	if isi.Status.Repository != nil && isi.Status.Repository.Status.Status == metav1.StatusFailure {
-		return true
-	}
-	return false
-}
-
-// TODO: move to image/api as a helper
-type importError struct {
-	annotation string
-}
-
-func (e importError) Error() string {
-	return fmt.Sprintf("unable to import image: %s", e.annotation)
-}
-
-func (o *ImportImageOptions) createImageImport() (*imagev1.ImageStream, *imagev1.ImageStreamImport, error) {
-	var isi *imagev1.ImageStreamImport
-	stream, err := o.isClient.Get(o.Name, metav1.GetOptions{})
-	// no stream, try creating one
-	if err != nil {
-		if !errors.IsNotFound(err) {
-			return nil, nil, err
-		}
-		if !o.Confirm {
-			return nil, nil, fmt.Errorf("no image stream named %q exists, pass --confirm to create and import", o.Name)
-		}
-		stream, isi = o.newImageStream()
-		return stream, isi, nil
-	}
-
-	if o.All {
-		// importing the entire repository
-		isi, err = o.importAll(stream)
-		if err != nil {
-			return nil, nil, err
-		}
-	} else {
-		// importing a single tag
-		isi, err = o.importTag(stream)
-		if err != nil {
-			return nil, nil, err
-		}
-	}
-
-	// this is ok because we know exactly how we want to be serialized
-	if stream.GetObjectKind().GroupVersionKind().Empty() {
-		stream.GetObjectKind().SetGroupVersionKind(imagev1.SchemeGroupVersion.WithKind("ImageStream"))
-	}
-
-	return stream, isi, nil
-}
-
-func (o *ImportImageOptions) importAll(stream *imagev1.ImageStream) (*imagev1.ImageStreamImport, error) {
-	from := o.From
-	// update ImageStream appropriately
-	if len(from) == 0 {
-		if len(stream.Spec.DockerImageRepository) != 0 {
-			from = stream.Spec.DockerImageRepository
-		} else {
-			tags := make(map[string]string)
-			for _, tag := range stream.Spec.Tags {
-				if tag.From != nil && tag.From.Kind == "DockerImage" {
-					tags[tag.Name] = tag.From.Name
-				}
-			}
-			if len(tags) == 0 {
-				return nil, fmt.Errorf("image stream does not have tags pointing to external container images")
-			}
-			return o.newImageStreamImportTags(stream, tags), nil
-		}
-	}
-	if from != stream.Spec.DockerImageRepository {
-		if !o.Confirm {
-			if len(stream.Spec.DockerImageRepository) == 0 {
-				return nil, fmt.Errorf("the image stream does not currently import an entire Docker repository, pass --confirm to update")
-			}
-			return nil, fmt.Errorf("the image stream has a different import spec %q, pass --confirm to update", stream.Spec.DockerImageRepository)
-		}
-		stream.Spec.DockerImageRepository = from
-	}
-
-	// and create accompanying ImageStreamImport
-	return o.newImageStreamImportAll(stream, from), nil
-}
-
-func (o *ImportImageOptions) importTag(stream *imagev1.ImageStream) (*imagev1.ImageStreamImport, error) {
-	from := o.From
-	tag := o.Tag
-
-	// follow any referential tags to the destination
-	finalTag, existing, multiple, err := imagehelpers.FollowTagReference(stream, tag)
-	switch err {
-	case imagehelpers.ErrInvalidReference:
-		return nil, fmt.Errorf("tag %q points to an invalid imagestreamtag", tag)
-	case imagehelpers.ErrCrossImageStreamReference:
-		return nil, fmt.Errorf("tag %q points to an imagestreamtag from another ImageStream", tag)
-	case imagehelpers.ErrCircularReference:
-		return nil, fmt.Errorf("tag %q on the image stream is a reference to same tag", tag)
-	case imagehelpers.ErrNotFoundReference:
-		// create a new tag
-		if len(from) == 0 && tag == imagev1.DefaultImageTag {
-			from = stream.Spec.DockerImageRepository
-		}
-		// if the from is still empty this means there's no such tag defined
-		// nor can we create one from .spec.dockerImageRepository
-		if len(from) == 0 {
-			return nil, fmt.Errorf("the tag %q does not exist on the image stream - choose an existing tag to import or use the 'tag' command to create a new tag", tag)
-		}
-		existing = &imagev1.TagReference{
-			From: &corev1.ObjectReference{
-				Kind: "DockerImage",
-				Name: from,
-			},
-		}
-	case nil:
-		// disallow re-importing anything other than DockerImage
-		if existing.From != nil && existing.From.Kind != "DockerImage" {
-			return nil, fmt.Errorf("tag %q points to existing %s %q, it cannot be re-imported", tag, existing.From.Kind, existing.From.Name)
-		}
-		// disallow changing an existing tag
-		if existing.From == nil {
-			return nil, fmt.Errorf("tag %q already exists - you must use the 'tag' command if you want to change the source to %q", tag, from)
-		}
-		if len(from) != 0 && from != existing.From.Name {
-			if multiple {
-				return nil, fmt.Errorf("the tag %q points to the tag %q which points to %q - use the 'tag' command if you want to change the source to %q",
-					tag, finalTag, existing.From.Name, from)
-			}
-			return nil, fmt.Errorf("the tag %q points to %q - use the 'tag' command if you want to change the source to %q", tag, existing.From.Name, from)
-		}
-
-		// set the target item to import
-		from = existing.From.Name
-		if multiple {
-			tag = finalTag
-		}
-
-		// clear the legacy annotation
-		delete(existing.Annotations, imagev1.DockerImageRepositoryCheckAnnotation)
-		// reset the generation
-		zero := int64(0)
-		existing.Generation = &zero
-
-	}
-
-	tagFound := false
-	for i := range stream.Spec.Tags {
-		if stream.Spec.Tags[i].Name == tag {
-			stream.Spec.Tags[i] = *existing
-			tagFound = true
-			break
-		}
-	}
-
-	if !tagFound {
-		stream.Spec.Tags = append(stream.Spec.Tags, *existing)
-	}
-
-	// and create accompanying ImageStreamImport
-	return o.newImageStreamImportTags(stream, map[string]string{tag: from}), nil
-}
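
Reviewer note: importTag leans on imagehelpers.FollowTagReference to resolve tag aliases before deciding what to import. A simplified, self-contained sketch of that resolution with stand-in types (the real helper also reports whether multiple hops occurred, which the multiple flag above consumes):

package main

import (
	"errors"
	"fmt"
	"strings"
)

type tagRef struct {
	kind, name string // e.g. "DockerImage" or "ImageStreamTag"
}

// follow walks ImageStreamTag aliases within streamName until it reaches a
// terminal reference, rejecting circular and cross-stream aliases.
func follow(streamName string, tags map[string]tagRef, start string) (string, error) {
	seen := map[string]bool{}
	tag := start
	for {
		if seen[tag] {
			return "", errors.New("circular reference")
		}
		seen[tag] = true
		ref, ok := tags[tag]
		if !ok {
			return "", errors.New("tag not found")
		}
		if ref.kind != "ImageStreamTag" {
			return tag, nil // terminal: this is the tag to import
		}
		name := ref.name
		if i := strings.Index(name, ":"); i >= 0 {
			if name[:i] != streamName {
				return "", errors.New("cross image stream reference")
			}
			name = name[i+1:]
		}
		tag = name
	}
}

func main() {
	tags := map[string]tagRef{
		"mytag":  {kind: "DockerImage", name: "repo.com/somens/someimage:mytag"},
		"other1": {kind: "ImageStreamTag", name: "testis:mytag"},
		"other2": {kind: "ImageStreamTag", name: "mytag"},
	}
	fmt.Println(follow("testis", tags, "other1")) // mytag <nil>
}
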
-
-func (o *ImportImageOptions) newImageStream() (*imagev1.ImageStream, *imagev1.ImageStreamImport) {
-	from := o.From
-	tag := o.Tag
-	if len(from) == 0 {
-		from = o.Target
-	}
-	var (
-		stream *imagev1.ImageStream
-		isi    *imagev1.ImageStreamImport
-	)
-	// create new ImageStream and accompanying ImageStreamImport
-	// TODO: this should be removed along with the legacy path; the import
-	// mechanism creates the ImageStream for us in the new path, so creating
-	// it here is only needed by the legacy path.
-	if o.All {
-		stream = &imagev1.ImageStream{
-			// this is ok because we know exactly how we want to be serialized
-			TypeMeta:   metav1.TypeMeta{APIVersion: imagev1.SchemeGroupVersion.String(), Kind: "ImageStream"},
-			ObjectMeta: metav1.ObjectMeta{Name: o.Name},
-			Spec:       imagev1.ImageStreamSpec{DockerImageRepository: from},
-		}
-		isi = o.newImageStreamImportAll(stream, from)
-	} else {
-		stream = &imagev1.ImageStream{
-			// this is ok because we know exactly how we want to be serialized
-			TypeMeta:   metav1.TypeMeta{APIVersion: imagev1.SchemeGroupVersion.String(), Kind: "ImageStream"},
-			ObjectMeta: metav1.ObjectMeta{Name: o.Name},
-			Spec: imagev1.ImageStreamSpec{
-				Tags: []imagev1.TagReference{
-					{
-						From: &corev1.ObjectReference{
-							Kind: "DockerImage",
-							Name: from,
-						},
-						ReferencePolicy: o.getReferencePolicy(),
-					},
-				},
-			},
-		}
-		isi = o.newImageStreamImportTags(stream, map[string]string{tag: from})
-	}
-
-	return stream, isi
-}
-
-func (o *ImportImageOptions) getReferencePolicy() imagev1.TagReferencePolicy {
-	ref := imagev1.TagReferencePolicy{}
-	if len(o.ReferencePolicy) == 0 {
-		return ref
-	}
-	switch o.ReferencePolicy {
-	case tag.SourceReferencePolicy:
-		ref.Type = imagev1.SourceTagReferencePolicy
-	case tag.LocalReferencePolicy:
-		ref.Type = imagev1.LocalTagReferencePolicy
-	}
-	return ref
-}
-
-func (o *ImportImageOptions) newImageStreamImport(stream *imagev1.ImageStream) (*imagev1.ImageStreamImport, bool) {
-	isi := &imagev1.ImageStreamImport{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:            stream.Name,
-			Namespace:       o.Namespace,
-			ResourceVersion: stream.ResourceVersion,
-		},
-		Spec: imagev1.ImageStreamImportSpec{Import: !o.DryRun},
-	}
-	insecureAnnotation := stream.Annotations[imagev1.InsecureRepositoryAnnotation]
-	insecure := insecureAnnotation == "true"
-	// --insecure flag (if provided) takes precedence over insecure annotation
-	if o.InsecureFlagProvided {
-		insecure = o.Insecure
-	}
-
-	return isi, insecure
-}
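
Reviewer note: the precedence rule in newImageStreamImport is that an explicitly provided --insecure flag always wins over the stream's insecure annotation. The rule in isolation, with hypothetical inputs:

package main

import "fmt"

// effectiveInsecure applies the flag-over-annotation precedence.
func effectiveInsecure(annotation string, flagProvided, flagValue bool) bool {
	insecure := annotation == "true"
	if flagProvided {
		insecure = flagValue
	}
	return insecure
}

func main() {
	fmt.Println(effectiveInsecure("true", false, false)) // true: annotation applies
	fmt.Println(effectiveInsecure("true", true, false))  // false: explicit flag overrides
}
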
-
-func (o *ImportImageOptions) newImageStreamImportAll(stream *imagev1.ImageStream, from string) *imagev1.ImageStreamImport {
-	isi, insecure := o.newImageStreamImport(stream)
-	isi.Spec.Repository = &imagev1.RepositoryImportSpec{
-		From: corev1.ObjectReference{
-			Kind: "DockerImage",
-			Name: from,
-		},
-		ImportPolicy: imagev1.TagImportPolicy{
-			Insecure:  insecure,
-			Scheduled: o.Scheduled,
-		},
-		ReferencePolicy: o.getReferencePolicy(),
-	}
-
-	return isi
-}
-
-func (o *ImportImageOptions) newImageStreamImportTags(stream *imagev1.ImageStream, tags map[string]string) *imagev1.ImageStreamImport {
-	isi, streamInsecure := o.newImageStreamImport(stream)
-	for tag, from := range tags {
-		insecure := streamInsecure
-		scheduled := o.Scheduled
-
-		oldTagFound := false
-		var oldTag imagev1.TagReference
-		for _, t := range stream.Spec.Tags {
-			if t.Name == tag {
-				oldTag = t
-				oldTagFound = true
-				break
-			}
-		}
-
-		if oldTagFound {
-			insecure = insecure || oldTag.ImportPolicy.Insecure
-			scheduled = scheduled || oldTag.ImportPolicy.Scheduled
-		}
-		isi.Spec.Images = append(isi.Spec.Images, imagev1.ImageImportSpec{
-			From: corev1.ObjectReference{
-				Kind: "DockerImage",
-				Name: from,
-			},
-			To: &corev1.LocalObjectReference{Name: tag},
-			ImportPolicy: imagev1.TagImportPolicy{
-				Insecure:  insecure,
-				Scheduled: scheduled,
-			},
-			ReferencePolicy: o.getReferencePolicy(),
-		})
-	}
-	return isi
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/importimage/importimage_test.go b/vendor/github.com/openshift/oc/pkg/cli/importimage/importimage_test.go
deleted file mode 100644
index a518a9da57ce..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/importimage/importimage_test.go
+++ /dev/null
@@ -1,692 +0,0 @@
-package importimage
-
-import (
-	"strings"
-	"testing"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	kapihelper "k8s.io/kubernetes/pkg/apis/core/helper"
-
-	imagev1 "github.com/openshift/api/image/v1"
-	imagefake "github.com/openshift/client-go/image/clientset/versioned/fake"
-	"github.com/openshift/oc/pkg/cli/tag"
-)
-
-func TestCreateImageImport(t *testing.T) {
-	testCases := map[string]struct {
-		name               string
-		from               string
-		stream             *imagev1.ImageStream
-		all                bool
-		confirm            bool
-		scheduled          bool
-		insecure           *bool
-		referencePolicy    string
-		err                string
-		expectedImages     []imagev1.ImageImportSpec
-		expectedRepository *imagev1.RepositoryImportSpec
-	}{
-		"import from non-existing": {
-			name: "nonexisting",
-			err:  "pass --confirm to create and import",
-		},
-		"confirmed import from non-existing": {
-			name:    "nonexisting",
-			confirm: true,
-			expectedImages: []imagev1.ImageImportSpec{{
-				From: corev1.ObjectReference{Kind: "DockerImage", Name: "nonexisting"},
-				To:   &corev1.LocalObjectReference{Name: "latest"},
-			}},
-		},
-		"confirmed import all from non-existing": {
-			name:    "nonexisting",
-			all:     true,
-			confirm: true,
-			expectedRepository: &imagev1.RepositoryImportSpec{
-				From: corev1.ObjectReference{Kind: "DockerImage", Name: "nonexisting"},
-			},
-		},
-		"import from .spec.dockerImageRepository": {
-			name: "testis",
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-				Spec: imagev1.ImageStreamSpec{
-					DockerImageRepository: "repo.com/somens/someimage",
-					Tags:                  []imagev1.TagReference{},
-				},
-			},
-			expectedImages: []imagev1.ImageImportSpec{{
-				From: corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage"},
-				To:   &corev1.LocalObjectReference{Name: "latest"},
-			}},
-		},
-		"import from .spec.dockerImageRepository non-existing tag": {
-			name: "testis:nonexisting",
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-				Spec: imagev1.ImageStreamSpec{
-					DockerImageRepository: "repo.com/somens/someimage",
-					Tags:                  []imagev1.TagReference{},
-				},
-			},
-			err: `"nonexisting" does not exist on the image stream`,
-		},
-		"import all from .spec.dockerImageRepository": {
-			name: "testis",
-			all:  true,
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-				Spec: imagev1.ImageStreamSpec{
-					DockerImageRepository: "repo.com/somens/someimage",
-					Tags:                  []imagev1.TagReference{},
-				},
-			},
-			expectedRepository: &imagev1.RepositoryImportSpec{
-				From: corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage"},
-			},
-		},
-		"import all from .spec.dockerImageRepository with different from": {
-			name: "testis",
-			from: "totally_different_spec",
-			all:  true,
-			err:  "different import spec",
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-				Spec: imagev1.ImageStreamSpec{
-					DockerImageRepository: "repo.com/somens/someimage",
-					Tags:                  []imagev1.TagReference{},
-				},
-			},
-		},
-		"import all from .spec.dockerImageRepository with confirmed different from": {
-			name:    "testis",
-			from:    "totally/different/spec",
-			all:     true,
-			confirm: true,
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-				Spec: imagev1.ImageStreamSpec{
-					DockerImageRepository: "repo.com/somens/someimage",
-					Tags:                  []imagev1.TagReference{},
-				},
-			},
-			expectedRepository: &imagev1.RepositoryImportSpec{
-				From: corev1.ObjectReference{Kind: "DockerImage", Name: "totally/different/spec"},
-			},
-		},
-		"import all from .spec.tags": {
-			name: "testis",
-			all:  true,
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{Name: "latest", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:latest"}},
-						{Name: "other", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:other"}},
-					},
-				},
-			},
-			expectedImages: []imagev1.ImageImportSpec{
-				{
-					From: corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:latest"},
-					To:   &corev1.LocalObjectReference{Name: "latest"},
-				},
-				{
-					From: corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:other"},
-					To:   &corev1.LocalObjectReference{Name: "other"},
-				},
-			},
-		},
-		"import all from .spec.tags with insecure annotation": {
-			name: "testis",
-			all:  true,
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:        "testis",
-					Namespace:   "other",
-					Annotations: map[string]string{imagev1.InsecureRepositoryAnnotation: "true"},
-				},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{Name: "latest", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:latest"}},
-						{Name: "other", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:other"}},
-					},
-				},
-			},
-			expectedImages: []imagev1.ImageImportSpec{
-				{
-					From:         corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:latest"},
-					To:           &corev1.LocalObjectReference{Name: "latest"},
-					ImportPolicy: imagev1.TagImportPolicy{Insecure: true},
-				},
-				{
-					From:         corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:other"},
-					To:           &corev1.LocalObjectReference{Name: "other"},
-					ImportPolicy: imagev1.TagImportPolicy{Insecure: true},
-				},
-			},
-		},
-		"import all from .spec.tags with insecure flag": {
-			name:     "testis",
-			all:      true,
-			insecure: newBool(true),
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{Name: "latest", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:latest"}},
-						{Name: "other", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:other"}},
-					},
-				},
-			},
-			expectedImages: []imagev1.ImageImportSpec{
-				{
-					From:         corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:latest"},
-					To:           &corev1.LocalObjectReference{Name: "latest"},
-					ImportPolicy: imagev1.TagImportPolicy{Insecure: true},
-				},
-				{
-					From:         corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:other"},
-					To:           &corev1.LocalObjectReference{Name: "other"},
-					ImportPolicy: imagev1.TagImportPolicy{Insecure: true},
-				},
-			},
-		},
-		"import all from .spec.tags no DockerImage tags": {
-			name: "testis",
-			all:  true,
-			err:  "does not have tags pointing to external container images",
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{Name: "latest", From: &corev1.ObjectReference{Kind: "ImageStreamTag", Name: "otheris:latest"}},
-					},
-				},
-			},
-		},
-		"empty image stream": {
-			name: "testis",
-			err:  "the tag \"latest\" does not exist on the image stream - choose an existing tag to import or use the 'tag' command to create a new tag",
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-			},
-		},
-		"import latest tag": {
-			name: "testis:latest",
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "testis",
-					Namespace: "other",
-				},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{Name: "latest", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:latest"}},
-					},
-				},
-			},
-			expectedImages: []imagev1.ImageImportSpec{{
-				From: corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:latest"},
-				To:   &corev1.LocalObjectReference{Name: "latest"},
-			}},
-		},
-		"import existing tag": {
-			name: "testis:existing",
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "testis",
-					Namespace: "other",
-				},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{Name: "existing", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:latest"}},
-					},
-				},
-			},
-			expectedImages: []imagev1.ImageImportSpec{{
-				From: corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:latest"},
-				To:   &corev1.LocalObjectReference{Name: "existing"},
-			}},
-		},
-		"import non-existing tag": {
-			name: "testis:latest",
-			err:  "does not exist on the image stream",
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "testis",
-					Namespace: "other",
-				},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{Name: "nonlatest", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:latest"}},
-					},
-				},
-			},
-		},
-		"import tag from .spec.tags": {
-			name: "testis:mytag",
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "testis",
-					Namespace: "other",
-				},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{
-							Name: "mytag",
-							From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:mytag"},
-						},
-					},
-				},
-			},
-			expectedImages: []imagev1.ImageImportSpec{{
-				From: corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:mytag"},
-				To:   &corev1.LocalObjectReference{Name: "mytag"},
-			}},
-		},
-		"use tag aliases": {
-			name: "testis:mytag",
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "testis",
-					Namespace: "other",
-				},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{Name: "mytag", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:mytag"}},
-						{Name: "other1", From: &corev1.ObjectReference{Kind: "ImageStreamTag", Name: "testis:mytag"}},
-						{Name: "other2", From: &corev1.ObjectReference{Kind: "ImageStreamTag", Name: "mytag"}},
-					},
-				},
-			},
-			expectedImages: []imagev1.ImageImportSpec{{
-				From: corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:mytag"},
-				To:   &corev1.LocalObjectReference{Name: "mytag"},
-			}},
-		},
-		"import tag from alias of cross-image-stream": {
-			name: "testis:mytag",
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "testis",
-					Namespace: "other",
-				},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{
-							Name: "mytag",
-							From: &corev1.ObjectReference{Kind: "ImageStreamTag", Name: "otherimage:mytag"},
-						},
-					},
-				},
-			},
-			err: "tag \"mytag\" points to an imagestreamtag from another ImageStream",
-		},
-		"import tag from alias of circular reference": {
-			name: "testis:mytag",
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "testis",
-					Namespace: "other",
-				},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{
-							Name: "mytag",
-							From: &corev1.ObjectReference{Kind: "ImageStreamTag", Name: "mytag"},
-						},
-					},
-				},
-			},
-			err: "tag \"mytag\" on the image stream is a reference to same tag",
-		},
-		"import tag from non existing alias": {
-			name: "testis:mytag",
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "testis",
-					Namespace: "other",
-				},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{
-							Name: "mytag",
-							From: &corev1.ObjectReference{Kind: "ImageStreamTag", Name: "nonexisting"},
-						},
-					},
-				},
-			},
-			err: "the tag \"mytag\" does not exist on the image stream - choose an existing tag to import or use the 'tag' command to create a new tag",
-		},
-		"use insecure annotation": {
-			name: "testis",
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:        "testis",
-					Namespace:   "other",
-					Annotations: map[string]string{imagev1.InsecureRepositoryAnnotation: "true"},
-				},
-				Spec: imagev1.ImageStreamSpec{
-					DockerImageRepository: "repo.com/somens/someimage",
-					Tags:                  []imagev1.TagReference{},
-				},
-			},
-			expectedImages: []imagev1.ImageImportSpec{{
-				From:         corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage"},
-				To:           &corev1.LocalObjectReference{Name: "latest"},
-				ImportPolicy: imagev1.TagImportPolicy{Insecure: true},
-			}},
-		},
-		"insecure flag overrides insecure annotation": {
-			name:     "testis",
-			insecure: newBool(false),
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:        "testis",
-					Namespace:   "other",
-					Annotations: map[string]string{imagev1.InsecureRepositoryAnnotation: "true"},
-				},
-				Spec: imagev1.ImageStreamSpec{
-					DockerImageRepository: "repo.com/somens/someimage",
-					Tags:                  []imagev1.TagReference{},
-				},
-			},
-			expectedImages: []imagev1.ImageImportSpec{{
-				From:         corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage"},
-				To:           &corev1.LocalObjectReference{Name: "latest"},
-				ImportPolicy: imagev1.TagImportPolicy{Insecure: false},
-			}},
-		},
-		"import tag setting referencePolicy": {
-			name:            "testis:mytag",
-			referencePolicy: tag.LocalReferencePolicy,
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{
-							Name: "mytag",
-							From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:mytag"},
-						},
-					},
-				},
-			},
-			expectedImages: []imagev1.ImageImportSpec{{
-				From:            corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:mytag"},
-				To:              &corev1.LocalObjectReference{Name: "mytag"},
-				ReferencePolicy: imagev1.TagReferencePolicy{Type: imagev1.LocalTagReferencePolicy},
-			}},
-		},
-		"import all from .spec.tags setting referencePolicy": {
-			name:            "testis",
-			all:             true,
-			referencePolicy: tag.LocalReferencePolicy,
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{Name: "mytag", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:mytag"}},
-						{Name: "other", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:other"}},
-					},
-				},
-			},
-			expectedImages: []imagev1.ImageImportSpec{
-				{
-					From:            corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:mytag"},
-					To:              &corev1.LocalObjectReference{Name: "mytag"},
-					ReferencePolicy: imagev1.TagReferencePolicy{Type: imagev1.LocalTagReferencePolicy},
-				},
-				{
-					From:            corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:other"},
-					To:              &corev1.LocalObjectReference{Name: "other"},
-					ReferencePolicy: imagev1.TagReferencePolicy{Type: imagev1.LocalTagReferencePolicy},
-				},
-			},
-		},
-		"import all from .spec.dockerImageRepository setting referencePolicy": {
-			name:            "testis",
-			all:             true,
-			referencePolicy: tag.LocalReferencePolicy,
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-				Spec: imagev1.ImageStreamSpec{
-					DockerImageRepository: "repo.com/somens/someimage",
-					Tags:                  []imagev1.TagReference{},
-				},
-			},
-			expectedRepository: &imagev1.RepositoryImportSpec{
-				From:            corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage"},
-				ReferencePolicy: imagev1.TagReferencePolicy{Type: imagev1.LocalTagReferencePolicy},
-			},
-		},
-		"import tag setting scheduled": {
-			name:      "testis:mytag",
-			scheduled: true,
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{
-							Name: "mytag",
-							From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:mytag"},
-						},
-					},
-				},
-			},
-			expectedImages: []imagev1.ImageImportSpec{{
-				From:         corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:mytag"},
-				To:           &corev1.LocalObjectReference{Name: "mytag"},
-				ImportPolicy: imagev1.TagImportPolicy{Scheduled: true},
-			}},
-		},
-		"import already scheduled tag without setting scheduled": {
-			name:      "testis:mytag",
-			scheduled: false,
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{
-							Name:         "mytag",
-							From:         &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:mytag"},
-							ImportPolicy: imagev1.TagImportPolicy{Scheduled: true},
-						},
-					},
-				},
-			},
-			expectedImages: []imagev1.ImageImportSpec{{
-				From:         corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:mytag"},
-				To:           &corev1.LocalObjectReference{Name: "mytag"},
-				ImportPolicy: imagev1.TagImportPolicy{Scheduled: true},
-			}},
-		},
-		"import all from .spec.tags setting scheduled": {
-			name:      "testis",
-			all:       true,
-			scheduled: true,
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{Name: "mytag", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:mytag"}},
-						{Name: "other", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:other"}},
-					},
-				},
-			},
-			expectedImages: []imagev1.ImageImportSpec{
-				{
-					From:         corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:mytag"},
-					To:           &corev1.LocalObjectReference{Name: "mytag"},
-					ImportPolicy: imagev1.TagImportPolicy{Scheduled: true},
-				},
-				{
-					From:         corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:other"},
-					To:           &corev1.LocalObjectReference{Name: "other"},
-					ImportPolicy: imagev1.TagImportPolicy{Scheduled: true},
-				},
-			},
-		},
-		"import all from .spec.tags, some already scheduled, without setting scheduled": {
-			name:      "testis",
-			all:       true,
-			scheduled: false,
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-				Spec: imagev1.ImageStreamSpec{
-					Tags: []imagev1.TagReference{
-						{
-							Name:         "mytag",
-							From:         &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:mytag"},
-							ImportPolicy: imagev1.TagImportPolicy{Scheduled: true},
-						},
-						{Name: "other", From: &corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:other"}},
-					},
-				},
-			},
-			expectedImages: []imagev1.ImageImportSpec{
-				{
-					From:         corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:mytag"},
-					To:           &corev1.LocalObjectReference{Name: "mytag"},
-					ImportPolicy: imagev1.TagImportPolicy{Scheduled: true},
-				},
-				{
-					From:         corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage:other"},
-					To:           &corev1.LocalObjectReference{Name: "other"},
-					ImportPolicy: imagev1.TagImportPolicy{Scheduled: false},
-				},
-			},
-		},
-		"import all from .spec.dockerImageRepository setting scheduled": {
-			name:      "testis",
-			all:       true,
-			scheduled: true,
-			stream: &imagev1.ImageStream{
-				ObjectMeta: metav1.ObjectMeta{Name: "testis", Namespace: "other"},
-				Spec: imagev1.ImageStreamSpec{
-					DockerImageRepository: "repo.com/somens/someimage",
-					Tags:                  []imagev1.TagReference{},
-				},
-			},
-			expectedRepository: &imagev1.RepositoryImportSpec{
-				From:         corev1.ObjectReference{Kind: "DockerImage", Name: "repo.com/somens/someimage"},
-				ImportPolicy: imagev1.TagImportPolicy{Scheduled: true},
-			},
-		},
-	}
-
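-	// Each table case runs against a fake clientset, seeded with the case's
-	// image stream when one is given.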
-	for name, test := range testCases {
-		var fake *imagefake.Clientset
-		if test.stream == nil {
-			fake = imagefake.NewSimpleClientset()
-		} else {
-			fake = imagefake.NewSimpleClientset(test.stream)
-		}
-		o := ImportImageOptions{
-			Target:          test.name,
-			From:            test.from,
-			All:             test.all,
-			Scheduled:       test.scheduled,
-			ReferencePolicy: test.referencePolicy,
-			Confirm:         test.confirm,
-			isClient:        fake.ImageV1().ImageStreams("other"),
-		}
-
-		if test.insecure != nil {
-			o.Insecure = *test.insecure
-			o.InsecureFlagProvided = true
-		}
-
-		if err := o.parseImageReference(); err != nil {
-			t.Errorf("unexpected error: %v", err)
-			continue
-		}
-
-		_, isi, err := o.createImageImport()
-		// check errors
-		if len(test.err) > 0 {
-			if err == nil || !strings.Contains(err.Error(), test.err) {
-				t.Errorf("%s: unexpected error: expected %s, got %v", name, test.err, err)
-			}
-			if isi != nil {
-				t.Errorf("%s: unexpected import spec: expected nil, got %#v", name, isi)
-			}
-			continue
-		}
-		if len(test.err) == 0 && err != nil {
-			t.Errorf("%s: unexpected error: %v", name, err)
-			continue
-		}
-		// check values
-		if !listEqual(isi.Spec.Images, test.expectedImages) {
-			t.Errorf("%s: unexpected import images, expected %#v, got %#v", name, test.expectedImages, isi.Spec.Images)
-		}
-		if !kapihelper.Semantic.DeepEqual(isi.Spec.Repository, test.expectedRepository) {
-			t.Errorf("%s: unexpected import repository, expected %#v, got %#v", name, test.expectedRepository, isi.Spec.Repository)
-		}
-	}
-}
-
-func TestWasError(t *testing.T) {
-	testCases := map[string]struct {
-		isi      *imagev1.ImageStreamImport
-		expected bool
-	}{
-		"no error": {
-			isi:      &imagev1.ImageStreamImport{},
-			expected: false,
-		},
-		"error importing images": {
-			isi: &imagev1.ImageStreamImport{
-				Status: imagev1.ImageStreamImportStatus{
-					Images: []imagev1.ImageImportStatus{
-						{Status: metav1.Status{Status: metav1.StatusFailure}},
-					},
-				},
-			},
-			expected: true,
-		},
-		"error importing repository": {
-			isi: &imagev1.ImageStreamImport{
-				Status: imagev1.ImageStreamImportStatus{
-					Repository: &imagev1.RepositoryImportStatus{
-						Status: metav1.Status{Status: metav1.StatusFailure},
-					},
-				},
-			},
-			expected: true,
-		},
-	}
-
-	for name, test := range testCases {
-		if a, e := wasError(test.isi), test.expected; a != e {
-			t.Errorf("%s: expected %v, got %v", name, e, a)
-		}
-	}
-}
-
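-// listEqual reports whether the two slices have the same length and every
-// spec in actual has a semantically equal counterpart in expected, regardless of order.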
-func listEqual(actual, expected []imagev1.ImageImportSpec) bool {
-	if len(actual) != len(expected) {
-		return false
-	}
-
-	for _, a := range actual {
-		found := false
-		for _, e := range expected {
-			if kapihelper.Semantic.DeepEqual(a, e) {
-				found = true
-				break
-			}
-		}
-		if !found {
-			return false
-		}
-	}
-	return true
-}
-
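-// newBool returns a pointer to the given bool, letting the test table
-// distinguish an unset insecure flag (nil) from an explicit true/false.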
-func newBool(a bool) *bool {
-	r := new(bool)
-	*r = a
-	return r
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/kubectl_compat_test.go b/vendor/github.com/openshift/oc/pkg/cli/kubectl_compat_test.go
deleted file mode 100644
index cf742e2544e5..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/kubectl_compat_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package cli
-
-import (
-	"bytes"
-	"io/ioutil"
-	"testing"
-
-	"k8s.io/apimachinery/pkg/util/sets"
-	kcmd "k8s.io/kubernetes/pkg/kubectl/cmd"
-)
-
-// MissingCommands is the list of commands we're already missing.
-// NEVER ADD TO THIS LIST
-// TODO kill this list
-var MissingCommands = sets.NewString(
-	"namespace",
-	"rolling-update",
-
-	// are on admin commands
-	"cordon",
-	"drain",
-	"uncordon",
-	"taint",
-	"top",
-	"certificate",
-
-	// TODO commands to assess
-	"run-container",
-	"alpha",
-)
-
-// WhitelistedCommands is the list of commands we're never going to have,
-// defend each one with a comment
-var WhitelistedCommands = sets.NewString()
-
-func TestKubectlCompatibility(t *testing.T) {
-	oc := NewOcCommand("oc", "oc", &bytes.Buffer{}, ioutil.Discard, ioutil.Discard)
-	kubectl := kcmd.NewKubectlCommand(nil, ioutil.Discard, ioutil.Discard)
-
-kubectlLoop:
-	for _, kubecmd := range kubectl.Commands() {
-		for _, occmd := range oc.Commands() {
-			if kubecmd.Name() == occmd.Name() {
-				if MissingCommands.Has(kubecmd.Name()) {
-					t.Errorf("%s was supposed to be missing", kubecmd.Name())
-					continue
-				}
-				if WhitelistedCommands.Has(kubecmd.Name()) {
-					t.Errorf("%s was supposed to be whitelisted", kubecmd.Name())
-					continue
-				}
-				continue kubectlLoop
-			}
-		}
-		if MissingCommands.Has(kubecmd.Name()) || WhitelistedCommands.Has(kubecmd.Name()) {
-			continue
-		}
-
-		t.Errorf("missing %q,", kubecmd.Name())
-	}
-}
-
-// this only checks one level deep for nested commands, but it does ensure that we've gotten several
-// --validate flags.  Based on that we can reasonably assume we got them in the kube commands since they
-// all share the same registration.
-func TestValidateDisabled(t *testing.T) {
-	oc := NewOcCommand("oc", "oc", &bytes.Buffer{}, ioutil.Discard, ioutil.Discard)
-	kubectl := kcmd.NewKubectlCommand(nil, ioutil.Discard, ioutil.Discard)
-
-	for _, kubecmd := range kubectl.Commands() {
-		for _, occmd := range oc.Commands() {
-			if kubecmd.Name() == occmd.Name() {
-				ocValidateFlag := occmd.Flags().Lookup("validate")
-				if ocValidateFlag == nil {
-					continue
-				}
-
-				if ocValidateFlag.Value.String() != "false" {
-					t.Errorf("%s --validate is not defaulting to false", occmd.Name())
-				}
-			}
-		}
-	}
-
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/kubectlwrappers/wrappers.go b/vendor/github.com/openshift/oc/pkg/cli/kubectlwrappers/wrappers.go
deleted file mode 100644
index 127ee3417dad..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/kubectlwrappers/wrappers.go
+++ /dev/null
@@ -1,476 +0,0 @@
-package kubectlwrappers
-
-import (
-	"bufio"
-	"flag"
-	"fmt"
-	"path"
-	"strings"
-
-	"github.com/spf13/cobra"
-	"github.com/spf13/pflag"
-
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kclientcmd "k8s.io/client-go/tools/clientcmd"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/annotate"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/apiresources"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/apply"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/attach"
-	kcmdauth "k8s.io/kubernetes/pkg/kubectl/cmd/auth"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/autoscale"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/completion"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/config"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/convert"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/cp"
-	kcreate "k8s.io/kubernetes/pkg/kubectl/cmd/create"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/delete"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/describe"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/edit"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/exec"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/explain"
-	kget "k8s.io/kubernetes/pkg/kubectl/cmd/get"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/label"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/patch"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/plugin"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/portforward"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/proxy"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/replace"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/run"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/scale"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	kwait "k8s.io/kubernetes/pkg/kubectl/cmd/wait"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	"github.com/openshift/oc/pkg/cli/create"
-	cmdutil "github.com/openshift/oc/pkg/helpers/cmd"
-)
-
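-// adjustCmdExamples recursively replaces "kubectl" with the full command name
-// in each command's examples and re-indents every example line by two spaces.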
-func adjustCmdExamples(cmd *cobra.Command, fullName string, name string) {
-	for _, subCmd := range cmd.Commands() {
-		adjustCmdExamples(subCmd, fullName, cmd.Name())
-	}
-	cmd.Example = strings.Replace(cmd.Example, "kubectl", fullName, -1)
-	tabbing := "  "
-	examples := []string{}
-	scanner := bufio.NewScanner(strings.NewReader(cmd.Example))
-	for scanner.Scan() {
-		examples = append(examples, tabbing+strings.TrimSpace(scanner.Text()))
-	}
-	cmd.Example = strings.Join(examples, "\n")
-}
-
-// NewCmdGet is a wrapper for the Kubernetes cli get command
-func NewCmdGet(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(kget.NewCmdGet(fullName, f, streams)))
-}
-
-// NewCmdReplace is a wrapper for the Kubernetes cli replace command
-func NewCmdReplace(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(replace.NewCmdReplace(f, streams)))
-}
-
-func NewCmdClusterInfo(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(clusterinfo.NewCmdClusterInfo(f, streams)))
-}
-
-// NewCmdPatch is a wrapper for the Kubernetes cli patch command
-func NewCmdPatch(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(patch.NewCmdPatch(f, streams)))
-}
-
-// NewCmdDelete is a wrapper for the Kubernetes cli delete command
-func NewCmdDelete(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(delete.NewCmdDelete(f, streams)))
-}
-
-// NewCmdCreate is a wrapper for the Kubernetes cli create command
-func NewCmdCreate(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	cmd := cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(kcreate.NewCmdCreate(f, streams)))
-
-	// create subcommands
-	cmd.AddCommand(create.NewCmdCreateRoute(fullName, f, streams))
-	cmd.AddCommand(create.NewCmdCreateDeploymentConfig(create.DeploymentConfigRecommendedName, fullName+" create "+create.DeploymentConfigRecommendedName, f, streams))
-	cmd.AddCommand(create.NewCmdCreateClusterQuota(create.ClusterQuotaRecommendedName, fullName+" create "+create.ClusterQuotaRecommendedName, f, streams))
-
-	cmd.AddCommand(create.NewCmdCreateUser(create.UserRecommendedName, fullName+" create "+create.UserRecommendedName, f, streams))
-	cmd.AddCommand(create.NewCmdCreateIdentity(create.IdentityRecommendedName, fullName+" create "+create.IdentityRecommendedName, f, streams))
-	cmd.AddCommand(create.NewCmdCreateUserIdentityMapping(create.UserIdentityMappingRecommendedName, fullName+" create "+create.UserIdentityMappingRecommendedName, f, streams))
-	cmd.AddCommand(create.NewCmdCreateImageStream(create.ImageStreamRecommendedName, fullName+" create "+create.ImageStreamRecommendedName, f, streams))
-	cmd.AddCommand(create.NewCmdCreateImageStreamTag(create.ImageStreamTagRecommendedName, fullName+" create "+create.ImageStreamTagRecommendedName, f, streams))
-
-	adjustCmdExamples(cmd, fullName, "create")
-
-	return cmd
-}
-
-var (
-	completionLong = templates.LongDesc(`
-		This command prints shell code which must be evaluated to provide interactive
-		completion of %s commands.`)
-
-	completionExample = templates.Examples(`
-		# Generate the %s completion code for bash
-	  %s completion bash > bash_completion.sh
-	  source bash_completion.sh
-
-	  # The above example depends on the bash-completion framework.
-	  # It must be sourced before sourcing the openshift cli completion,
-		# i.e. on the Mac:
-
-	  brew install bash-completion
-	  source $(brew --prefix)/etc/bash_completion
-	  %s completion bash > bash_completion.sh
-	  source bash_completion.sh
-
-	  # In zsh*, the following will load openshift cli zsh completion:
-	  source <(%s completion zsh)
-
-	  * zsh completions are only supported in versions of zsh >= 5.2`)
-)
-
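-// NewCmdCompletion is a wrapper for the Kubernetes cli completion command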
-func NewCmdCompletion(fullName string, streams genericclioptions.IOStreams) *cobra.Command {
-	cmdHelpName := fullName
-
-	if strings.HasSuffix(fullName, "completion") {
-		cmdHelpName = "openshift"
-	}
-
-	cmd := completion.NewCmdCompletion(streams.Out, "\n")
-	cmd.Long = fmt.Sprintf(completionLong, cmdHelpName)
-	cmd.Example = fmt.Sprintf(completionExample, cmdHelpName, cmdHelpName, cmdHelpName, cmdHelpName)
-	// mark all statically included flags as hidden to prevent them appearing in completions
-	cmd.PreRun = func(c *cobra.Command, _ []string) {
-		pflag.CommandLine.VisitAll(func(flag *pflag.Flag) {
-			flag.Hidden = true
-		})
-		hideGlobalFlags(c.Root(), flag.CommandLine)
-	}
-	return cmd
-}
-
-// hideGlobalFlags marks any flag that is in the global flag set as
-// hidden to prevent completion from varying by platform due to conditional
-// includes. This means that some completions will not be possible unless
-// they are registered in cobra instead of being added to flag.CommandLine.
-func hideGlobalFlags(c *cobra.Command, fs *flag.FlagSet) {
-	fs.VisitAll(func(flag *flag.Flag) {
-		if f := c.PersistentFlags().Lookup(flag.Name); f != nil {
-			f.Hidden = true
-		}
-		if f := c.LocalFlags().Lookup(flag.Name); f != nil {
-			f.Hidden = true
-		}
-	})
-	for _, child := range c.Commands() {
-		hideGlobalFlags(child, fs)
-	}
-}
-
-// NewCmdExec is a wrapper for the Kubernetes cli exec command
-func NewCmdExec(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	cmd := cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(exec.NewCmdExec(f, streams)))
-	cmd.Use = "exec [flags] POD [-c CONTAINER] -- COMMAND [args...]"
-	return cmd
-}
-
-// NewCmdPortForward is a wrapper for the Kubernetes cli port-forward command
-func NewCmdPortForward(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(portforward.NewCmdPortForward(f, streams)))
-}
-
-var (
-	describeLong = templates.LongDesc(`
-		Show details of a specific resource
-
-		This command joins many API calls together to form a detailed description of a
-		given resource.`)
-
-	describeExample = templates.Examples(`
-		# Provide details about the ruby-22-centos7 image repository
-	  %[1]s describe imageRepository ruby-22-centos7
-
-	  # Provide details about the ruby-sample-build build configuration
-	  %[1]s describe bc ruby-sample-build`)
-)
-
-// NewCmdDescribe is a wrapper for the Kubernetes cli describe command
-func NewCmdDescribe(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	cmd := describe.NewCmdDescribe(fullName, f, streams)
-	cmd.Long = describeLong
-	cmd.Example = fmt.Sprintf(describeExample, fullName)
-	return cmd
-}
-
-// NewCmdProxy is a wrapper for the Kubernetes cli proxy command
-func NewCmdProxy(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(proxy.NewCmdProxy(f, streams)))
-}
-
-var (
-	scaleLong = templates.LongDesc(`
-		Set a new size for a deployment or replication controller
-
-		Scale also allows users to specify one or more preconditions for the scale action.
-		If --current-replicas or --resource-version is specified, it is validated before the
-		scale is attempted, and it is guaranteed that the precondition holds true when the
-		scale is sent to the server.
-
-		Note that scaling a deployment configuration with no deployments will update the
-		desired replicas in the configuration template.
-
-		Supported resources:
-		%q`)
-
-	scaleExample = templates.Examples(`
-		# Scale replication controller named 'foo' to 3.
-	  %[1]s scale --replicas=3 replicationcontrollers foo
-
-	  # If the replication controller named foo's current size is 2, scale foo to 3.
-	  %[1]s scale --current-replicas=2 --replicas=3 replicationcontrollers foo
-
-	  # Scale the latest deployment of 'bar'. In case of no deployment, bar's template
-	  # will be scaled instead.
-	  %[1]s scale --replicas=10 dc bar`)
-)
-
-// NewCmdScale is a wrapper for the Kubernetes cli scale command
-func NewCmdScale(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	cmd := scale.NewCmdScale(f, streams)
-	cmd.ValidArgs = append(cmd.ValidArgs, "deploymentconfig")
-	cmd.Short = "Change the number of pods in a deployment"
-	cmd.Long = fmt.Sprintf(scaleLong, cmd.ValidArgs)
-	cmd.Example = fmt.Sprintf(scaleExample, fullName)
-	return cmd
-}
-
-var (
-	autoScaleLong = templates.LongDesc(`
-		Autoscale a deployment config or replication controller.
-
-		Looks up a deployment config or replication controller by name and creates an autoscaler that uses
-		this deployment config or replication controller as a reference. An autoscaler can automatically
-		increase or decrease number of pods deployed within the system as needed.`)
-
-	autoScaleExample = templates.Examples(`
-		# Auto scale a deployment config "foo", with the number of pods between 2 to
-		# 10, target CPU utilization at a default value that the server applies:
-	  %[1]s autoscale dc/foo --min=2 --max=10
-
-	  # Auto scale a replication controller "foo", with the number of pods between
-		# 1 to 5, target CPU utilization at 80%%
-	  %[1]s autoscale rc/foo --max=5 --cpu-percent=80`)
-)
-
-// NewCmdAutoscale is a wrapper for the Kubernetes cli autoscale command
-func NewCmdAutoscale(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	cmd := autoscale.NewCmdAutoscale(f, streams)
-	cmd.Short = "Autoscale a deployment config, deployment, replication controller, or replica set"
-	cmd.Long = autoScaleLong
-	cmd.Example = fmt.Sprintf(autoScaleExample, fullName)
-	cmd.ValidArgs = append(cmd.ValidArgs, "deploymentconfig")
-	return cmd
-}
-
-var (
-	runLong = templates.LongDesc(`
-		Create and run a particular image, possibly replicated
-
-		Creates a deployment config to manage the created container(s). You can choose to run in the
-		foreground for an interactive container execution.  You may pass 'run/v1' to
-		--generator to create a replication controller instead of a deployment config.`)
-
-	runExample = templates.Examples(`
-		# Start a single instance of nginx.
-		%[1]s run nginx --image=nginx
-
-		# Start a single instance of hazelcast and let the container expose port 5701.
-		%[1]s run hazelcast --image=hazelcast --port=5701
-
-		# Start a single instance of hazelcast and set environment variables "DNS_DOMAIN=cluster"
-		# and "POD_NAMESPACE=default" in the container.
-		%[1]s run hazelcast --image=hazelcast --env="DNS_DOMAIN=cluster" --env="POD_NAMESPACE=default"
-
-		# Start a replicated instance of nginx.
-		%[1]s run nginx --image=nginx --replicas=5
-
-		# Dry run. Print the corresponding API objects without creating them.
-		%[1]s run nginx --image=nginx --dry-run
-
-		# Start a single instance of nginx, but overload the spec of the deployment config with
-		# a partial set of values parsed from JSON.
-		%[1]s run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... } }'
-
-		# Start a pod of busybox and keep it in the foreground, don't restart it if it exits.
-		%[1]s run -i -t busybox --image=busybox --restart=Never
-
-		# Start the nginx container using the default command, but use custom arguments (arg1 .. argN)
-		# for that command.
-		%[1]s run nginx --image=nginx -- <arg1> <arg2> ... <argN>
-
-		# Start the nginx container using a different command and custom arguments.
-		%[1]s run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>
-
-		# Start the job to compute π to 2000 places and print it out.
-		%[1]s run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'
-
-		# Start the cron job to compute π to 2000 places and print it out every 5 minutes.
-		%[1]s run pi --schedule="0/5 * * * ?" --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'`)
-)
-
-// NewCmdRun is a wrapper for the Kubernetes cli run command
-func NewCmdRun(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	cmd := run.NewCmdRun(f, streams)
-	cmd.Long = runLong
-	cmd.Example = fmt.Sprintf(runExample, fullName)
-	cmd.Flags().Set("generator", "")
-	cmd.Flag("generator").Usage = "The name of the API generator to use.  Default is 'deploymentconfig/v1' if --restart=Always, otherwise the default is 'run-pod/v1'."
-	cmd.Flag("generator").DefValue = ""
-	cmd.Flag("generator").Changed = false
-	return cmd
-}
-
-// NewCmdAttach is a wrapper for the Kubernetes cli attach command
-func NewCmdAttach(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(attach.NewCmdAttach(f, streams)))
-}
-
-// NewCmdAnnotate is a wrapper for the Kubernetes cli annotate command
-func NewCmdAnnotate(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(annotate.NewCmdAnnotate(fullName, f, streams)))
-}
-
-// NewCmdLabel is a wrapper for the Kubernetes cli label command
-func NewCmdLabel(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(label.NewCmdLabel(f, streams)))
-}
-
-// NewCmdApply is a wrapper for the Kubernetes cli apply command
-func NewCmdApply(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(apply.NewCmdApply(fullName, f, streams)))
-}
-
-// NewCmdExplain is a wrapper for the Kubernetes cli explain command
-func NewCmdExplain(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(explain.NewCmdExplain(fullName, f, streams)))
-}
-
-// NewCmdConvert is a wrapper for the Kubernetes cli convert command
-func NewCmdConvert(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(convert.NewCmdConvert(f, streams)))
-}
-
-var (
-	editLong = templates.LongDesc(`
-		Edit a resource from the default editor
-
-		The edit command allows you to directly edit any API resource you can retrieve via the
-		command line tools. It will open the editor defined by your OC_EDITOR, or EDITOR environment
-		variables, or fall back to 'vi' for Linux or 'notepad' for Windows. You can edit multiple
-		objects, although changes are applied one at a time. The command accepts filenames as well
-		as command line arguments, although the files you point to must be previously saved versions
-		of resources.
-
-		The files to edit will be output in the default API version, or a version specified
-		by --output-version. The default format is YAML - if you would like to edit in JSON
-		pass -o json. The flag --windows-line-endings can be used to force Windows line endings,
-		otherwise the default for your operating system will be used.
-
-		In the event an error occurs while updating, a temporary file will be created on disk
-		that contains your unapplied changes. The most common error when updating a resource
-		is another editor changing the resource on the server. When this occurs, you will have
-		to apply your changes to the newer version of the resource, or update your temporary
-		saved copy to include the latest resource version.`)
-
-	editExample = templates.Examples(`
-		# Edit the service named 'docker-registry':
-	  %[1]s edit svc/docker-registry
-
-	  # Edit the DeploymentConfig named 'my-deployment':
-	  %[1]s edit dc/my-deployment
-
-	  # Use an alternative editor
-	  OC_EDITOR="nano" %[1]s edit dc/my-deployment
-
-	  # Edit the service 'docker-registry' in JSON using the v1 API format:
-	  %[1]s edit svc/docker-registry --output-version=v1 -o json`)
-)
-
-// NewCmdEdit is a wrapper for the Kubernetes cli edit command
-func NewCmdEdit(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	cmd := edit.NewCmdEdit(f, streams)
-	cmd.Long = editLong
-	cmd.Example = fmt.Sprintf(editExample, fullName)
-	return cmd
-}
-
-var (
-	configLong = templates.LongDesc(`
-		Manage the client config files
-
-		The client stores configuration in the current user's home directory (under the .kube directory as
-		config). When you log in for the first time, a new config file is created, and subsequent project changes with the
-		'project' command will set the current context. These subcommands allow you to manage the config directly.
-
-		Reference: https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/kubeconfig-file.md`)
-
-	configExample = templates.Examples(`
-		# Change the config context to use
-	  %[1]s %[2]s use-context my-context
-
-	  # Set the value of a config preference
-	  %[1]s %[2]s set preferences.some true`)
-)
-
-// NewCmdConfig is a wrapper for the Kubernetes cli config command
-func NewCmdConfig(fullName, name string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	pathOptions := &kclientcmd.PathOptions{
-		GlobalFile:       kclientcmd.RecommendedHomeFile,
-		EnvVar:           kclientcmd.RecommendedConfigPathEnvVar,
-		ExplicitFileFlag: genericclioptions.OpenShiftKubeConfigFlagName,
-
-		GlobalFileSubpath: path.Join(kclientcmd.RecommendedHomeDir, kclientcmd.RecommendedFileName),
-
-		LoadingRules: kclientcmd.NewDefaultClientConfigLoadingRules(),
-	}
-	pathOptions.LoadingRules.DoNotResolvePaths = true
-
-	cmd := config.NewCmdConfig(f, pathOptions, streams)
-	cmd.Short = "Change configuration files for the client"
-	cmd.Long = configLong
-	cmd.Example = fmt.Sprintf(configExample, fullName, name)
-	// normalize long descs and examples
-	// TODO remove when normalization is moved upstream
-	templates.NormalizeAll(cmd)
-	adjustCmdExamples(cmd, fullName, name)
-	return cmd
-}
-
-// NewCmdCp is a wrapper for the Kubernetes cli cp command
-func NewCmdCp(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(cp.NewCmdCp(f, streams)))
-}
-
-func NewCmdWait(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return kwait.NewCmdWait(f, streams)
-}
-
-func NewCmdAuth(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(kcmdauth.NewCmdAuth(f, streams)))
-}
-
-func NewCmdPlugin(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	// list of accepted plugin executable filename prefixes that we will look for
-	// when executing a plugin. Order matters here, we want to first see if a user
-	// has prefixed their plugin with "oc-", before defaulting to upstream behavior.
-	plugin.ValidPluginFilenamePrefixes = []string{"oc", "kubectl"}
-	return plugin.NewCmdPlugin(f, streams)
-}
-
-func NewCmdApiResources(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(apiresources.NewCmdAPIResources(f, streams)))
-}
-
-func NewCmdApiVersions(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	return cmdutil.ReplaceCommandName("kubectl", fullName, templates.Normalize(apiresources.NewCmdAPIVersions(f, streams)))
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/login/error_translation.go b/vendor/github.com/openshift/oc/pkg/cli/login/error_translation.go
deleted file mode 100644
index 7f03d73f1447..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/login/error_translation.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package login
-
-import (
-	"crypto/x509"
-	"errors"
-	"fmt"
-	"strings"
-)
-
-const (
-	unknownReason = iota
-	noServerFoundReason
-	certificateAuthorityUnknownReason
-	certificateHostnameErrorReason
-	certificateInvalidReason
-	tlsOversizedRecordReason
-
-	certificateAuthorityUnknownMsg = "The server uses a certificate signed by unknown authority. You may need to use the --certificate-authority flag to provide the path to a certificate file for the certificate authority, or --insecure-skip-tls-verify to bypass the certificate check and use insecure connections."
-	notConfiguredMsg               = `The client is not configured. You need to run the login command in order to create a default config for your server and credentials:
-  oc login
-You can also run this command again providing the path to a config file directly, either through the --config flag or the KUBECONFIG environment variable.
-`
-	tlsOversizedRecordMsg = `Unable to connect to %[2]s using TLS: %[1]s.
-Ensure the specified server supports HTTPS.`
-)
-
-// getPrettyMessageForServer prettifies the message of the provided error
-func getPrettyMessageForServer(err error, serverName string) string {
-	if err == nil {
-		return ""
-	}
-
-	reason := detectReason(err)
-
-	switch reason {
-	case noServerFoundReason:
-		return notConfiguredMsg
-
-	case certificateAuthorityUnknownReason:
-		return certificateAuthorityUnknownMsg
-
-	case tlsOversizedRecordReason:
-		if len(serverName) == 0 {
-			serverName = "server"
-		}
-		return fmt.Sprintf(tlsOversizedRecordMsg, err, serverName)
-
-	case certificateHostnameErrorReason:
-		return fmt.Sprintf("The server is using a certificate that does not match its hostname: %s", err)
-
-	case certificateInvalidReason:
-		return fmt.Sprintf("The server is using an invalid certificate: %s", err)
-	}
-
-	return err.Error()
-}
-
-// getPrettyErrorForServer returns the prettified message of the provided error as an error
-func getPrettyErrorForServer(err error, serverName string) error {
-	return errors.New(getPrettyMessageForServer(err, serverName))
-}
-
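-// detectReason classifies err into one of the reason constants above, matching
-// first on well-known message substrings and then on concrete x509 error types.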
-func detectReason(err error) int {
-	if err != nil {
-		switch {
-		case strings.Contains(err.Error(), "certificate signed by unknown authority"):
-			return certificateAuthorityUnknownReason
-		case strings.Contains(err.Error(), "no server defined"):
-			return noServerFoundReason
-		case strings.Contains(err.Error(), "tls: oversized record received"):
-			return tlsOversizedRecordReason
-		}
-		switch err.(type) {
-		case x509.UnknownAuthorityError:
-			return certificateAuthorityUnknownReason
-		case x509.HostnameError:
-			return certificateHostnameErrorReason
-		case x509.CertificateInvalidError:
-			return certificateInvalidReason
-		}
-	}
-	return unknownReason
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/login/helpers.go b/vendor/github.com/openshift/oc/pkg/cli/login/helpers.go
deleted file mode 100644
index 8f222a66c452..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/login/helpers.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package login
-
-import (
-	"bytes"
-	"crypto/x509"
-	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-	"os"
-
-	"k8s.io/apimachinery/pkg/util/sets"
-	restclient "k8s.io/client-go/rest"
-	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-	kclientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-	kterm "k8s.io/kubernetes/pkg/kubectl/util/term"
-
-	"github.com/openshift/oc/pkg/helpers/term"
-)
-
-// getMatchingClusters examines the kubeconfig for all clusters that point to the same server
-func getMatchingClusters(clientConfig restclient.Config, kubeconfig clientcmdapi.Config) sets.String {
-	ret := sets.String{}
-
-	for key, cluster := range kubeconfig.Clusters {
-		if (cluster.Server == clientConfig.Host) && (cluster.InsecureSkipTLSVerify == clientConfig.Insecure) && (cluster.CertificateAuthority == clientConfig.CAFile) && (bytes.Compare(cluster.CertificateAuthorityData, clientConfig.CAData) == 0) {
-			ret.Insert(key)
-		}
-	}
-
-	return ret
-}
-
-// findExistingClientCA returns *either* the existing client CA file name as a string,
-// *or* data in a []byte for a given host, and true if it exists in the given config
-func findExistingClientCA(host string, kubeconfig clientcmdapi.Config) (string, []byte, bool) {
-	for _, cluster := range kubeconfig.Clusters {
-		if cluster.Server == host {
-			if len(cluster.CertificateAuthority) > 0 {
-				return cluster.CertificateAuthority, nil, true
-			}
-			if len(cluster.CertificateAuthorityData) > 0 {
-				return "", cluster.CertificateAuthorityData, true
-			}
-		}
-	}
-	return "", nil, false
-}
-
-// dialToServer takes the Server URL from the given clientConfig and dials to
-// make sure the server is reachable. Note the config received is not mutated.
-func dialToServer(clientConfig restclient.Config) error {
-	// take a RoundTripper based on the config we already have (TLS, proxies, etc)
-	rt, err := restclient.TransportFor(&clientConfig)
-	if err != nil {
-		return err
-	}
-
-	parsedURL, err := url.Parse(clientConfig.Host)
-	if err != nil {
-		return err
-	}
-
-	// Do a HEAD request to serverURLToDial to make sure the server is alive.
-	// We don't care about the response, any err != nil is valid for the sake of reachability.
-	serverURLToDial := (&url.URL{Scheme: parsedURL.Scheme, Host: parsedURL.Host, Path: "/"}).String()
-	req, err := http.NewRequest(http.MethodHead, serverURLToDial, nil)
-	if err != nil {
-		return err
-	}
-
-	res, err := rt.RoundTrip(req)
-	if err != nil {
-		return err
-	}
-
-	defer res.Body.Close()
-	return nil
-}
-
-func promptForInsecureTLS(reader io.Reader, out io.Writer, reason error) bool {
-	var insecureTLSRequestReason string
-	if reason != nil {
-		switch reason.(type) {
-		case x509.UnknownAuthorityError:
-			insecureTLSRequestReason = "The server uses a certificate signed by an unknown authority."
-		case x509.HostnameError:
-			insecureTLSRequestReason = fmt.Sprintf("The server is using a certificate that does not match its hostname: %s", reason.Error())
-		case x509.CertificateInvalidError:
-			insecureTLSRequestReason = fmt.Sprintf("The server is using an invalid certificate: %s", reason.Error())
-		}
-	}
-	var input bool
-	if kterm.IsTerminal(reader) {
-		if len(insecureTLSRequestReason) > 0 {
-			fmt.Fprintln(out, insecureTLSRequestReason)
-		}
-		fmt.Fprintln(out, "You can bypass the certificate check, but any data you send to the server could be intercepted by others.")
-		input = term.PromptForBool(os.Stdin, out, "Use insecure connections? (y/n): ")
-		fmt.Fprintln(out)
-	}
-	return input
-}
-
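-// hasExistingInsecureCluster reports whether the kubeconfig already contains a
-// cluster entry for this server that is configured to skip TLS verification.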
-func hasExistingInsecureCluster(clientConfigToTest restclient.Config, kubeconfig kclientcmdapi.Config) bool {
-	clientConfigToTest.Insecure = true
-	matchingClusters := getMatchingClusters(clientConfigToTest, kubeconfig)
-	return len(matchingClusters) > 0
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/login/login.go b/vendor/github.com/openshift/oc/pkg/cli/login/login.go
deleted file mode 100644
index b36c41b6f7a7..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/login/login.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package login
-
-import (
-	"errors"
-	"fmt"
-	"net/url"
-	"os"
-
-	"github.com/spf13/cobra"
-
-	kapierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	kclientcmd "k8s.io/client-go/tools/clientcmd"
-	kclientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-	"k8s.io/kubernetes/pkg/kubectl/util/term"
-
-	"github.com/openshift/oc/pkg/helpers/flagtypes"
-	kubeconfiglib "github.com/openshift/oc/pkg/helpers/kubeconfig"
-)
-
-var (
-	loginLong = templates.LongDesc(`
-		Log in to your server and save login for subsequent use
-
-		First-time users of the client should run this command to connect to a server,
-		establish an authenticated session, and save the connection to the configuration file. The
-		default configuration will be saved to your home directory under
-		".kube/config".
-
-		The information required to log in -- like username and password, a session token, or
-		the server details -- can be provided through flags. If not provided, the command will
-		prompt for user input as needed.`)
-
-	loginExample = templates.Examples(`
-		# Log in interactively
-	  %[1]s login
-
-	  # Log in to the given server with the given certificate authority file
-	  %[1]s login localhost:8443 --certificate-authority=/path/to/cert.crt
-
-	  # Log in to the given server with the given credentials (will not prompt interactively)
-	  %[1]s login localhost:8443 --username=myuser --password=mypass`)
-)
-
-// NewCmdLogin implements the OpenShift cli login command
-func NewCmdLogin(fullName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewLoginOptions(streams)
-	cmds := &cobra.Command{
-		Use:     "login [URL]",
-		Short:   "Log in to a server",
-		Long:    loginLong,
-		Example: fmt.Sprintf(loginExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args, fullName))
-			kcmdutil.CheckErr(o.Validate(cmd, kcmdutil.GetFlagString(cmd, "server"), args))
-
-			if err := o.Run(); kapierrors.IsUnauthorized(err) {
-				fmt.Fprintln(streams.Out, "Login failed (401 Unauthorized)")
-				fmt.Fprintln(streams.Out, "Verify you have provided correct credentials.")
-
-				if err, isStatusErr := err.(*kapierrors.StatusError); isStatusErr {
-					if details := err.Status().Details; details != nil {
-						for _, cause := range details.Causes {
-							fmt.Fprintln(streams.Out, cause.Message)
-						}
-					}
-				}
-
-				os.Exit(1)
-
-			} else {
-				kcmdutil.CheckErr(err)
-			}
-		},
-	}
-
-	// Login is the only command that can negotiate a session token against the auth server using basic auth
-	cmds.Flags().StringVarP(&o.Username, "username", "u", o.Username, "Username, will prompt if not provided")
-	cmds.Flags().StringVarP(&o.Password, "password", "p", o.Password, "Password, will prompt if not provided")
-
-	return cmds
-}
-
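-// Complete fills in LoginOptions from the command flags, positional arguments,
-// and the currently loaded kubeconfig.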
-func (o *LoginOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string, commandName string) error {
-	kubeconfig, err := f.ToRawKubeConfigLoader().RawConfig()
-	o.StartingKubeConfig = &kubeconfig
-	if err != nil {
-		if !os.IsNotExist(err) {
-			return err
-		}
-		// build a valid object to use if we failed on a non-existent file
-		o.StartingKubeConfig = kclientcmdapi.NewConfig()
-	}
-
-	unparsedTimeout := kcmdutil.GetFlagString(cmd, "request-timeout")
-	timeout, err := kclientcmd.ParseTimeout(unparsedTimeout)
-	if err != nil {
-		return err
-	}
-	o.RequestTimeout = timeout
-
-	o.CommandName = commandName
-	if o.CommandName == "" {
-		o.CommandName = "oc"
-	}
-
-	parsedDefaultClusterURL, err := url.Parse(defaultClusterURL)
-	if err != nil {
-		return err
-	}
-	addr := flagtypes.Addr{Value: parsedDefaultClusterURL.Host, DefaultScheme: parsedDefaultClusterURL.Scheme, AllowPrefix: true}.Default()
-
-	if serverFlag := kcmdutil.GetFlagString(cmd, "server"); len(serverFlag) > 0 {
-		if err := addr.Set(serverFlag); err != nil {
-			return err
-		}
-		o.Server = addr.String()
-
-	} else if len(args) == 1 {
-		if err := addr.Set(args[0]); err != nil {
-			return err
-		}
-		o.Server = addr.String()
-
-	} else if len(o.Server) == 0 {
-		if defaultContext, defaultContextExists := o.StartingKubeConfig.Contexts[o.StartingKubeConfig.CurrentContext]; defaultContextExists {
-			if cluster, exists := o.StartingKubeConfig.Clusters[defaultContext.Cluster]; exists {
-				o.Server = cluster.Server
-			}
-		}
-	}
-
-	o.CertFile = kcmdutil.GetFlagString(cmd, "client-certificate")
-	o.KeyFile = kcmdutil.GetFlagString(cmd, "client-key")
-
-	o.CAFile = kcmdutil.GetFlagString(cmd, "certificate-authority")
-	o.InsecureTLS = kcmdutil.GetFlagBool(cmd, "insecure-skip-tls-verify")
-	o.Token = kcmdutil.GetFlagString(cmd, "token")
-
-	o.DefaultNamespace, _, _ = f.ToRawKubeConfigLoader().Namespace()
-
-	o.PathOptions = kubeconfiglib.NewPathOptions(cmd)
-
-	return nil
-}
-
-func (o LoginOptions) Validate(cmd *cobra.Command, serverFlag string, args []string) error {
-	if len(args) > 1 {
-		return errors.New("Only the server URL may be specified as an argument")
-	}
-
-	if (len(serverFlag) > 0) && (len(args) == 1) {
-		return errors.New("--server and passing the server URL as an argument are mutually exclusive")
-	}
-
-	if (len(o.Server) == 0) && !term.IsTerminal(o.In) {
-		return errors.New("A server URL must be specified")
-	}
-
-	if len(o.Username) > 0 && len(o.Token) > 0 {
-		return errors.New("--token and --username are mutually exclusive")
-	}
-
-	if o.StartingKubeConfig == nil {
-		return errors.New("Must have a config file already created")
-	}
-
-	return nil
-}
-
-// Run contains all the necessary functionality for the OpenShift cli login command
-func (o LoginOptions) Run() error {
-	if err := o.GatherInfo(); err != nil {
-		return err
-	}
-
-	newFileCreated, err := o.SaveConfig()
-	if err != nil {
-		return err
-	}
-
-	if newFileCreated {
-		fmt.Fprintf(o.Out, "Welcome! See '%s help' to get started.\n", o.CommandName)
-	}
-	return nil
-}
diff --git a/vendor/github.com/openshift/oc/pkg/cli/login/loginoptions.go b/vendor/github.com/openshift/oc/pkg/cli/login/loginoptions.go
deleted file mode 100644
index b5dae1ad3de6..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/login/loginoptions.go
+++ /dev/null
@@ -1,417 +0,0 @@
-package login
-
-import (
-	"bytes"
-	"crypto/tls"
-	"crypto/x509"
-	"fmt"
-	"net"
-	"os"
-	"path/filepath"
-	"time"
-
-	kerrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	restclient "k8s.io/client-go/rest"
-	kclientcmd "k8s.io/client-go/tools/clientcmd"
-	kclientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-	kterm "k8s.io/kubernetes/pkg/kubectl/util/term"
-
-	userv1 "github.com/openshift/api/user/v1"
-	projectv1typedclient "github.com/openshift/client-go/project/clientset/versioned/typed/project/v1"
-	"github.com/openshift/oc/pkg/helpers/errors"
-	cliconfig "github.com/openshift/oc/pkg/helpers/kubeconfig"
-	"github.com/openshift/oc/pkg/helpers/originkubeconfignames"
-	"github.com/openshift/oc/pkg/helpers/project"
-	loginutil "github.com/openshift/oc/pkg/helpers/project"
-	"github.com/openshift/oc/pkg/helpers/term"
-	"github.com/openshift/oc/pkg/helpers/tokencmd"
-)
-
-const defaultClusterURL = "https://localhost:8443"
-
-const projectsItemsSuppressThreshold = 50
-
-// LoginOptions is a helper for the login and setup process, gathers all information required for a
-// successful login and eventual update of config files.
-// Depending on the Reader present it can be interactive, asking for terminal input in
-// case of any missing information.
-// Notice that some methods mutate this object so it should not be reused. The Config
-// provided as a pointer will also mutate (handle new auth tokens, etc).
-type LoginOptions struct {
-	Server      string
-	CAFile      string
-	InsecureTLS bool
-
-	// flags and printing helpers
-	Username string
-	Password string
-	Project  string
-
-	// infra
-	StartingKubeConfig *kclientcmdapi.Config
-	DefaultNamespace   string
-	Config             *restclient.Config
-
-	// cert data to be used when authenticating
-	CertFile string
-	KeyFile  string
-
-	Token string
-
-	PathOptions *kclientcmd.PathOptions
-
-	CommandName    string
-	RequestTimeout time.Duration
-
-	genericclioptions.IOStreams
-}
-
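-// NewLoginOptions returns LoginOptions bound to the given IO streams.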
-func NewLoginOptions(streams genericclioptions.IOStreams) *LoginOptions {
-	return &LoginOptions{
-		IOStreams: streams,
-	}
-}
-
-// Gather all required information in a comprehensive order.
-func (o *LoginOptions) GatherInfo() error {
-	if err := o.gatherAuthInfo(); err != nil {
-		return err
-	}
-	if err := o.gatherProjectInfo(); err != nil {
-		return err
-	}
-	return nil
-}
-
-// getClientConfig returns the current clientConfig as we know it.  If there is no clientConfig, it builds one with enough information
-// to talk to a server.  This may involve user prompts.  This method is not thread-safe.
-func (o *LoginOptions) getClientConfig() (*restclient.Config, error) {
-	if o.Config != nil {
-		return o.Config, nil
-	}
-
-	if len(o.Server) == 0 {
-		// we need to have a server to talk to
-		if kterm.IsTerminal(o.In) {
-			for !o.serverProvided() {
-				defaultServer := defaultClusterURL
-				promptMsg := fmt.Sprintf("Server [%s]: ", defaultServer)
-				o.Server = term.PromptForStringWithDefault(o.In, o.Out, defaultServer, promptMsg)
-			}
-		}
-	}
-
-	clientConfig := &restclient.Config{}
-
-	// ensure clientConfig has timeout option
-	if o.RequestTimeout > 0 {
-		clientConfig.Timeout = o.RequestTimeout
-	}
-
-	// normalize the provided server to a format expected by config
-	serverNormalized, err := originkubeconfignames.NormalizeServerURL(o.Server)
-	if err != nil {
-		return nil, err
-	}
-	o.Server = serverNormalized
-	clientConfig.Host = o.Server
-
-	// use specified CA or find existing CA
-	if len(o.CAFile) > 0 {
-		clientConfig.CAFile = o.CAFile
-		clientConfig.CAData = nil
-	} else if caFile, caData, ok := findExistingClientCA(clientConfig.Host, *o.StartingKubeConfig); ok {
-		clientConfig.CAFile = caFile
-		clientConfig.CAData = caData
-	}
-
-	// try a TCP connection to the server to make sure it's reachable, and discover
-	// whether certificates are needed or an insecure TLS connection is required
-	if err := dialToServer(*clientConfig); err != nil {
-		switch err.(type) {
-		// certificate authority unknown, check or prompt if we want an insecure
-		// connection or if we already have a cluster stanza that tells us to
-		// connect to this particular server insecurely
-		case x509.UnknownAuthorityError, x509.HostnameError, x509.CertificateInvalidError:
-			if o.InsecureTLS ||
-				hasExistingInsecureCluster(*clientConfig, *o.StartingKubeConfig) ||
-				promptForInsecureTLS(o.In, o.Out, err) {
-				clientConfig.Insecure = true
-				clientConfig.CAFile = ""
-				clientConfig.CAData = nil
-			} else {
-				return nil, getPrettyErrorForServer(err, o.Server)
-			}
-		// TLS record header errors, like oversized record which usually means
-		// the server only supports "http"
-		case tls.RecordHeaderError:
-			return nil, getPrettyErrorForServer(err, o.Server)
-		default:
-			if _, ok := err.(*net.OpError); ok {
-				return nil, fmt.Errorf("%v - verify you have provided the correct host and port and that the server is currently running.", err)
-			}
-			return nil, err
-		}
-	}
-
-	o.Config = clientConfig
-
-	return o.Config, nil
-}
-
-// Negotiate a bearer token with the auth server, or try to reuse one based on the
-// information already present. In case of any missing information, ask for user input
-// (usually username and password, interactive depending on the Reader).
-func (o *LoginOptions) gatherAuthInfo() error {
-	directClientConfig, err := o.getClientConfig()
-	if err != nil {
-		return err
-	}
-
-	// make a copy and use it to avoid mutating the original
-	t := *directClientConfig
-	clientConfig := &t
-
-	// if a token was explicitly provided, try to use it
-	if o.tokenProvided() {
-		clientConfig.BearerToken = o.Token
-		if me, err := project.WhoAmI(clientConfig); err == nil {
-			o.Username = me.Name
-			o.Config = clientConfig
-
-			fmt.Fprintf(o.Out, "Logged into %q as %q using the token provided.\n\n", o.Config.Host, o.Username)
-			return nil
-
-		} else {
-			if kerrors.IsUnauthorized(err) {
-				return fmt.Errorf("The token provided is invalid or expired.\n\n")
-			}
-
-			return err
-		}
-	}
-
-	// if a username was provided, try to make use of it, but if a password was provided we force a token
-	// request, which will return a proper response code for that given password
-	if o.usernameProvided() && !o.passwordProvided() {
-		// search all valid contexts with matching server stanzas to see if we have a matching user stanza
-		kubeconfig := *o.StartingKubeConfig
-		matchingClusters := getMatchingClusters(*clientConfig, kubeconfig)
-
-		for key, context := range o.StartingKubeConfig.Contexts {
-			if matchingClusters.Has(context.Cluster) {
-				clientcmdConfig := kclientcmd.NewDefaultClientConfig(kubeconfig, &kclientcmd.ConfigOverrides{CurrentContext: key})
-				if kubeconfigClientConfig, err := clientcmdConfig.ClientConfig(); err == nil {
-					if me, err := project.WhoAmI(kubeconfigClientConfig); err == nil && (o.Username == me.Name) {
-						clientConfig.BearerToken = kubeconfigClientConfig.BearerToken
-						clientConfig.CertFile = kubeconfigClientConfig.CertFile
-						clientConfig.CertData = kubeconfigClientConfig.CertData
-						clientConfig.KeyFile = kubeconfigClientConfig.KeyFile
-						clientConfig.KeyData = kubeconfigClientConfig.KeyData
-
-						o.Config = clientConfig
-
-						fmt.Fprintf(o.Out, "Logged into %q as %q using existing credentials.\n\n", o.Config.Host, o.Username)
-
-						return nil
-					}
-				}
-			}
-		}
-	}
-
-	// if kubeconfig doesn't already have a matching user stanza...
-	clientConfig.BearerToken = ""
-	clientConfig.CertData = []byte{}
-	clientConfig.KeyData = []byte{}
-	clientConfig.CertFile = o.CertFile
-	clientConfig.KeyFile = o.KeyFile
-	token, err := tokencmd.RequestToken(o.Config, o.In, o.Username, o.Password)
-	if err != nil {
-		return err
-	}
-	clientConfig.BearerToken = token
-
-	me, err := project.WhoAmI(clientConfig)
-	if err != nil {
-		return err
-	}
-	o.Username = me.Name
-	o.Config = clientConfig
-	fmt.Fprint(o.Out, "Login successful.\n\n")
-
-	return nil
-}
-
-// Discover the projects available for the established session and take one to use. It
-// fails if no projects exist, and prints out useful information in the case of
-// multiple projects.
-// Requires o.Username to be set.
-func (o *LoginOptions) gatherProjectInfo() error {
-	me, err := o.whoAmI()
-	if err != nil {
-		return err
-	}
-
-	if o.Username != me.Name {
-		return fmt.Errorf("current user, %v, does not match expected user %v", me.Name, o.Username)
-	}
-
-	projectClient, err := projectv1typedclient.NewForConfig(o.Config)
-	if err != nil {
-		return err
-	}
-
-	projectsList, err := projectClient.Projects().List(metav1.ListOptions{})
-	// if we're running on kube (or likely kube), just set it to "default"
-	if kerrors.IsNotFound(err) || kerrors.IsForbidden(err) {
-		fmt.Fprintf(o.Out, "Using \"default\".  You can switch projects with:\n\n '%s project '\n", o.CommandName)
-		o.Project = "default"
-		return nil
-	}
-	if err != nil {
-		return err
-	}
-
-	projectsItems := projectsList.Items
-	projects := sets.String{}
-	for _, project := range projectsItems {
-		projects.Insert(project.Name)
-	}
-
-	if len(o.DefaultNamespace) > 0 && !projects.Has(o.DefaultNamespace) {
-		// Attempt a direct get of our current project in case it hasn't appeared in the list yet
-		if currentProject, err := projectClient.Projects().Get(o.DefaultNamespace, metav1.GetOptions{}); err == nil {
-			// If we get it successfully, add it to the list
-			projectsItems = append(projectsItems, *currentProject)
-			projects.Insert(currentProject.Name)
-		}
-	}
-
-	switch len(projectsItems) {
-	case 0:
-		canRequest, err := loginutil.CanRequestProjects(o.Config, o.DefaultNamespace)
-		if err != nil {
-			return err
-		}
-		msg := errors.NoProjectsExistMessage(canRequest, o.CommandName)
-		fmt.Fprint(o.Out, msg)
-		o.Project = ""
-
-	case 1:
-		o.Project = projectsItems[0].Name
-		fmt.Fprintf(o.Out, "You have one project on this server: %q\n\n", o.Project)
-		fmt.Fprintf(o.Out, "Using project %q.\n", o.Project)
-
-	default:
-		namespace := o.DefaultNamespace
-		if !projects.Has(namespace) {
-			if namespace != metav1.NamespaceDefault && projects.Has(metav1.NamespaceDefault) {
-				namespace = metav1.NamespaceDefault
-			} else {
-				namespace = projects.List()[0]
-			}
-		}
-
-		current, err := projectClient.Projects().Get(namespace, metav1.GetOptions{})
-		if err != nil && !kerrors.IsNotFound(err) && !kerrors.IsForbidden(err) {
-			return err
-		}
-		o.Project = current.Name
-
-		// Suppress project listing if the number of projects available to the user is greater than the threshold. Prevents unnecessarily noisy logins on clusters with large numbers of projects
-		if len(projectsItems) > projectsItemsSuppressThreshold {
-			fmt.Fprintf(o.Out, "You have access to %d projects, the list has been suppressed. You can list all projects with '%s projects'\n\n", len(projectsItems), o.CommandName)
-		} else {
-			fmt.Fprintf(o.Out, "You have access to the following projects and can switch between them with '%s project ':\n\n", o.CommandName)
-			for _, p := range projects.List() {
-				if o.Project == p {
-					fmt.Fprintf(o.Out, "  * %s\n", p)
-				} else {
-					fmt.Fprintf(o.Out, "    %s\n", p)
-				}
-			}
-			fmt.Fprintln(o.Out)
-		}
-		fmt.Fprintf(o.Out, "Using project %q.\n", o.Project)
-	}
-
-	return nil
-}
-
-// SaveConfig saves all the information present in this helper to a config file. An explicit
-// config file path can be provided; if not, the established config loading conventions are
-// used. A new config file is created if one can't be found at all. This will only
-// succeed if all required info is present.
-func (o *LoginOptions) SaveConfig() (bool, error) {
-	if len(o.Username) == 0 {
-		return false, fmt.Errorf("Insufficient data to merge configuration.")
-	}
-
-	globalExistedBefore := true
-	if _, err := os.Stat(o.PathOptions.GlobalFile); os.IsNotExist(err) {
-		globalExistedBefore = false
-	}
-
-	newConfig, err := cliconfig.CreateConfig(o.Project, o.Config)
-	if err != nil {
-		return false, err
-	}
-
-	cwd, err := os.Getwd()
-	if err != nil {
-		return false, err
-	}
-	baseDir, err := kclientcmdapi.MakeAbs(filepath.Dir(o.PathOptions.GetDefaultFilename()), cwd)
-	if err != nil {
-		return false, err
-	}
-	if err := cliconfig.RelativizeClientConfigPaths(newConfig, baseDir); err != nil {
-		return false, err
-	}
-
-	configToWrite, err := cliconfig.MergeConfig(*o.StartingKubeConfig, *newConfig)
-	if err != nil {
-		return false, err
-	}
-
-	if err := kclientcmd.ModifyConfig(o.PathOptions, *configToWrite, true); err != nil {
-		if !os.IsPermission(err) {
-			return false, err
-		}
-
-		out := &bytes.Buffer{}
-		fmt.Fprint(out, errors.ErrKubeConfigNotWriteable(o.PathOptions.GetDefaultFilename(), o.PathOptions.IsExplicitFile(), err).Error())
-		return false, fmt.Errorf("%v", out)
-	}
-
-	created := false
-	if _, err := os.Stat(o.PathOptions.GlobalFile); err == nil {
-		created = created || !globalExistedBefore
-	}
-
-	return created, nil
-}
-
-func (o LoginOptions) whoAmI() (*userv1.User, error) {
-	return project.WhoAmI(o.Config)
-}
-
-func (o *LoginOptions) usernameProvided() bool {
-	return len(o.Username) > 0
-}
-
-func (o *LoginOptions) passwordProvided() bool {
-	return len(o.Password) > 0
-}
-
-func (o *LoginOptions) serverProvided() bool {
-	return len(o.Server) > 0
-}
-
-func (o *LoginOptions) tokenProvided() bool {
-	return len(o.Token) > 0
-}
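The reachability probe in getClientConfig (dialToServer plus the type switch on the returned error) comes down to classifying a TLS dial failure. A standard-library sketch of that classification, with a placeholder address and timeout; note that newer Go releases may wrap certificate errors, so a production version would also match with errors.As:

    package main

    import (
        "crypto/tls"
        "crypto/x509"
        "fmt"
        "net"
        "time"
    )

    func main() {
        dialer := &net.Dialer{Timeout: 5 * time.Second}
        conn, err := tls.DialWithDialer(dialer, "tcp", "localhost:8443", &tls.Config{})
        if err == nil {
            conn.Close()
            fmt.Println("server reachable over verified TLS")
            return
        }
        switch err.(type) {
        case x509.UnknownAuthorityError, x509.HostnameError, x509.CertificateInvalidError:
            // certificate problems: the caller may offer to retry insecurely
            fmt.Println("certificate not trusted:", err)
        case tls.RecordHeaderError:
            // an oversized or garbled record header usually means the endpoint speaks plain HTTP
            fmt.Println("endpoint does not speak TLS:", err)
        default:
            if _, ok := err.(*net.OpError); ok {
                fmt.Println("connection failed; verify host and port:", err)
                return
            }
            fmt.Println("unexpected error:", err)
        }
    }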
diff --git a/vendor/github.com/openshift/oc/pkg/cli/login/loginoptions_test.go b/vendor/github.com/openshift/oc/pkg/cli/login/loginoptions_test.go
deleted file mode 100644
index 916d7c16ed02..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/login/loginoptions_test.go
+++ /dev/null
@@ -1,384 +0,0 @@
-package login
-
-import (
-	"crypto/tls"
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"net/http/httptest"
-	"regexp"
-	"testing"
-
-	"github.com/MakeNowJust/heredoc"
-
-	"github.com/openshift/library-go/pkg/oauth/oauthdiscovery"
-	"github.com/openshift/oc/pkg/helpers/originkubeconfignames"
-
-	kapierrs "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	restclient "k8s.io/client-go/rest"
-	kclientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-)
-
-const (
-	oauthMetadataEndpoint = "/.well-known/oauth-authorization-server"
-)
-
-func TestNormalizeServerURL(t *testing.T) {
-	testCases := []struct {
-		originalServerURL   string
-		normalizedServerURL string
-	}{
-		{
-			originalServerURL:   "localhost",
-			normalizedServerURL: "https://localhost:443",
-		},
-		{
-			originalServerURL:   "https://localhost",
-			normalizedServerURL: "https://localhost:443",
-		},
-		{
-			originalServerURL:   "localhost:443",
-			normalizedServerURL: "https://localhost:443",
-		},
-		{
-			originalServerURL:   "https://localhost:443",
-			normalizedServerURL: "https://localhost:443",
-		},
-		{
-			originalServerURL:   "http://localhost",
-			normalizedServerURL: "http://localhost:80",
-		},
-		{
-			originalServerURL:   "localhost:8443",
-			normalizedServerURL: "https://localhost:8443",
-		},
-	}
-
-	for _, test := range testCases {
-		t.Logf("evaluating test: normalize %s -> %s", test.originalServerURL, test.normalizedServerURL)
-		normalized, err := originkubeconfignames.NormalizeServerURL(test.originalServerURL)
-		if err != nil {
-			t.Errorf("unexpected error normalizing %s: %s", test.originalServerURL, err)
-		}
-		if normalized != test.normalizedServerURL {
-			t.Errorf("unexpected server URL normalization result for %s: expected %s, got %s", test.originalServerURL, test.normalizedServerURL, normalized)
-		}
-	}
-}
-
-func TestTLSWithCertificateNotMatchingHostname(t *testing.T) {
-	// generated by 'go run src/crypto/tls/generate_cert.go --rsa-bits 1024 --host invalidhost.com,8.8.8.8 --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h'
-	invalidHostCert := heredoc.Doc(`
-		-----BEGIN CERTIFICATE-----
-		MIICBjCCAW+gAwIBAgIRALOIWXyeLzunaiVkP2itHAEwDQYJKoZIhvcNAQELBQAw
-		EjEQMA4GA1UEChMHQWNtZSBDbzAgFw03MDAxMDEwMDAwMDBaGA8yMDg0MDEyOTE2
-		MDAwMFowEjEQMA4GA1UEChMHQWNtZSBDbzCBnzANBgkqhkiG9w0BAQEFAAOBjQAw
-		gYkCgYEAuKDlC4aMBbHaXgS+MFud5h3zeE4boSqKgFI6HceySF/a+qg0v+ID6EwQ
-		DpJ2W5AdJGEBfixo+tym6q3oKWHJUX0hInkJ6dXIdUbVOeO5dIsGG0fZmRD7DDDx
-		snkXrDB/E0JglHNckRbIh/jvznbDfbddIcdgZ7JVIfnNpigtHZECAwEAAaNaMFgw
-		DgYDVR0PAQH/BAQDAgKkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEwEB/wQF
-		MAMBAf8wIAYDVR0RBBkwF4IPaW52YWxpZGhvc3QuY29thwQICAgIMA0GCSqGSIb3
-		DQEBCwUAA4GBAAkPU044aFkBl4f/muwSh/oPGinnA4fp8ei0KMnLk+0/CjNb3Waa
-		GtuRVIudRTK2M/RzdpUrwfWlVmkezV4BR1K/aOH9a29zqDTkEjnkIbWwe+piAs+w
-		VxIxrTqM8rqq8qxeWS54AyF/OaLJgXzDpCFnCb7kY3iyHv6lcmCjluLW
-		-----END CERTIFICATE-----`)
-	invalidHostKey := heredoc.Doc(`
-		-----BEGIN RSA PRIVATE KEY-----
-		MIICXQIBAAKBgQC4oOULhowFsdpeBL4wW53mHfN4ThuhKoqAUjodx7JIX9r6qDS/
-		4gPoTBAOknZbkB0kYQF+LGj63KbqregpYclRfSEieQnp1ch1RtU547l0iwYbR9mZ
-		EPsMMPGyeResMH8TQmCUc1yRFsiH+O/OdsN9t10hx2BnslUh+c2mKC0dkQIDAQAB
-		AoGAZ0ZAuNC7NFhHEL5QcJZe3aC1Vv9B/0XfkWXtckkJFejggcNjNk5D50Xc2Xnd
-		0NvtITNN9Xj8BA83IyDCM5uqUwDbOLIc6qYgAGWzxZZSDAQg1iOAAZoXmMTNS6Zf
-		hQhNUIwB68ELGvbcq7cxQL7L9n4GfISz7PKOOUKTZp0Q8G0CQQD07K7NES340c3I
-		QVkCW5/ygNK0GuQ8nTcG5yC8R5SDS47N8YzPp17Pajah8+wawYiemY1fUmD7P/bq
-		Cjl2RtIHAkEAwPo1GzJubN7PSYgPir3TxUGtMJoyc3jfdjblXyGJHwTu2YxeRjd2
-		YUPVRpu9JvNjZc+GONvTbTZeNWCvy0JNpwJBAKEsi49JCd6eefOZBTDnCKd1nLKG
-		q8Ezl/2D5WfhFtsbwrrFhOs1cc++Tnte3/VvfC8aTwz2UfmkyyCSX+P0kMsCQCIL
-		glb7/LNEU7mbQXKurq+8OHu8mG36wyGt6aVw2yoXyrOiqfclTcM3HmdIjoRSqBSM
-		Ghfp4FECKHiuSBVJ6z0CQQDF37CRpdQRDPnAedhyApLcIxSbYo1oUm7FxBLyVb7V
-		HQjFvsOylsSCABXz0FyC7zXQxkEo6CiSahVI/PHz6Zta
-		-----END RSA PRIVATE KEY-----`)
-
-	server, err := newTLSServer(invalidHostCert, invalidHostKey)
-	if err != nil {
-		t.Error(err.Error())
-	}
-	server.StartTLS()
-	defer server.Close()
-
-	testCases := map[string]struct {
-		serverURL      string
-		skipTLSVerify  bool
-		expectedErrMsg *regexp.Regexp
-	}{
-		"succeed skipping tls": {
-			serverURL:     server.URL,
-			skipTLSVerify: true,
-		},
-		"certificate hostname doesn't match": {
-			serverURL:      server.URL,
-			expectedErrMsg: regexp.MustCompile(`The server is using a certificate that does not match its hostname(.*)is valid for 8\.8\.8\.8`),
-		},
-	}
-
-	for name, test := range testCases {
-		t.Logf("evaluating test: %s", name)
-		options := &LoginOptions{
-			Server:             test.serverURL,
-			InsecureTLS:        test.skipTLSVerify,
-			StartingKubeConfig: &kclientcmdapi.Config{},
-		}
-
-		if _, err = options.getClientConfig(); err != nil {
-			// check expectedErrMsg for nil first so a nil regexp is never dereferenced
-			if test.expectedErrMsg == nil {
-				t.Errorf("%s: unexpected error: %v", name, err)
-			} else if !test.expectedErrMsg.MatchString(err.Error()) {
-				t.Errorf("%s: expected error %q but got %q", name, test.expectedErrMsg, err)
-			}
-		} else {
-			if test.expectedErrMsg != nil {
-				t.Errorf("%s: expected error but got nothing", name)
-			}
-		}
-	}
-}
-
-func TestTLSWithExpiredCertificate(t *testing.T) {
-	// generated by 'go run src/crypto/tls/generate_cert.go --rsa-bits 1024 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1h'
-	expiredCert := heredoc.Doc(`
-		-----BEGIN CERTIFICATE-----
-		MIICEjCCAXugAwIBAgIRALf82bYpro/jQS8fP74dG5EwDQYJKoZIhvcNAQELBQAw
-		EjEQMA4GA1UEChMHQWNtZSBDbzAeFw03MDAxMDEwMDAwMDBaFw03MDAxMDEwMTAw
-		MDBaMBIxEDAOBgNVBAoTB0FjbWUgQ28wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJ
-		AoGBAONNgDXBk2Q1i/aJjTwt03KpQ3nQblMS3IX/H9JWw6ta6UublKBOaD/2o5Xt
-		FM+Q7XDEnzYw88CK5KHdyejkJo5IBpUjQYJZFzUJ1BC8Lw7yy6dXWYBJboRR1S+1
-		JhkMJOtpPecv+4cTaynplYj0WMBjcQthg2RM7tdpyUYpsp2rAgMBAAGjaDBmMA4G
-		A1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTAD
-		AQH/MC4GA1UdEQQnMCWCC2V4YW1wbGUuY29thwR/AAABhxAAAAAAAAAAAAAAAAAA
-		AAABMA0GCSqGSIb3DQEBCwUAA4GBAFpdiiM5YAQQN0H5ZMNuHWGlprjp7qVilO8/
-		WFePZRWY2vQF8g7/c1cX4bPqG+qFJd+9j2UZNjhadNfMCxvu6BY7NCupOHVHmnRQ
-		ocvkPoSqobE7qDPfiUuU1J+61Libu6b2IjV3/K9pvZkLiBrqn0YhoXXa0PG+rG1L
-		9X7+mb5z
-		-----END CERTIFICATE-----`)
-	expiredKey := heredoc.Doc(`
-		-----BEGIN RSA PRIVATE KEY-----
-		MIICXQIBAAKBgQDjTYA1wZNkNYv2iY08LdNyqUN50G5TEtyF/x/SVsOrWulLm5Sg
-		Tmg/9qOV7RTPkO1wxJ82MPPAiuSh3cno5CaOSAaVI0GCWRc1CdQQvC8O8sunV1mA
-		SW6EUdUvtSYZDCTraT3nL/uHE2sp6ZWI9FjAY3ELYYNkTO7XaclGKbKdqwIDAQAB
-		AoGBAJPFWKqZ9CZboWhfuE/9Qs/yNonE9VRQmMkMOTXXblHCQpUCyjcFgkTDJUpc
-		3QCsKZD8Yr0qSe1M3qJUu+UKHf18LqwiL/ynnalYggxIFS5/SidWCngKvIuEfkLK
-		VsnCK3jt5qx21iljGHU6bQZHnHB9IGEiBYcnQlvvw/WdvRDBAkEA8/KMpJVwnI1W
-		7fzcZ1+mbMeSJoAVIa9u7MgI+LIRZMokDRYeAMvEjm3GYpZDqA5l1dp7KochMep/
-		0vSSTHt7ewJBAO6IbcUIDhXuh2qdxR/Xk5DdDCoxaD1o4ivyj9JsSlGa9JWD7kKN
-		6ZFFrn8i7uQuniC1Rwc/4yHhs6OqbiF695ECQQCBwVKzvFUwwDEr1yK4zXStSZ3g
-		YqJaz4CV63RyK+z6ilaQq2H8FGaRR6yNBdYozre1/0ciAMxUS6H/6Fzk141/AkBe
-		SguqIP8AaGObH3Z2mc65KsfOPe2IqNcOrDlx4mCWVXxtRdN+933mcPcDRpnMFSlo
-		oH/NO9Ha6M8L2SjjjyohAkBJHU61+OWz/TAy1nxsMbFsISLn/JrdEZIf2uFORlDN
-		Z3/XIQ+yeg4Jk1VbTMZ0/fHf9xMFR8acC/7n7jxnzQau
-		-----END RSA PRIVATE KEY-----`)
-
-	server, err := newTLSServer(expiredCert, expiredKey)
-	if err != nil {
-		t.Error(err.Error())
-	}
-	server.StartTLS()
-	defer server.Close()
-
-	testCases := map[string]struct {
-		serverURL      string
-		skipTLSVerify  bool
-		expectedErrMsg *regexp.Regexp
-	}{
-		"succeed skipping tls": {
-			serverURL:     server.URL,
-			skipTLSVerify: true,
-		},
-		"certificate expired": {
-			serverURL:      server.URL,
-			expectedErrMsg: regexp.MustCompile(`The server is using an invalid certificate(.*)has expired`),
-		},
-	}
-
-	for name, test := range testCases {
-		t.Logf("evaluating test: %s", name)
-		options := &LoginOptions{
-			Server:             test.serverURL,
-			InsecureTLS:        test.skipTLSVerify,
-			StartingKubeConfig: &kclientcmdapi.Config{},
-		}
-
-		if _, err = options.getClientConfig(); err != nil {
-			// check expectedErrMsg for nil first so a nil regexp is never dereferenced
-			if test.expectedErrMsg == nil {
-				t.Errorf("%s: unexpected error: %v", name, err)
-			} else if !test.expectedErrMsg.MatchString(err.Error()) {
-				t.Errorf("%s: expected error %q but got %q", name, test.expectedErrMsg, err)
-			}
-		} else {
-			if test.expectedErrMsg != nil {
-				t.Errorf("%s: expected error but got nothing", name)
-			}
-		}
-	}
-}
-
-func TestDialToHTTPServer(t *testing.T) {
-	invoked := make(chan struct{}, 1)
-	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		invoked <- struct{}{}
-		w.WriteHeader(http.StatusOK)
-	}))
-	defer server.Close()
-
-	testCases := map[string]struct {
-		serverURL       string
-		evalExpectedErr func(error) bool
-	}{
-		"succeed dialing": {
-			serverURL: server.URL,
-		},
-	}
-
-	for name, test := range testCases {
-		t.Logf("evaluating test: %s", name)
-		clientConfig := &restclient.Config{
-			Host: test.serverURL,
-		}
-		if err := dialToServer(*clientConfig); err != nil {
-			if test.evalExpectedErr == nil || !test.evalExpectedErr(err) {
-				t.Errorf("%s: unexpected error: %v", name, err)
-			}
-		} else {
-			if test.evalExpectedErr != nil {
-				t.Errorf("%s: expected error but got nothing", name)
-			}
-		}
-	}
-}
-
-type oauthMetadataResponse struct {
-	metadata *oauthdiscovery.OauthAuthorizationServerMetadata
-}
-
-func (r *oauthMetadataResponse) Serialize() ([]byte, error) {
-	b, err := json.Marshal(r.metadata)
-	if err != nil {
-		return []byte{}, err
-	}
-
-	return b, nil
-}
-
-func TestPreserveErrTypeAuthInfo(t *testing.T) {
-	invoked := make(chan struct{}, 3)
-	oauthResponse := []byte{}
-
-	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		select {
-		case invoked <- struct{}{}:
-			t.Logf("saw %s request for path: %s", r.Method, r.URL.String())
-		default:
-			t.Fatalf("unexpected request handled by test server: %v: %v", r.Method, r.URL)
-		}
-
-		if r.URL.Path == oauthMetadataEndpoint {
-			w.WriteHeader(http.StatusOK)
-			w.Write(oauthResponse)
-			return
-		}
-		w.WriteHeader(http.StatusUnauthorized)
-	}))
-	defer server.Close()
-
-	metadataResponse := &oauthMetadataResponse{}
-	metadataResponse.metadata = &oauthdiscovery.OauthAuthorizationServerMetadata{
-		Issuer:                        server.URL,
-		AuthorizationEndpoint:         server.URL + "/oauth/authorize",
-		TokenEndpoint:                 server.URL + "/oauth/token",
-		CodeChallengeMethodsSupported: []string{"plain", "S256"},
-	}
-
-	oauthResponse, err := metadataResponse.Serialize()
-	if err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-
-	options := &LoginOptions{
-		Server:             server.URL,
-		StartingKubeConfig: &kclientcmdapi.Config{},
-		Username:           "test",
-		Password:           "test",
-
-		Config: &restclient.Config{
-			Host: server.URL,
-		},
-
-		IOStreams: genericclioptions.NewTestIOStreamsDiscard(),
-	}
-
-	err = options.gatherAuthInfo()
-	if err == nil {
-		t.Fatalf("expecting unauthorized error when gathering authinfo")
-	}
-
-	if !kapierrs.IsUnauthorized(err) {
-		t.Fatalf("expecting error of type metav1.StatusReasonUnauthorized, but got type %T: %v", err, err)
-	}
-}
-
-func TestDialToHTTPSServer(t *testing.T) {
-	invoked := make(chan struct{}, 1)
-	server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		invoked <- struct{}{}
-		w.WriteHeader(http.StatusOK)
-	}))
-	defer server.Close()
-
-	testCases := map[string]struct {
-		serverURL       string
-		skipTLSVerify   bool
-		evalExpectedErr func(error) bool
-	}{
-		"succeed dialing": {
-			serverURL:     server.URL,
-			skipTLSVerify: true,
-		},
-	}
-
-	for name, test := range testCases {
-		t.Logf("evaluating test: %s", name)
-		clientConfig := &restclient.Config{
-			Host: test.serverURL,
-			TLSClientConfig: restclient.TLSClientConfig{
-				Insecure: test.skipTLSVerify,
-			},
-		}
-		if err := dialToServer(*clientConfig); err != nil {
-			if test.evalExpectedErr == nil || !test.evalExpectedErr(err) {
-				t.Errorf("%s: unexpected error: %v", name, err)
-			}
-		} else {
-			if test.evalExpectedErr != nil {
-				t.Errorf("%s: expected error but got nothing", name)
-			}
-		}
-	}
-}
-
-func newTLSServer(certString, keyString string) (*httptest.Server, error) {
-	invoked := make(chan struct{}, 1)
-	server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		invoked <- struct{}{}
-		w.WriteHeader(http.StatusOK)
-	}))
-	cert, err := tls.X509KeyPair([]byte(certString), []byte(keyString))
-	if err != nil {
-		return nil, fmt.Errorf("error configuring server cert: %s", err)
-	}
-	server.TLS = &tls.Config{
-		Certificates: []tls.Certificate{cert},
-	}
-	return server, nil
-}
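The PEM fixtures above were produced with the generate_cert.go invocations quoted in the test comments. As an alternative sketch (an assumption, not what these tests do), an already-expired certificate can be generated in-process, which keeps the fixture self-describing:

    package main

    import (
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "crypto/x509"
        "crypto/x509/pkix"
        "encoding/pem"
        "math/big"
        "os"
        "time"
    )

    func main() {
        key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        if err != nil {
            panic(err)
        }
        tmpl := &x509.Certificate{
            SerialNumber: big.NewInt(1),
            Subject:      pkix.Name{Organization: []string{"Acme Co"}},
            NotBefore:    time.Unix(0, 0),    // Jan 1 00:00:00 1970, as in the fixtures
            NotAfter:     time.Unix(3600, 0), // already expired: valid for one hour in 1970
            DNSNames:     []string{"example.com"},
        }
        der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
        if err != nil {
            panic(err)
        }
        pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
    }

The DER bytes and key can then be handed to the httptest server directly as a tls.Certificate, rather than round-tripping through PEM and tls.X509KeyPair as newTLSServer does.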
diff --git a/vendor/github.com/openshift/oc/pkg/cli/logout/logout.go b/vendor/github.com/openshift/oc/pkg/cli/logout/logout.go
deleted file mode 100644
index 07b8b5fd7657..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/logout/logout.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package logout
-
-import (
-	"errors"
-	"fmt"
-
-	"github.com/spf13/cobra"
-	"k8s.io/klog"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	restclient "k8s.io/client-go/rest"
-	kclientcmd "k8s.io/client-go/tools/clientcmd"
-	kclientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	oauthv1client "github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1"
-	kubeconfiglib "github.com/openshift/oc/pkg/helpers/kubeconfig"
-	"github.com/openshift/oc/pkg/helpers/project"
-)
-
-type LogoutOptions struct {
-	StartingKubeConfig *kclientcmdapi.Config
-	Config             *restclient.Config
-
-	PathOptions *kclientcmd.PathOptions
-
-	genericclioptions.IOStreams
-}
-
-var (
-	logoutLong = templates.LongDesc(`
-		Log out of the active session by clearing saved tokens
-
-		An authentication token is stored in the config file after login - this command will delete
-		that token on the server, and then remove the token from the configuration file.
-
-		If you are using an alternative authentication method like Kerberos or client certificates,
-		your ticket or client certificate will not be removed from the current system since these
-		are typically managed by other programs. Instead, you can delete your config file to remove
-		the local copy of that certificate or the record of your server login.
-
-		After logging out, if you want to log back into the server use '%[1]s'.`)
-
-	logoutExample = templates.Examples(`
-	  # Logout
-	  %[1]s`)
-)
-
-func NewLogoutOptions(streams genericclioptions.IOStreams) *LogoutOptions {
-	return &LogoutOptions{
-		IOStreams: streams,
-	}
-}
-
-// NewCmdLogout implements the OpenShift cli logout command
-func NewCmdLogout(name, fullName, ocLoginFullCommand string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewLogoutOptions(streams)
-	cmds := &cobra.Command{
-		Use:     name,
-		Short:   "End the current server session",
-		Long:    fmt.Sprintf(logoutLong, ocLoginFullCommand),
-		Example: fmt.Sprintf(logoutExample, fullName),
-		Run: func(cmd *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(f, cmd, args))
-			kcmdutil.CheckErr(o.Validate(args))
-			kcmdutil.CheckErr(o.RunLogout())
-		},
-	}
-
-	// TODO: support --all which performs the same logic on all users in your config file.
-
-	return cmds
-}
-
-func (o *LogoutOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	kubeconfig, err := f.ToRawKubeConfigLoader().RawConfig()
-	if err != nil {
-		return err
-	}
-	o.StartingKubeConfig = &kubeconfig
-
-	o.Config, err = f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-
-	o.PathOptions = kubeconfiglib.NewPathOptions(cmd)
-
-	return nil
-}
-
-func (o LogoutOptions) Validate(args []string) error {
-	if len(args) > 0 {
-		return errors.New("No arguments are allowed")
-	}
-
-	if o.StartingKubeConfig == nil {
-		return errors.New("Must have a config file already created")
-	}
-
-	if len(o.Config.BearerToken) == 0 {
-		return errors.New("You must have a token in order to logout.")
-	}
-
-	return nil
-}
-
-func (o LogoutOptions) RunLogout() error {
-	token := o.Config.BearerToken
-
-	client, err := oauthv1client.NewForConfig(o.Config)
-	if err != nil {
-		return err
-	}
-
-	userInfo, err := project.WhoAmI(o.Config)
-	if err != nil {
-		return err
-	}
-
-	if err := client.OAuthAccessTokens().Delete(token, &metav1.DeleteOptions{}); err != nil {
-		klog.V(1).Infof("%v", err)
-	}
-
-	configErr := deleteTokenFromConfig(*o.StartingKubeConfig, o.PathOptions, token)
-	if configErr == nil {
-		klog.V(1).Infof("Removed token from your local configuration.")
-
-		// only return an error instead of a success message if removing the token from the client
-		// config fails. Any error that occurs deleting the token via the API is logged above.
-		fmt.Fprintf(o.Out, "Logged %q out on %q\n", userInfo.Name, o.Config.Host)
-	}
-
-	return configErr
-}
-
-func deleteTokenFromConfig(config kclientcmdapi.Config, pathOptions *kclientcmd.PathOptions, bearerToken string) error {
-	for key, value := range config.AuthInfos {
-		if value.Token == bearerToken {
-			value.Token = ""
-			config.AuthInfos[key] = value
-			// don't break; it's possible that more than one user stanza has the same token.
-		}
-	}
-
-	return kclientcmd.ModifyConfig(pathOptions, config, true)
-}
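The closing comment in deleteTokenFromConfig carries the important invariant: several user stanzas may share one bearer token, so the scrub must visit every AuthInfo rather than stopping at the first match. A self-contained illustration (the config contents below are made up):

    package main

    import (
        "fmt"

        kclientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    )

    func scrubToken(config *kclientcmdapi.Config, bearerToken string) {
        for key, value := range config.AuthInfos {
            if value.Token == bearerToken {
                value.Token = ""
                // reassignment mirrors the shape above; with pointer values it is
                // redundant but harmless
                config.AuthInfos[key] = value
            }
        }
    }

    func main() {
        cfg := kclientcmdapi.NewConfig()
        cfg.AuthInfos["alice/server-a"] = &kclientcmdapi.AuthInfo{Token: "sha256~abc"}
        cfg.AuthInfos["alice/server-b"] = &kclientcmdapi.AuthInfo{Token: "sha256~abc"}
        scrubToken(cfg, "sha256~abc")
        for name, ai := range cfg.AuthInfos {
            fmt.Printf("%s token=%q\n", name, ai.Token) // both stanzas end up empty
        }
    }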
diff --git a/vendor/github.com/openshift/oc/pkg/cli/logs/logs.go b/vendor/github.com/openshift/oc/pkg/cli/logs/logs.go
deleted file mode 100644
index b09c27488061..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/logs/logs.go
+++ /dev/null
@@ -1,281 +0,0 @@
-package logs
-
-import (
-	"errors"
-	"fmt"
-
-	"k8s.io/kubernetes/pkg/kubectl/cmd/logs"
-
-	"github.com/spf13/cobra"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/resource"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-	buildv1 "github.com/openshift/api/build/v1"
-	buildv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1"
-	buildhelpers "github.com/openshift/oc/pkg/helpers/build"
-)
-
-// LogsRecommendedCommandName is the recommended command name
-// TODO: Probably move this pattern upstream?
-const LogsRecommendedCommandName = "logs"
-
-var (
-	logsLong = templates.LongDesc(`
-		Print the logs for a resource
-
-		Supported resources are builds, build configs (bc), deployment configs (dc), and pods.
-		When a pod is specified and has more than one container, the container name should be
-		specified via -c. When a build config or deployment config is specified, you can view
-		the logs for a particular version of it via --version.
-
-		If your pod is failing to start, you may need to use the --previous option to see the
-		logs of the last attempt.`)
-
-	logsExample = templates.Examples(`
-	  # Start streaming the logs of the most recent build of the openldap build config.
-	  %[1]s %[2]s -f bc/openldap
-
-	  # Start streaming the logs of the latest deployment of the mysql deployment config.
-	  %[1]s %[2]s -f dc/mysql
-
-	  # Get the logs of the first deployment for the mysql deployment config. Note that logs
-	  # from older deployments may not exist either because the deployment was successful
-	  # or due to deployment pruning or manual deletion of the deployment.
-	  %[1]s %[2]s --version=1 dc/mysql
-
-	  # Return a snapshot of ruby-container logs from pod backend.
-	  %[1]s %[2]s backend -c ruby-container
-
-	  # Start streaming of ruby-container logs from pod backend.
-	  %[1]s %[2]s -f pod/backend -c ruby-container`)
-)
-
-// LogsOptions holds all the necessary options for running oc logs.
-type LogsOptions struct {
-	// Options should hold our own *LogOptions objects.
-	Options runtime.Object
-	// KubeLogOptions contains all the necessary options for
-	// running the upstream logs command.
-	KubeLogOptions *logs.LogsOptions
-	// Client enables access to the Build object when processing
-	// build logs for Jenkins Pipeline Strategy builds
-	Client buildv1client.BuildV1Interface
-	// Namespace is a required parameter for accessing the Build object when processing
-	// build logs for Jenkins Pipeline Strategy builds
-	Namespace string
-
-	Builder   func() *resource.Builder
-	Resources []string
-
-	Version int64
-
-	genericclioptions.IOStreams
-}
-
-func NewLogsOptions(streams genericclioptions.IOStreams) *LogsOptions {
-	return &LogsOptions{
-		KubeLogOptions: logs.NewLogsOptions(streams, false),
-		IOStreams:      streams,
-	}
-}
-
-// NewCmdLogs creates a new logs command that supports OpenShift resources.
-func NewCmdLogs(name, baseName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewLogsOptions(streams)
-	cmd := logs.NewCmdLogs(f, streams)
-	cmd.Short = "Print the logs for a resource"
-	cmd.Long = logsLong
-	cmd.Example = fmt.Sprintf(logsExample, baseName, name)
-	cmd.SuggestFor = []string{"builds", "deployments"}
-	cmd.Run = func(cmd *cobra.Command, args []string) {
-		kcmdutil.CheckErr(o.Complete(f, cmd, args))
-		kcmdutil.CheckErr(o.Validate(args))
-		kcmdutil.CheckErr(o.RunLog())
-	}
-
-	cmd.Flags().Int64Var(&o.Version, "version", o.Version, "View the logs of a particular build or deployment by version if greater than zero")
-
-	return cmd
-}
-
-func isPipelineBuild(obj runtime.Object) (bool, *buildv1.BuildConfig, bool, *buildv1.Build, bool) {
-	bc, isBC := obj.(*buildv1.BuildConfig)
-	build, isBld := obj.(*buildv1.Build)
-	isPipeline := false
-	switch {
-	case isBC:
-		isPipeline = bc.Spec.CommonSpec.Strategy.JenkinsPipelineStrategy != nil
-	case isBld:
-		isPipeline = build.Spec.CommonSpec.Strategy.JenkinsPipelineStrategy != nil
-	}
-	return isPipeline, bc, isBC, build, isBld
-}
-
-// Complete calls the upstream Complete for the logs command and then resolves the
-// resource a user requested to view its logs and creates the appropriate logOptions
-// object for it.
-func (o *LogsOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string) error {
-	// manually bind all flag values from the upstream command
-	// TODO: once the upstream command supports binding flags
-	// by outside callers, this will no longer be needed.
-	o.KubeLogOptions.AllContainers = kcmdutil.GetFlagBool(cmd, "all-containers")
-	o.KubeLogOptions.Container = kcmdutil.GetFlagString(cmd, "container")
-	o.KubeLogOptions.Selector = kcmdutil.GetFlagString(cmd, "selector")
-	o.KubeLogOptions.Follow = kcmdutil.GetFlagBool(cmd, "follow")
-	o.KubeLogOptions.Previous = kcmdutil.GetFlagBool(cmd, "previous")
-	o.KubeLogOptions.Timestamps = kcmdutil.GetFlagBool(cmd, "timestamps")
-	o.KubeLogOptions.SinceTime = kcmdutil.GetFlagString(cmd, "since-time")
-	o.KubeLogOptions.LimitBytes = kcmdutil.GetFlagInt64(cmd, "limit-bytes")
-	o.KubeLogOptions.Tail = kcmdutil.GetFlagInt64(cmd, "tail")
-	o.KubeLogOptions.SinceSeconds = kcmdutil.GetFlagDuration(cmd, "since")
-	o.KubeLogOptions.ContainerNameSpecified = cmd.Flag("container").Changed
-
-	if err := o.KubeLogOptions.Complete(f, cmd, args); err != nil {
-		return err
-	}
-
-	var err error
-	o.KubeLogOptions.GetPodTimeout, err = kcmdutil.GetPodRunningTimeoutFlag(cmd)
-	if err != nil {
-		return err
-	}
-
-	o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	o.Client, err = buildv1client.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	o.Builder = f.NewBuilder
-	o.Resources = args
-
-	return nil
-}
-
-// Validate runs the upstream validation for the logs command and then it
-// will validate any OpenShift-specific log options.
-func (o *LogsOptions) Validate(args []string) error {
-	if err := o.KubeLogOptions.Validate(); err != nil {
-		return err
-	}
-	if o.Options == nil {
-		return nil
-	}
-	switch t := o.Options.(type) {
-	case *buildv1.BuildLogOptions:
-		if t.Previous && t.Version != nil {
-			return errors.New("cannot use both --previous and --version")
-		}
-	case *appsv1.DeploymentLogOptions:
-		if t.Previous && t.Version != nil {
-			return errors.New("cannot use both --previous and --version")
-		}
-	default:
-		return errors.New("invalid log options object provided")
-	}
-	return nil
-}
-
-// RunLog will run the upstream logs command and may use an OpenShift
-// logOptions object.
-func (o *LogsOptions) RunLog() error {
-	podLogOptions := o.KubeLogOptions.Options.(*corev1.PodLogOptions)
-	infos, err := o.Builder().
-		WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
-		NamespaceParam(o.Namespace).DefaultNamespace().
-		ResourceNames("pods", o.Resources...).
-		SingleResourceType().RequireObject(false).
-		Do().Infos()
-	if err != nil {
-		return err
-	}
-	if len(infos) != 1 {
-		return errors.New("expected a resource")
-	}
-
-	// TODO: podLogOptions should be included in our own logOptions objects.
-	switch gr := infos[0].Mapping.Resource.GroupResource(); gr {
-	case buildv1.Resource("builds"),
-		buildv1.Resource("buildconfigs"):
-		bopts := &buildv1.BuildLogOptions{
-			Follow:       podLogOptions.Follow,
-			Previous:     podLogOptions.Previous,
-			SinceSeconds: podLogOptions.SinceSeconds,
-			SinceTime:    podLogOptions.SinceTime,
-			Timestamps:   podLogOptions.Timestamps,
-			TailLines:    podLogOptions.TailLines,
-			LimitBytes:   podLogOptions.LimitBytes,
-		}
-		if o.Version != 0 {
-			bopts.Version = &o.Version
-		}
-		o.Options = bopts
-
-	case appsv1.Resource("deploymentconfigs"):
-		dopts := &appsv1.DeploymentLogOptions{
-			Container:    podLogOptions.Container,
-			Follow:       podLogOptions.Follow,
-			Previous:     podLogOptions.Previous,
-			SinceSeconds: podLogOptions.SinceSeconds,
-			SinceTime:    podLogOptions.SinceTime,
-			Timestamps:   podLogOptions.Timestamps,
-			TailLines:    podLogOptions.TailLines,
-			LimitBytes:   podLogOptions.LimitBytes,
-		}
-		if o.Version != 0 {
-			dopts.Version = &o.Version
-		}
-		o.Options = dopts
-	default:
-		o.Options = nil
-	}
-
-	return o.runLogPipeline()
-}
-
-func (o *LogsOptions) runLogPipeline() error {
-	if o.Options != nil {
-		// Use our own options object.
-		o.KubeLogOptions.Options = o.Options
-	}
-	isPipeline, bc, isBC, build, isBld := isPipelineBuild(o.KubeLogOptions.Object)
-	if !isPipeline {
-		return o.KubeLogOptions.RunLogs()
-	}
-
-	switch {
-	case isBC:
-		buildName := buildhelpers.BuildNameForConfigVersion(bc.ObjectMeta.Name, int(bc.Status.LastVersion))
-		build, _ = o.Client.Builds(o.Namespace).Get(buildName, metav1.GetOptions{})
-		if build == nil {
-			return fmt.Errorf("the build %s for build config %s was not found", buildName, bc.Name)
-		}
-		fallthrough
-	case isBld:
-		urlString, _ := build.Annotations[buildv1.BuildJenkinsBlueOceanLogURLAnnotation]
-		if len(urlString) == 0 {
-			return fmt.Errorf("the pipeline strategy build %s does not yet contain the log URL; wait a few moments, then try again", build.Name)
-		}
-		fmt.Fprintf(o.Out, "info: logs available at %s\n", urlString)
-	default:
-		return fmt.Errorf("a pipeline strategy build log operation peformed against invalid object %#v", o.KubeLogOptions.Object)
-	}
-
-	return nil
-}
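The buildconfig arm of runLogPipeline resolves the config to its most recent build and then falls through, so both resource kinds share the annotation lookup and the "URL not populated yet" handling. Reduced to a standalone sketch — the function names and annotation key below are stand-ins for the oc helpers, not their real identifiers:

    package main

    import "fmt"

    func logURL(kind, name string, lastVersion int) (string, error) {
        var annotations map[string]string
        switch kind {
        case "buildconfig":
            // resolve the config to its most recent build, e.g. myapp-3
            buildName := fmt.Sprintf("%s-%d", name, lastVersion)
            annotations = lookupBuildAnnotations(buildName)
            fallthrough // from here on, treat it exactly like a build
        case "build":
            if annotations == nil {
                annotations = lookupBuildAnnotations(name)
            }
            url := annotations["jenkins-log-url"]
            if url == "" {
                return "", fmt.Errorf("log URL not populated yet; retry shortly")
            }
            return url, nil
        }
        return "", fmt.Errorf("unsupported kind %q", kind)
    }

    func lookupBuildAnnotations(name string) map[string]string {
        // stand-in for a client Get call against the API server
        return map[string]string{"jenkins-log-url": "https://jenkins.example.com/" + name}
    }

    func main() {
        url, err := logURL("buildconfig", "myapp", 3)
        fmt.Println(url, err)
    }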
diff --git a/vendor/github.com/openshift/oc/pkg/cli/logs/logs_test.go b/vendor/github.com/openshift/oc/pkg/cli/logs/logs_test.go
deleted file mode 100644
index f0ba7b4e55d8..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/logs/logs_test.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package logs
-
-import (
-	"reflect"
-	"strings"
-	"testing"
-
-	"github.com/spf13/pflag"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/logs"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-	buildv1 "github.com/openshift/api/build/v1"
-	buildfake "github.com/openshift/client-go/build/clientset/versioned/fake"
-)
-
-// TestLogsFlagParity makes sure that our copied flags don't slip during rebases
-func TestLogsFlagParity(t *testing.T) {
-	streams := genericclioptions.NewTestIOStreamsDiscard()
-	kubeCmd := logs.NewCmdLogs(nil, streams)
-	originCmd := NewCmdLogs("oc", "logs", nil, streams)
-
-	kubeCmd.LocalFlags().VisitAll(func(kubeFlag *pflag.Flag) {
-		originFlag := originCmd.LocalFlags().Lookup(kubeFlag.Name)
-		if originFlag == nil {
-			t.Errorf("missing %v flag", kubeFlag.Name)
-			return
-		}
-
-		if !reflect.DeepEqual(originFlag, kubeFlag) {
-			t.Errorf("flag %v %v does not match %v", kubeFlag.Name, kubeFlag, originFlag)
-		}
-	})
-}
-
-type fakeWriter struct {
-	data []byte
-}
-
-func (f *fakeWriter) Write(p []byte) (n int, err error) {
-	f.data = p
-	return len(p), nil
-}
-
-func TestRunLogForPipelineStrategy(t *testing.T) {
-	bld := buildv1.Build{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:        "foo-0",
-			Namespace:   "foo",
-			Annotations: map[string]string{buildv1.BuildJenkinsBlueOceanLogURLAnnotation: "https://foo"},
-		},
-		Spec: buildv1.BuildSpec{
-			CommonSpec: buildv1.CommonSpec{
-				Strategy: buildv1.BuildStrategy{
-					JenkinsPipelineStrategy: &buildv1.JenkinsPipelineBuildStrategy{},
-				},
-			},
-		},
-	}
-
-	fakebc := buildfake.NewSimpleClientset(&bld)
-	streams, _, out, _ := genericclioptions.NewTestIOStreams()
-
-	testCases := []struct {
-		o runtime.Object
-	}{
-		{
-			o: &bld,
-		},
-		{
-			o: &buildv1.BuildConfig{
-				ObjectMeta: metav1.ObjectMeta{
-					Namespace: "foo",
-					Name:      "foo",
-				},
-				Spec: buildv1.BuildConfigSpec{
-					CommonSpec: buildv1.CommonSpec{
-						Strategy: buildv1.BuildStrategy{
-							JenkinsPipelineStrategy: &buildv1.JenkinsPipelineBuildStrategy{},
-						},
-					},
-				},
-			},
-		},
-	}
-
-	for _, tc := range testCases {
-		o := &LogsOptions{
-			IOStreams: streams,
-			KubeLogOptions: &logs.LogsOptions{
-				IOStreams: streams,
-				Object:    tc.o,
-				Namespace: "foo",
-			},
-			Client: fakebc.BuildV1(),
-		}
-		if err := o.runLogPipeline(); err != nil {
-			t.Errorf("%#v: RunLog error %v", tc.o, err)
-		}
-		if !strings.Contains(out.String(), "https://foo") {
-			t.Errorf("%#v: RunLog did not have https://foo, but rather had: %s", tc.o, out.String())
-		}
-	}
-
-}
-
-func TestIsPipelineBuild(t *testing.T) {
-	testCases := []struct {
-		o          runtime.Object
-		isPipeline bool
-	}{
-		{
-			o: &buildv1.Build{
-				Spec: buildv1.BuildSpec{
-					CommonSpec: buildv1.CommonSpec{
-						Strategy: buildv1.BuildStrategy{
-							JenkinsPipelineStrategy: &buildv1.JenkinsPipelineBuildStrategy{},
-						},
-					},
-				},
-			},
-			isPipeline: true,
-		},
-		{
-			o: &buildv1.Build{
-				Spec: buildv1.BuildSpec{
-					CommonSpec: buildv1.CommonSpec{
-						Strategy: buildv1.BuildStrategy{
-							SourceStrategy: &buildv1.SourceBuildStrategy{},
-						},
-					},
-				},
-			},
-			isPipeline: false,
-		},
-		{
-			o: &buildv1.BuildConfig{
-				Spec: buildv1.BuildConfigSpec{
-					CommonSpec: buildv1.CommonSpec{
-						Strategy: buildv1.BuildStrategy{
-							JenkinsPipelineStrategy: &buildv1.JenkinsPipelineBuildStrategy{},
-						},
-					},
-				},
-			},
-			isPipeline: true,
-		},
-		{
-			o: &buildv1.BuildConfig{
-				Spec: buildv1.BuildConfigSpec{
-					CommonSpec: buildv1.CommonSpec{
-						Strategy: buildv1.BuildStrategy{
-							DockerStrategy: &buildv1.DockerBuildStrategy{},
-						},
-					},
-				},
-			},
-			isPipeline: false,
-		},
-		{
-			o:          &appsv1.DeploymentConfig{},
-			isPipeline: false,
-		},
-	}
-
-	for _, tc := range testCases {
-		isPipeline, _, _, _, _ := isPipelineBuild(tc.o)
-		if isPipeline != tc.isPipeline {
-			t.Errorf("%#v, unexpected results expected isPipeline %v returned isPipeline %v", tc.o, tc.isPipeline, isPipeline)
-		}
-	}
-}
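TestLogsFlagParity above is effectively a rebase guard: it visits every flag on the upstream kubectl logs command and deep-compares it against the origin command. The same guard, reduced to bare pflag flag sets (the flag and set names here are just examples):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func missingOrDrifted(upstream, downstream *pflag.FlagSet) []string {
        var problems []string
        upstream.VisitAll(func(up *pflag.Flag) {
            down := downstream.Lookup(up.Name)
            switch {
            case down == nil:
                problems = append(problems, "missing: "+up.Name)
            case up.Usage != down.Usage || up.DefValue != down.DefValue:
                problems = append(problems, "drifted: "+up.Name)
            }
        })
        return problems
    }

    func main() {
        up := pflag.NewFlagSet("kube", pflag.ContinueOnError)
        up.Bool("follow", false, "Specify if the logs should be streamed.")
        down := pflag.NewFlagSet("origin", pflag.ContinueOnError)
        down.Bool("follow", false, "Specify if the logs should be streamed.")
        fmt.Println(missingOrDrifted(up, down)) // prints [] when nothing slipped
    }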
diff --git a/vendor/github.com/openshift/oc/pkg/cli/newapp/OWNERS b/vendor/github.com/openshift/oc/pkg/cli/newapp/OWNERS
deleted file mode 100644
index 75caac287c99..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/newapp/OWNERS
+++ /dev/null
@@ -1,6 +0,0 @@
-reviewers:
-  - adambkaplan
-  - gmontero
-  - coreydaley
-approvers:
-  - adambkaplan
diff --git a/vendor/github.com/openshift/oc/pkg/cli/newapp/newapp.go b/vendor/github.com/openshift/oc/pkg/cli/newapp/newapp.go
deleted file mode 100644
index 6a582595d9e6..000000000000
--- a/vendor/github.com/openshift/oc/pkg/cli/newapp/newapp.go
+++ /dev/null
@@ -1,1231 +0,0 @@
-package newapp
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"sort"
-	"strings"
-	"time"
-
-	"github.com/MakeNowJust/heredoc"
-	docker "github.com/fsouza/go-dockerclient"
-	"github.com/spf13/cobra"
-	"k8s.io/klog"
-
-	corev1 "k8s.io/api/core/v1"
-	kapierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/api/meta"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/cli-runtime/pkg/genericclioptions"
-	"k8s.io/cli-runtime/pkg/printers"
-	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/kubernetes"
-	corev1typedclient "k8s.io/client-go/kubernetes/typed/core/v1"
-	restclient "k8s.io/client-go/rest"
-	"k8s.io/kubernetes/pkg/kubectl/cmd/logs"
-	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
-	"k8s.io/kubernetes/pkg/kubectl/generate"
-	"k8s.io/kubernetes/pkg/kubectl/polymorphichelpers"
-	"k8s.io/kubernetes/pkg/kubectl/scheme"
-	"k8s.io/kubernetes/pkg/kubectl/util/templates"
-
-	appsv1 "github.com/openshift/api/apps/v1"
-	"github.com/openshift/api/build"
-	buildv1 "github.com/openshift/api/build/v1"
-	imagev1 "github.com/openshift/api/image/v1"
-	routev1 "github.com/openshift/api/route/v1"
-	imagev1typedclient "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
-	routev1typedclient "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1"
-	templatev1typedclient "github.com/openshift/client-go/template/clientset/versioned/typed/template/v1"
-	"github.com/openshift/library-go/pkg/git"
-	"github.com/openshift/library-go/pkg/image/imageutil"
-	"github.com/openshift/oc/pkg/helpers/bulk"
-	cmdutil "github.com/openshift/oc/pkg/helpers/cmd"
-	imagehelpers "github.com/openshift/oc/pkg/helpers/image"
-	"github.com/openshift/oc/pkg/helpers/newapp"
-	newappapp "github.com/openshift/oc/pkg/helpers/newapp/app"
-	newcmd "github.com/openshift/oc/pkg/helpers/newapp/cmd"
-	dockerutil "github.com/openshift/oc/pkg/helpers/newapp/docker"
-)
-
-// NewAppRecommendedCommandName is the recommended command name.
-const NewAppRecommendedCommandName = "new-app"
-
-// ExposeRecommendedName is the recommended command name to expose app.
-const ExposeRecommendedName = "expose"
-
-// StatusRecommendedName is the recommended command name.
-const StatusRecommendedName = "status"
-
-// RoutePollTimeout sets how long the new-app command waits for the route host to be prepopulated
-const RoutePollTimeout = 5 * time.Second
-
-var (
-	newAppLong = templates.LongDesc(`
-		Create a new application by specifying source code, templates, and/or images
-
-		This command will try to build up the components of an application using images, templates,
-		or code that has a public repository. It will lookup the images on the local Docker installation
-		(if available), a container image registry, an integrated image stream, or stored templates.
-
-		If you specify a source code URL, it will set up a build that takes your source code and converts
-		it into an image that can run inside of a pod. Local source must be in a git repository that has a
-		remote repository that the server can see. The images will be deployed via a deployment
-		configuration, and a service will be connected to the first public port of the app. You may either specify
-		components using the various existing flags or let %[2]s autodetect what kind of components
-		you have provided.
-
-		If you provide source code, a new build will be automatically triggered.
-		You can use '%[1]s status' to check the progress.`)
-
-	newAppExample = templates.Examples(`
-	  # List all local templates and image streams that can be used to create an app
-	  %[1]s %[2]s --list
-
-	  # Create an application based on the source code in the current git repository (with a public remote) and a Docker image
-	  %[1]s %[2]s . --docker-image=repo/langimage
-
-	  # Create an application myapp with Docker based build strategy expecting binary input
-	  %[1]s %[2]s  --strategy=docker --binary --name myapp
-
-	  # Create a Ruby application based on the provided [image]~[source code] combination
-	  %[1]s %[2]s centos/ruby-25-centos7~https://github.com/sclorg/ruby-ex.git
-
-	  # Use the public Docker Hub MySQL image to create an app. Generated artifacts will be labeled with db=mysql
-	  %[1]s %[2]s mysql MYSQL_USER=user MYSQL_PASSWORD=pass MYSQL_DATABASE=testdb -l db=mysql
-
-	  # Use a MySQL image in a private registry to create an app and override application artifacts' names
-	  %[1]s %[2]s --docker-image=myregistry.com/mycompany/mysql --name=private
-
-	  # Create an application from a remote repository using its beta4 branch
-	  %[1]s %[2]s https://github.com/openshift/ruby-hello-world#beta4
-
-	  # Create an application based on a stored template, explicitly setting a parameter value
-	  %[1]s %[2]s --template=ruby-helloworld-sample --param=MYSQL_USER=admin
-
-	  # Create an application from a remote repository and specify a context directory
-	  %[1]s %[2]s https://github.com/youruser/yourgitrepo --context-dir=src/build
-
-	  # Create an application from a remote private repository and specify which existing secret to use
-	  %[1]s %[2]s https://github.com/youruser/yourgitrepo --source-secret=yoursecret
-
-	  # Create an application based on a template file, explicitly setting a parameter value
-	  %[1]s %[2]s --file=./example/myapp/template.json --param=MYSQL_USER=admin
-
-	  # Search all templates, image streams, and Docker images for the ones that match "ruby"
-	  %[1]s %[2]s --search ruby
-
-	  # Search for "ruby", but only in stored templates (--template, --image-stream and --docker-image
-	  # can be used to filter search results)
-	  %[1]s %[2]s --search --template=ruby
-
-	  # Search for "ruby" in stored templates and print the output as an YAML
-	  %[1]s %[2]s --search --template=ruby --output=yaml`)
-
-	newAppNoInput = `You must specify one or more images, image streams, templates, or source code locations to create an application.
-
-To list all local templates and image streams, use:
-
-  %[1]s %[2]s -L
-
-To search templates, image streams, and Docker images that match the arguments provided, use:
-
-  %[1]s %[2]s -S php
-  %[1]s %[2]s -S --template=ruby
-  %[1]s %[2]s -S --image-stream=mysql
-  %[1]s %[2]s -S --docker-image=python
-`
-)
-
-type ObjectGeneratorOptions struct {
-	PrintFlags *genericclioptions.PrintFlags
-	Action     bulk.BulkAction
-
-	Config *newcmd.AppConfig
-
-	BaseName    string
-	CommandPath string
-	CommandName string
-
-	LogsForObject polymorphichelpers.LogsForObjectFunc
-	Printer       printers.ResourcePrinter
-
-	genericclioptions.IOStreams
-}
-
-type AppOptions struct {
-	*ObjectGeneratorOptions
-
-	RESTClientGetter genericclioptions.RESTClientGetter
-
-	genericclioptions.IOStreams
-}
-
-// Complete sets all common default options for commands (new-app and new-build)
-func (o *ObjectGeneratorOptions) Complete(baseName, commandName string, f kcmdutil.Factory, c *cobra.Command, args []string) error {
-	cmdutil.WarnAboutCommaSeparation(o.ErrOut, o.Config.Environment, "--env")
-	cmdutil.WarnAboutCommaSeparation(o.ErrOut, o.Config.BuildEnvironment, "--build-env")
-
-	o.Action.IOStreams = o.IOStreams
-
-	if o.PrintFlags.OutputFormat != nil {
-		o.Action.Output = *o.PrintFlags.OutputFormat
-	}
-
-	// Only output="" should print descriptions of intermediate steps. Everything
-	// else should print only some specific output (json, yaml, go-template, ...)
-	o.Config.In = o.In
-	if len(o.Action.Output) == 0 {
-		o.Config.Out = o.Out
-	} else {
-		o.Config.Out = ioutil.Discard
-	}
-	o.Config.ErrOut = o.ErrOut
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-	mapper, err := f.ToRESTMapper()
-	if err != nil {
-		return err
-	}
-	dynamicClient, err := dynamic.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	o.Action.Bulk.Scheme = newAppScheme
-	o.Action.Bulk.Op = bulk.Creator{Client: dynamicClient, RESTMapper: mapper}.Create
-	// Retry is used to support previous versions of the API server that will
-	// consider the presence of an unknown trigger type to be an error.
-	o.Action.Bulk.Retry = retryBuildConfig
-
-	o.Config.DryRun = o.Action.DryRun
-	o.CommandPath = c.CommandPath()
-	o.BaseName = baseName
-	o.CommandName = commandName
-
-	o.LogsForObject = polymorphichelpers.LogsForObjectFn
-	o.Printer, err = o.PrintFlags.ToPrinter()
-	if err != nil {
-		return err
-	}
-
-	if err := CompleteAppConfig(o.Config, f, c, args); err != nil {
-		return err
-	}
-	if err := setAppConfigLabels(c, o.Config); err != nil {
-		return err
-	}
-	return nil
-}
-
-func NewAppOptions(streams genericclioptions.IOStreams) *AppOptions {
-	config := newcmd.NewAppConfig()
-	config.Deploy = true
-
-	// disable the --template printFlag, as it is shadowed by the existing --template printing
-	// in this command - which is used to select existing app templates
-	printFlags := genericclioptions.NewPrintFlags("created")
-	printFlags.TemplatePrinterFlags.TemplateArgument = nil
-	*printFlags.TemplatePrinterFlags.AllowMissingKeys = false
-
-	return &AppOptions{
-		IOStreams: streams,
-		ObjectGeneratorOptions: &ObjectGeneratorOptions{
-			PrintFlags: printFlags,
-			IOStreams:  streams,
-			Config:     config,
-		},
-	}
-}
-
-// NewCmdNewApplication implements the OpenShift cli new-app command.
-func NewCmdNewApplication(name, baseName string, f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
-	o := NewAppOptions(streams)
-
-	cmd := &cobra.Command{
-		Use:        fmt.Sprintf("%s (IMAGE | IMAGESTREAM | TEMPLATE | PATH | URL ...)", name),
-		Short:      "Create a new application",
-		Long:       fmt.Sprintf(newAppLong, baseName, name),
-		Example:    fmt.Sprintf(newAppExample, baseName, name),
-		SuggestFor: []string{"app", "application"},
-		Run: func(c *cobra.Command, args []string) {
-			kcmdutil.CheckErr(o.Complete(baseName, name, f, c, args))
-			kcmdutil.CheckErr(o.RunNewApp())
-		},
-	}
-
-	o.PrintFlags.AddFlags(cmd)
-
-	cmd.Flags().BoolVar(&o.Config.AsTestDeployment, "as-test", o.Config.AsTestDeployment, "If true create this application as a test deployment, which validates that the deployment succeeds and then scales down.")
-	cmd.Flags().StringSliceVar(&o.Config.SourceRepositories, "code", o.Config.SourceRepositories, "Source code to use to build this application.")
-	cmd.Flags().StringVar(&o.Config.ContextDir, "context-dir", o.Config.ContextDir, "Context directory to be used for the build.")
-	cmd.Flags().StringSliceVarP(&o.Config.ImageStreams, "image", "", o.Config.ImageStreams, "Name of an image stream to use in the app. (deprecated)")
-	cmd.Flags().MarkDeprecated("image", "use --image-stream instead")
-	cmd.Flags().StringSliceVarP(&o.Config.ImageStreams, "image-stream", "i", o.Config.ImageStreams, "Name of an image stream to use in the app.")
-	cmd.Flags().StringSliceVar(&o.Config.DockerImages, "docker-image", o.Config.DockerImages, "Name of a Docker image to include in the app.")
-	cmd.Flags().StringSliceVar(&o.Config.Templates, "template", o.Config.Templates, "Name of a stored template to use in the app.")
-	cmd.Flags().StringSliceVarP(&o.Config.TemplateFiles, "file", "f", o.Config.TemplateFiles, "Path to a template file to use for the app.")
-	cmd.MarkFlagFilename("file", "yaml", "yml", "json")
-	cmd.Flags().StringArrayVarP(&o.Config.TemplateParameters, "param", "p", o.Config.TemplateParameters, "Specify a key-value pair (e.g., -p FOO=BAR) to set/override a parameter value in the template.")
-	cmd.Flags().StringArrayVar(&o.Config.TemplateParameterFiles, "param-file", o.Config.TemplateParameterFiles, "File containing parameter values to set/override in the template.")
-	cmd.MarkFlagFilename("param-file")
-	cmd.Flags().StringSliceVar(&o.Config.Groups, "group", o.Config.Groups, "Indicate components that should be grouped together as +.")
-	cmd.Flags().StringArrayVarP(&o.Config.Environment, "env", "e", o.Config.Environment, "Specify a key-value pair for an environment variable to set into each container.")
-	cmd.Flags().StringArrayVar(&o.Config.EnvironmentFiles, "env-file", o.Config.EnvironmentFiles, "File containing key-value pairs of environment variables to set into each container.")
-	cmd.MarkFlagFilename("env-file")
-	cmd.Flags().StringArrayVar(&o.Config.BuildEnvironment, "build-env", o.Config.BuildEnvironment, "Specify a key-value pair for an environment variable to set into each build image.")
-	cmd.Flags().StringArrayVar(&o.Config.BuildEnvironmentFiles, "build-env-file", o.Config.BuildEnvironmentFiles, "File containing key-value pairs of environment variables to set into each build image.")
-	cmd.MarkFlagFilename("build-env-file")
-	cmd.Flags().StringVar(&o.Config.Name, "name", o.Config.Name, "Set name to use for generated application artifacts.")
-	cmd.Flags().Var(&o.Config.Strategy, "strategy", "Specify the build strategy to use if you don't want it to be detected automatically (docker|pipeline|source).")
-	cmd.Flags().StringP("labels", "l", "", "Label to set in all resources for this application.")
-	cmd.Flags().BoolVar(&o.Config.IgnoreUnknownParameters, "ignore-unknown-parameters", o.Config.IgnoreUnknownParameters, "If true, will not stop processing if a provided parameter does not exist in the template.")
-	cmd.Flags().BoolVar(&o.Config.InsecureRegistry, "insecure-registry", o.Config.InsecureRegistry, "If true, indicates that the referenced Docker images are on insecure registries and should bypass certificate checking.")
-	cmd.Flags().BoolVarP(&o.Config.AsList, "list", "L", o.Config.AsList, "List all local templates and image streams that can be used to create an app.")
-	cmd.Flags().BoolVarP(&o.Config.AsSearch, "search", "S", o.Config.AsSearch, "Search all templates, image streams, and Docker images that match the arguments provided.")
-	cmd.Flags().BoolVar(&o.Config.AllowMissingImages, "allow-missing-images", o.Config.AllowMissingImages, "If true, indicates that referenced Docker images that cannot be found locally or in a registry should still be used.")
-	cmd.Flags().BoolVar(&o.Config.AllowMissingImageStreamTags, "allow-missing-imagestream-tags", o.Config.AllowMissingImageStreamTags, "If true, indicates that image stream tags that don't exist should still be used.")
-	cmd.Flags().BoolVar(&o.Config.AllowSecretUse, "grant-install-rights", o.Config.AllowSecretUse, "If true, a component that requires access to your account may use your token to install software into your project. Only grant images you trust the right to run with your token.")
-	cmd.Flags().StringVar(&o.Config.SourceSecret, "source-secret", o.Config.SourceSecret, "The name of an existing secret that should be used for cloning a private git repository.")
-	cmd.Flags().BoolVar(&o.Config.SkipGeneration, "no-install", o.Config.SkipGeneration, "Do not attempt to run images that describe themselves as being installable.")
-	cmd.Flags().BoolVar(&o.Config.BinaryBuild, "binary", o.Config.BinaryBuild, "Instead of expecting a source URL, set the build to expect binary contents. Will disable triggers.")
-
-	o.Action.BindForOutput(cmd.Flags(), "output", "template")
-	cmd.Flags().String("output-version", "", "The preferred API versions of the output objects")
-
-	return cmd
-}
-
-// Complete sets any default behavior for the command
-func (o *AppOptions) Complete(baseName, commandName string, f kcmdutil.Factory, c *cobra.Command, args []string) error {
-	o.RESTClientGetter = f
-
-	cmdutil.WarnAboutCommaSeparation(o.ErrOut, o.ObjectGeneratorOptions.Config.TemplateParameters, "--param")
-	err := o.ObjectGeneratorOptions.Complete(baseName, commandName, f, c, args)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// RunNewApp contains all the necessary functionality for the OpenShift cli new-app command
-func (o *AppOptions) RunNewApp() error {
-	config := o.Config
-	out := o.Action.Out
-
-	if config.Querying() {
-		result, err := config.RunQuery()
-		if err != nil {
-			return HandleError(err, o.BaseName, o.CommandName, o.CommandPath, config, TransformRunError)
-		}
-
-		if o.Action.ShouldPrint() {
-			list := &unstructured.UnstructuredList{
-				Object: map[string]interface{}{
-					"kind":       "List",
-					"apiVersion": "v1",
-					"metadata":   map[string]interface{}{},
-				},
-			}
-			for _, item := range result.List.Items {
-				unstructuredItem, err := runtime.DefaultUnstructuredConverter.ToUnstructured(item)
-				if err != nil {
-					return err
-				}
-				list.Items = append(list.Items, unstructured.Unstructured{Object: unstructuredItem})
-			}
-
-			return o.Printer.PrintObj(list, o.Out)
-		}
-
-		return printHumanReadableQueryResult(result, out, o.BaseName, o.CommandName)
-	}
-
-	CheckGitInstalled(out)
-
-	result, err := config.Run()
-	if err := HandleError(err, o.BaseName, o.CommandName, o.CommandPath, config, TransformRunError); err != nil {
-		return err
-	}
-
-	// set labels explicitly supplied by the user on the command line
-	if err := SetLabels(config.Labels, result); err != nil {
-		return err
-	}
-
-	if len(result.Name) > 0 {
-		// only set the computed implicit "app" label on objects if no object we've produced
-		// already has the "app" label.
-		appLabel := map[string]string{"app": result.Name}
-		hasAppLabel, err := hasLabel(appLabel, result)
-		if err != nil {
-			return err
-		}
-		if !hasAppLabel {
-			if err := SetLabels(appLabel, result); err != nil {
-				return err
-			}
-		}
-	}
-	if err := SetAnnotations(map[string]string{newcmd.GeneratedByNamespace: newcmd.GeneratedByNewApp}, result); err != nil {
-		return err
-	}
-
-	if o.Action.ShouldPrint() {
-		// TODO(juanvallejo): this needs to be fixed by updating QueryResult.List to be of type corev1.List
-		printableList := &corev1.List{
-			// this is ok because we know exactly how we want to be serialized
-			TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String(), Kind: "List"},
-		}
-		for _, obj := range result.List.Items {
-			printableList.Items = append(printableList.Items, runtime.RawExtension{
-				Object: obj,
-			})
-		}
-		return o.Printer.PrintObj(printableList, o.Out)
-	}
-
-	if result.GeneratedJobs {
-		o.Action.Compact()
-	}
-
-	if errs := o.Action.WithMessage(bulk.CreateMessage(config.Labels), "created").Run(result.List, result.Namespace); len(errs) > 0 {
-		return kcmdutil.ErrExit
-	}
-
-	if !o.Action.Verbose() || o.Action.DryRun {
-		return nil
-	}
-
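-	// supportedTypes lists the object kinds we know how to print follow-up
-	// guidance for below; any other kinds in the result list are skipped.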
-	supportedTypes := map[schema.GroupVersionKind]bool{
-		{Version: "v1", Kind: "Pod"}:                                   true,
-		{Group: buildv1.GroupName, Version: "v1", Kind: "BuildConfig"}: true,
-		{Group: imagev1.GroupName, Version: "v1", Kind: "ImageStream"}: true,
-		{Group: routev1.GroupName, Version: "v1", Kind: "Route"}:       true,
-	}
-
-	hasMissingRepo := false
-	installing := []*corev1.Pod{}
-	indent := o.Action.DefaultIndent()
-	containsRoute := false
-	for _, item := range result.List.Items {
-		// these are all unstructured
-		unstructuredObj := item.(*unstructured.Unstructured)
-
-		// Determine whether this is a "known" resource type (one handled by the
-		// switch below). If so, attempt the conversion and fail if it errors.
-		if supported := supportedTypes[unstructuredObj.GroupVersionKind()]; !supported {
-			continue
-		}
-
-		obj, err := scheme.Scheme.New(unstructuredObj.GroupVersionKind())
-		if err != nil {
-			return err
-		}
-		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredObj.Object, obj); err != nil {
-			return err
-		}
-
-		switch t := obj.(type) {
-		case *corev1.Pod:
-			if t.Annotations[newcmd.GeneratedForJob] == "true" {
-				installing = append(installing, t)
-			}
-		case *buildv1.BuildConfig:
-			triggered := false
-			for _, trigger := range t.Spec.Triggers {
-				// stop at the first trigger type that schedules builds automatically
-				if trigger.Type == buildv1.ImageChangeBuildTriggerType || trigger.Type == buildv1.ConfigChangeBuildTriggerType {
-					triggered = true
-					break
-				}
-			}
-			if triggered {
-				fmt.Fprintf(out, "%[1]sBuild scheduled, use '%[3]s logs -f bc/%[2]s' to track its progress.\n", indent, t.Name, o.BaseName)
-			} else {
-				fmt.Fprintf(out, "%[1]sUse '%[3]s start-build %[2]s' to start a build.\n", indent, t.Name, o.BaseName)
-			}
-		case *imagev1.ImageStream:
-			if len(t.Status.DockerImageRepository) == 0 {
-				if hasMissingRepo {
-					continue
-				}
-				hasMissingRepo = true
-				fmt.Fprintf(out, "%sWARNING: No container image registry has been configured with the server. Automatic builds and deployments may not function.\n", indent)
-			}
-		case *routev1.Route:
-			containsRoute = true
-			if len(t.Spec.Host) > 0 {
-				var route *routev1.Route
-				// check whether route processing completed and the router prepopulated the host field
-				err := wait.PollImmediate(500*time.Millisecond, RoutePollTimeout, func() (bool, error) {
-					route, err = config.RouteClient.Routes(t.Namespace).Get(t.Name, metav1.GetOptions{})
-					if err != nil {
-						return false, fmt.Errorf("Error while polling route %s", t.Name)
-					}
-					if route.Spec.Host != "" {
-						return true, nil
-					}
-					return false, nil
-				})
-				if err != nil {
-					klog.V(4).Infof("Failed to poll route %s host field: %s", t.Name, err)
-				} else {
-					fmt.Fprintf(out, "%sAccess your application via route '%s' \n", indent, route.Spec.Host)
-				}
-			}
-
-		}
-	}
-	switch {
-	case len(installing) == 1:
-		return followInstallation(config, o.RESTClientGetter, installing[0], o.LogsForObject)
-	case len(installing) > 1:
-		for i := range installing {
-			fmt.Fprintf(out, "%sTrack installation of %s with '%s logs %s'.\n", indent, installing[i].Name, o.BaseName, installing[i].Name)
-		}
-	case len(result.List.Items) > 0:
-		// if we don't find a route, print guidance on how to expose the app
-		if !containsRoute {
-			// if we don't have any routes but do have services, suggest commands to expose those
-			svc := getServices(result.List.Items)
-			if len(svc) > 0 {
-				fmt.Fprintf(out, "%sApplication is not exposed. You can expose services to the outside world by executing one or more of the commands below:\n", indent)
-				for _, s := range svc {
-					fmt.Fprintf(out, "%s '%s %s svc/%s' \n", indent, o.BaseName, ExposeRecommendedName, s.Name)
-				}
-			}
-		}
-		fmt.Fprintf(out, "%sRun '%s %s' to view your app.\n", indent, o.BaseName, StatusRecommendedName)
-	}
-	return nil
-}
-
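-// getServices converts the unstructured result items and returns the
-// v1 Services among them, skipping (and logging) items that fail to convert.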
-func getServices(items []runtime.Object) []*corev1.Service {
-	var svc []*corev1.Service
-	for _, i := range items {
-		unstructuredObj := i.(*unstructured.Unstructured)
-		obj, err := scheme.Scheme.New(unstructuredObj.GroupVersionKind())
-		if err != nil {
-			klog.V(1).Info(err)
-			continue
-		}
-		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredObj.Object, obj); err != nil {
-			klog.V(1).Info(err)
-			continue
-		}
-
-		switch t := obj.(type) {
-		case *corev1.Service:
-			svc = append(svc, t)
-		}
-	}
-	return svc
-}
-
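-// followInstallation waits for the install pod to leave the Pending phase,
-// streams its logs, and then waits for it to report success or failure.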
-func followInstallation(config *newcmd.AppConfig, clientGetter genericclioptions.RESTClientGetter, pod *corev1.Pod, logsForObjectFn polymorphichelpers.LogsForObjectFunc) error {
-	fmt.Fprintf(config.Out, "--> Installing ...\n")
-
-	// we cannot retrieve logs until the pod is out of pending
-	// TODO: move this to the server side
-	podClient := config.KubeClient.CoreV1().Pods(pod.Namespace)
-	if err := wait.PollImmediate(500*time.Millisecond, 60*time.Second, installationStarted(podClient, pod.Name, config.KubeClient.CoreV1().Secrets(pod.Namespace))); err != nil {
-		return err
-	}
-
-	opts := &logs.LogsOptions{
-		Namespace:   pod.Namespace,
-		ResourceArg: pod.Name,
-		Options: &corev1.PodLogOptions{
-			Follow:    true,
-			Container: pod.Spec.Containers[0].Name,
-		},
-		RESTClientGetter: clientGetter,
-		ConsumeRequestFn: logs.DefaultConsumeRequest,
-		LogsForObject:    logsForObjectFn,
-		IOStreams:        genericclioptions.IOStreams{Out: config.Out},
-	}
-	logErr := opts.RunLogs()
-
-	// status of the pod may take tens of seconds to propagate
-	if err := wait.PollImmediate(500*time.Millisecond, 30*time.Second, installationComplete(podClient, pod.Name, config.Out)); err != nil {
-		if err == wait.ErrWaitTimeout {
-			if logErr != nil {
-				// output the log error if one occurred
-				err = logErr
-			} else {
-				err = fmt.Errorf("installation may not have completed, see logs for %q for more information", pod.Name)
-			}
-		}
-		return err
-	}
-
-	return nil
-}
-
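-// installationStarted returns a condition func that reports true once the
-// install pod has left the Pending phase; as a side effect it deletes the
-// generated secret associated with the pod, if one exists.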
-func installationStarted(c corev1typedclient.PodInterface, name string, s corev1typedclient.SecretInterface) wait.ConditionFunc {
-	return func() (bool, error) {
-		pod, err := c.Get(name, metav1.GetOptions{})
-		if err != nil {
-			return false, err
-		}
-		if pod.Status.Phase == corev1.PodPending {
-			return false, nil
-		}
-		// delete a secret named the same as the pod if it exists
-		if secret, err := s.Get(name, metav1.GetOptions{}); err == nil {
-			if secret.Annotations[newcmd.GeneratedForJob] == "true" &&
-				secret.Annotations[newcmd.GeneratedForJobFor] == pod.Annotations[newcmd.GeneratedForJobFor] {
-				if err := s.Delete(name, nil); err != nil {
-					klog.V(4).Infof("Failed to delete install secret %s: %v", name, err)
-				}
-			}
-		}
-		return true, nil
-	}
-}
-
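-// installationComplete returns a condition func that reports true once the
-// install pod has terminated, deleting the pod and printing a success
-// message if it succeeded.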
-func installationComplete(c corev1typedclient.PodInterface, name string, out io.Writer) wait.ConditionFunc {
-	return func() (bool, error) {
-		pod, err := c.Get(name, metav1.GetOptions{})
-		if err != nil {
-			if kapierrors.IsNotFound(err) {
-				return false, fmt.Errorf("installation pod was deleted; unable to determine whether it completed successfully")
-			}
-			return false, nil
-		}
-		switch pod.Status.Phase {
-		case corev1.PodSucceeded:
-			fmt.Fprintf(out, "--> Success\n")
-			if err := c.Delete(name, nil); err != nil {
-				klog.V(4).Infof("Failed to delete install pod %s: %v", name, err)
-			}
-			return true, nil
-		case corev1.PodFailed:
-			return true, fmt.Errorf("installation of %q did not complete successfully", name)
-		default:
-			return false, nil
-		}
-	}
-}
-
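-// setAppConfigLabels parses the value of the --labels (-l) flag into
-// config.Labels.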
-func setAppConfigLabels(c *cobra.Command, config *newcmd.AppConfig) error {
-	labelStr := kcmdutil.GetFlagString(c, "labels")
-	if len(labelStr) != 0 {
-		var err error
-		config.Labels, err = generate.ParseLabels(labelStr)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// getDockerClient returns a client capable of communicating with the local
-// docker daemon. If an error occurs (such as no local daemon being
-// available), it returns a nil client along with the error.
-func getDockerClient() (*docker.Client, error) {
-	dockerClient, _, err := dockerutil.NewHelper().GetClient()
-	if err == nil {
-		if err = dockerClient.Ping(); err != nil {
-			klog.V(4).Infof("Docker client did not respond to a ping: %v", err)
-			return nil, err
-		}
-		return dockerClient, nil
-	}
-	klog.V(2).Infof("No local Docker daemon detected: %v", err)
-	return nil, err
-}
-
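-// CompleteAppConfig fills in the remaining builder, mapper, and client state
-// on the AppConfig from the factory and command line, then validates
-// mutually exclusive flag combinations.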
-func CompleteAppConfig(config *newcmd.AppConfig, f kcmdutil.Factory, c *cobra.Command, args []string) error {
-	if config.Builder == nil {
-		config.Builder = f.NewBuilder()
-	}
-	mapper, err := f.ToRESTMapper()
-	if err != nil {
-		return err
-	}
-	if config.Mapper == nil {
-		config.Mapper = mapper
-	}
-	if config.Typer == nil {
-		config.Typer = scheme.Scheme
-	}
-
-	namespace, _, err := f.ToRawKubeConfigLoader().Namespace()
-	if err != nil {
-		return err
-	}
-
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return err
-	}
-
-	config.KubeClient, err = kubernetes.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-
-	dockerClient, _ := getDockerClient()
-
-	imageClient, err := imagev1typedclient.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	templateClient, err := templatev1typedclient.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	routeClient, err := routev1typedclient.NewForConfig(clientConfig)
-	if err != nil {
-		return err
-	}
-	config.SetOpenShiftClient(imageClient, templateClient, routeClient, namespace, dockerClient)
-
-	if config.AllowSecretUse {
-		// reuse the REST config fetched above rather than rebuilding it
-		config.SecretAccessor = newConfigSecretRetriever(clientConfig)
-	}
-
-	unknown := config.AddArguments(args)
-	if len(unknown) != 0 {
-		buf := &bytes.Buffer{}
-		fmt.Fprintf(buf, "Did not recognize the following arguments: %v\n\n", unknown)
-		for _, argName := range unknown {
-			fmt.Fprintf(buf, "%s:\n", argName)
-			for _, classErr := range config.EnvironmentClassificationErrors {
-				if classErr.Value != nil {
-					fmt.Fprintf(buf, "%s:  %v\n", classErr.Key, classErr.Value)
-				} else {
-					fmt.Fprintf(buf, "%s\n", classErr.Key)
-				}
-			}
-			for _, classErr := range config.SourceClassificationErrors {
-				fmt.Fprintf(buf, "%s:  %v\n", classErr.Key, classErr.Value)
-			}
-			for _, classErr := range config.TemplateClassificationErrors {
-				fmt.Fprintf(buf, "%s:  %v\n", classErr.Key, classErr.Value)
-			}
-			for _, classErr := range config.ComponentClassificationErrors {
-				fmt.Fprintf(buf, "%s:  %v\n", classErr.Key, classErr.Value)
-			}
-			fmt.Fprintln(buf)
-		}
-		return kcmdutil.UsageErrorf(c, "%s", heredoc.Doc(buf.String()))
-	}
-
-	if config.AllowMissingImages && config.AsSearch {
-		return kcmdutil.UsageErrorf(c, "--allow-missing-images and --search are mutually exclusive.")
-	}
-
-	if len(config.SourceImage) != 0 && len(config.SourceImagePath) == 0 {
-		return kcmdutil.UsageErrorf(c, "--source-image-path must be specified when --source-image is specified.")
-	}
-	if len(config.SourceImage) == 0 && len(config.SourceImagePath) != 0 {
-		return kcmdutil.UsageErrorf(c, "--source-image must be specified when --source-image-path is specified.")
-	}
-
-	if config.BinaryBuild && config.Strategy == newapp.StrategyPipeline {
-		return kcmdutil.UsageErrorf(c, "specifying binary builds and the pipeline strategy at the same time is not allowed.")
-	}
-
-	if len(config.BuildArgs) > 0 && config.Strategy != newapp.StrategyUnspecified && config.Strategy != newapp.StrategyDocker {
-		return kcmdutil.UsageErrorf(c, "Cannot use '--build-arg' without a Docker build")
-	}
-	return nil
-}
-
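-// SetAnnotations applies the given annotations to every object in the result list.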
-func SetAnnotations(annotations map[string]string, result *newcmd.AppResult) error {
-	for _, object := range result.List.Items {
-		err := newcmd.AddObjectAnnotations(object, annotations)
-		if err != nil {
-			return fmt.Errorf("failed to add annotation to object of type %q, this resource type is probably unsupported by your client version.", object.GetObjectKind().GroupVersionKind())
-		}
-	}
-	return nil
-}
-
-// addDeploymentConfigNestedLabels adds new label(s) to the nested pod template labels of a single DeploymentConfig object
-func addDeploymentConfigNestedLabels(obj *appsv1.DeploymentConfig, labels labels.Set) error {
-	if obj.Spec.Template == nil {
-		return nil
-	}
-	if obj.Spec.Template.Labels == nil {
-		obj.Spec.Template.Labels = make(map[string]string)
-	}
-	for k, v := range labels {
-		obj.Spec.Template.Labels[k] = v
-	}
-	return nil
-}
-
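-// addObjectLabels merges the given labels into the object's metadata; for a
-// DeploymentConfig it also labels the nested pod template.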
-func addObjectLabels(obj runtime.Object, labels labels.Set) error {
-	if labels == nil {
-		return nil
-	}
-
-	accessor, err := meta.Accessor(obj)
-	if err != nil {
-		return err
-	}
-
-	metaLabels := accessor.GetLabels()
-	if metaLabels == nil {
-		metaLabels = make(map[string]string)
-	}
-	for k, v := range labels {
-		metaLabels[k] = v
-	}
-	accessor.SetLabels(metaLabels)
-
-	switch objType := obj.(type) {
-	case *appsv1.DeploymentConfig:
-		if err := addDeploymentConfigNestedLabels(objType, labels); err != nil {
-			return fmt.Errorf("unable to add nested labels to %s/%s: %v", obj.GetObjectKind().GroupVersionKind(), accessor.GetName(), err)
-		}
-	}
-
-	return nil
-}
-
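-// SetLabels applies the given labels to every object in the result list.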
-func SetLabels(labels map[string]string, result *newcmd.AppResult) error {
-	for _, object := range result.List.Items {
-		err := addObjectLabels(object, labels)
-		if err != nil {
-			return fmt.Errorf("failed to add annotation to object of type %q, this resource type is probably unsupported by your client version.", object.GetObjectKind().GroupVersionKind())
-		}
-	}
-	return nil
-}
-
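-// hasLabel reports whether any object in the result (including the pod
-// template of a DeploymentConfig) already carries one of the given label keys.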
-func hasLabel(labels map[string]string, result *newcmd.AppResult) (bool, error) {
-	for _, obj := range result.List.Items {
-		accessor, err := meta.Accessor(obj)
-		if err != nil {
-			return false, err
-		}
-		for k := range accessor.GetLabels() {
-			if _, ok := labels[k]; ok {
-				return true, nil
-			}
-		}
-
-		switch objType := obj.(type) {
-		case *appsv1.DeploymentConfig:
-			if objType.Spec.Template == nil {
-				continue
-			}
-			for k := range objType.Spec.Template.Labels {
-				if _, ok := labels[k]; ok {
-					return true, nil
-				}
-			}
-		}
-	}
-	return false, nil
-}
-
-// isInvalidTriggerError returns true if the given error is
-// a validation error that contains 'invalid trigger type' in its
-// error message. This error is returned from older servers that
-// consider the presence of unknown trigger types to be an error.
-func isInvalidTriggerError(err error) bool {
-	if !kapierrors.IsInvalid(err) {
-		return false
-	}
-	statusErr, ok := err.(*kapierrors.StatusError)
-	if !ok {
-		return false
-	}
-	return strings.Contains(statusErr.Status().Message, "invalid trigger type")
-}
-
-// retryBuildConfig determines if the given error is caused by an invalid trigger
-// error on a BuildConfig. If that is the case, it will remove all triggers with a
-// type that is not in the whitelist for an older server.
-func retryBuildConfig(obj *unstructured.Unstructured, err error) *unstructured.Unstructured {
-	if obj == nil {
-		return nil
-	}
-	triggerTypeWhiteList := map[buildv1.BuildTriggerType]struct{}{
-		buildv1.GitHubWebHookBuildTriggerType:    {},
-		buildv1.GenericWebHookBuildTriggerType:   {},
-		buildv1.ImageChangeBuildTriggerType:      {},
-		buildv1.GitLabWebHookBuildTriggerType:    {},
-		buildv1.BitbucketWebHookBuildTriggerType: {},
-	}
-	if build.Kind("BuildConfig") == obj.GroupVersionKind().GroupKind() && isInvalidTriggerError(err) {
-		// FromUnstructured needs a non-nil target to decode into
-		bc := &buildv1.BuildConfig{}
-		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, bc); err != nil {
-			return nil
-		}
-
-		triggers := []buildv1.BuildTriggerPolicy{}
-		for _, t := range bc.Spec.Triggers {
-			if _, inList := triggerTypeWhiteList[t.Type]; inList {
-				triggers = append(triggers, t)
-			}
-		}
-		bc.Spec.Triggers = triggers
-
-		retUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(bc)
-		if err != nil {
-			return nil
-		}
-		return &unstructured.Unstructured{Object: retUnstructured}
-	}
-	return nil
-}
-
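-// HandleError flattens an aggregate error, routes each individual error
-// through transformError to build up suggestion groups, and renders the
-// groups into a single error suitable for display.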
-func HandleError(err error, baseName, commandName, commandPath string, config *newcmd.AppConfig, transformError func(err error, baseName, commandName, commandPath string, groups ErrorGroups, config *newcmd.AppConfig)) error {
-	if err == nil {
-		return nil
-	}
-	errs := []error{err}
-	if agg, ok := err.(errors.Aggregate); ok {
-		errs = agg.Errors()
-	}
-	groups := ErrorGroups{}
-	for _, err := range errs {
-		transformError(err, baseName, commandName, commandPath, groups, config)
-	}
-	buf := &bytes.Buffer{}
-	for _, group := range groups {
-		fmt.Fprint(buf, kcmdutil.MultipleErrors("error: ", group.errs))
-		if len(group.classification) > 0 {
-			fmt.Fprintln(buf)
-		}
-		fmt.Fprint(buf, group.classification)
-		if len(group.suggestion) > 0 {
-			if len(group.classification) > 0 {
-				fmt.Fprintln(buf)
-			}
-			fmt.Fprintln(buf)
-		}
-		fmt.Fprint(buf, group.suggestion)
-	}
-	return fmt.Errorf("%s", buf.String())
-}
-
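-// ErrorGroup collects related errors together with the classification and
-// suggestion text that should be printed alongside them.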
-type ErrorGroup struct {
-	errs           []error
-	suggestion     string
-	classification string
-}
-type ErrorGroups map[string]ErrorGroup
-
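-// Add appends the given errors to the named group, replacing its suggestion
-// and classification.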
-func (g ErrorGroups) Add(group string, suggestion string, classification string, err error, errs ...error) {
-	all := g[group]
-	all.errs = append(all.errs, errs...)
-	all.errs = append(all.errs, err)
-	all.suggestion = suggestion
-	all.classification = classification
-	g[group] = all
-}
-
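-// TransformRunError sorts well-known new-app errors into groups with
-// human-friendly suggestions; unrecognized errors fall through to the
-// default group.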
-func TransformRunError(err error, baseName, commandName, commandPath string, groups ErrorGroups, config *newcmd.AppConfig) {
-	switch t := err.(type) {
-	case newcmd.ErrRequiresExplicitAccess:
-		if t.Input.Token != nil && t.Input.Token.ServiceAccount {
-			groups.Add(
-				"explicit-access-installer",
-				heredoc.Doc(`
-					WARNING: This will allow the pod to create and manage resources within your namespace -
-					ensure you trust the image with those permissions before you continue.
-
-					You can see more information about the image by adding the --dry-run flag.
-					If you trust the provided image, include the flag --grant-install-rights.`,
-				),
-				"",
-				fmt.Errorf("installing %q requires an 'installer' service account with project editor access", t.Match.Value),
-			)
-		} else {
-			groups.Add(
-				"explicit-access-you",
-				heredoc.Doc(`
-					WARNING: This will allow the pod to act as you across the entire cluster - ensure you
-					trust the image with those permissions before you continue.
-
-					You can see more information about the image by adding the --dry-run flag.
-					If you trust the provided image, include the flag --grant-install-rights.`,
-				),
-				"",
-				fmt.Errorf("installing %q requires that you grant the image access to run with your credentials", t.Match.Value),
-			)
-		}
-		return
-	case newappapp.ErrNoMatch:
-		classification := config.ClassificationWinners[t.Value]
-		if classification.IncludeGitErrors {
-			notGitRepo, ok := config.SourceClassificationErrors[t.Value]
-			if ok {
-				t.Errs = append(t.Errs, notGitRepo.Value)
-			}
-		}
-		groups.Add(
-			"no-matches",
-			heredoc.Docf(`
-				The '%[1]s' command will match arguments to the following types:
-
-				  1. Images tagged into image streams in the current project or the 'openshift' project
-				     - if you don't specify a tag, we'll add ':latest'
-				  2. Images in the Docker Hub, on remote registries, or on the local Docker engine
-				  3. Templates in the current project or the 'openshift' project
-				  4. Git repository URLs or local paths that point to Git repositories
-
-				--allow-missing-images can be used to point to an image that does not exist yet.
-
-				See '%[1]s -h' for examples.`, commandPath,
-			),
-			classification.String(),
-			t,
-			t.Errs...,
-		)
-		return
-	case newappapp.ErrMultipleMatches:
-		classification := config.ClassificationWinners[t.Value]
-		buf := &bytes.Buffer{}
-		for i, match := range t.Matches {
-
-			// If we have more than 5 matches, stop output and recommend searching
-			// after the fifth
-			if i >= 5 {
-				groups.Add(
-					"multiple-matches",
-					heredoc.Docf(`
-						The argument %[1]q could apply to the following Docker images, OpenShift image streams, or templates:
-
-						%[2]sTo view a full list of matches, use '%[3]s %[4]s -S %[1]s'`, t.Value, buf.String(), baseName, commandName,
-					),
-					classification.String(),
-					t,
-					t.Errs...,
-				)
-
-				return
-			}
-
-			fmt.Fprintf(buf, "* %s\n", match.Description)
-			fmt.Fprintf(buf, "  Use %[1]s to specify this image or template\n\n", match.Argument)
-		}
-
-		groups.Add(
-			"multiple-matches",
-			heredoc.Docf(`
-					The argument %[1]q could apply to the following Docker images, OpenShift image streams, or templates:
-
-					%[2]s`, t.Value, buf.String(),
-			),
-			classification.String(),
-			t,
-			t.Errs...,
-		)
-		return
-	case newappapp.ErrPartialMatch:
-		classification := config.ClassificationWinners[t.Value]
-		buf := &bytes.Buffer{}
-		fmt.Fprintf(buf, "* %s\n", t.Match.Description)
-		fmt.Fprintf(buf, "  Use %[1]s to specify this image or template\n\n", t.Match.Argument)
-
-		groups.Add(
-			"partial-match",
-			heredoc.Docf(`
-					The argument %[1]q only partially matched the following Docker image, OpenShift image stream, or template:
-
-					%[2]s`, t.Value, buf.String(),
-			),
-			classification.String(),
-			t,
-			t.Errs...,
-		)
-		return
-	case newappapp.ErrNoTagsFound:
-		classification := config.ClassificationWinners[t.Value]
-		buf := &bytes.Buffer{}
-		fmt.Fprintf(buf, "  Use --allow-missing-imagestream-tags to use this image stream\n\n")
-		groups.Add(
-			"no-tags",
-			heredoc.Docf(`
-					The image stream %[1]q exists, but it has no tags.
-
-					%[2]s`, t.Match.Name, buf.String(),
-			),
-			classification.String(),
-			t,
-			t.Errs...,
-		)
-		return
-	}
-	switch err {
-	case errNoTokenAvailable:
-		// TODO: improve by allowing token generation
-		groups.Add("", "", "", fmt.Errorf("to install components you must be logged in with an OAuth token (instead of only a certificate)"))
-	case newcmd.ErrNoInputs:
-		// TODO: suggest things to the user
-		groups.Add("", "", "", UsageError(commandPath, newAppNoInput, baseName, commandName))
-	default:
-		if runtime.IsNotRegisteredError(err) {
-			groups.Add("", "", "", fmt.Errorf(fmt.Sprintf("The template contained an object type unknown to `oc new-app`.  Use `oc process -f